From 7540d573c8b41f5f10ea6e4e5f6b791bb97cb911 Mon Sep 17 00:00:00 2001
From: Joe Elliott
Date: Wed, 6 Mar 2024 16:34:56 -0500
Subject: [PATCH] 1.21 -> 1.29 (#3470)

Signed-off-by: Joe Elliott
---
 example/tk/jsonnetfile.json | 2 +-
 example/tk/jsonnetfile.lock.json | 38 +-
 example/tk/vendor/1.21 | 1 -
 example/tk/vendor/1.29 | 1 +
 .../grafana-builder/grafana.libsonnet | 74 ++-
 .../jsonnet-libs/grafana/config.libsonnet | 9 +
 .../jsonnet-libs/grafana/configmaps.libsonnet | 2 +-
 .../ksonnet-util/grafana.libsonnet | 4 -
 .../ksonnet-util/legacy-custom.libsonnet | 4 -
 .../ksonnet-util/legacy-noname.libsonnet | 5 -
 .../ksonnet-util/legacy-subtypes.libsonnet | 16 +-
 .../ksonnet-util/legacy-types.libsonnet | 9 +-
 .../jsonnet-libs/ksonnet-util/util.libsonnet | 29 +-
 .../jsonnet-libs/kube-state-metrics/README.md | 2 +-
 .../kube-state-metrics/main.libsonnet | 11 +-
 .../scrape_config.libsonnet | 10 +
 .../memcached/memcached.libsonnet | 34 +-
 .../jsonnet-libs/prometheus/config.libsonnet | 2 +-
 .../prometheus/ha-mixin.libsonnet | 7 +
 .../jsonnet-libs/prometheus/images.libsonnet | 2 +-
 .../prometheus/prometheus.libsonnet | 2 +-
 .../grafana/jsonnet-libs/tanka-util/README.md | 13 +-
 .../tanka-util/environment.libsonnet | 16 +
 .../jsonnet-libs/docsonnet/doc-util/README.md | 288 ++++++++---
 .../docsonnet/doc-util/main.libsonnet | 198 +++++++-
 .../docsonnet/doc-util/render.libsonnet | 479 ++++++++++++++++++
 .../1.21/_custom/volumeMounts.libsonnet | 169 ------
 .../v1beta1/main.libsonnet | 11 -
 .../v1beta1/mutatingWebhook.libsonnet | 66 ---
 .../mutatingWebhookConfiguration.libsonnet | 60 ---
 .../v1beta1/validatingWebhook.libsonnet | 64 ---
 .../validatingWebhookConfiguration.libsonnet | 60 ---
 .../v1/apiServiceSpec.libsonnet | 27 -
 .../v1beta1/apiService.libsonnet | 80 ---
 .../v1beta1/apiServiceSpec.libsonnet | 27 -
 .../apiregistration/v1beta1/main.libsonnet | 9 -
 ...rollingUpdateStatefulSetStrategy.libsonnet | 8 -
 .../authentication/v1beta1/main.libsonnet | 8 -
 .../v1beta1/tokenReview.libsonnet | 65 ---
 .../localSubjectAccessReview.libsonnet | 95 ----
 .../_gen/authorization/v1beta1/main.libsonnet | 17 -
 .../v1beta1/selfSubjectAccessReview.libsonnet | 83 ---
 .../v1beta1/subjectAccessReview.libsonnet | 95 ----
 .../v1beta1/subjectAccessReviewSpec.libsonnet | 42 --
 .../v1/crossVersionObjectReference.libsonnet | 10 -
 .../_gen/autoscaling/v1/scaleStatus.libsonnet | 10 -
 .../containerResourceMetricSource.libsonnet | 14 -
 .../containerResourceMetricStatus.libsonnet | 14 -
 .../crossVersionObjectReference.libsonnet | 10 -
 .../v2beta1/externalMetricSource.libsonnet | 23 -
 .../v2beta1/externalMetricStatus.libsonnet | 23 -
 .../v2beta1/horizontalPodAutoscaler.libsonnet | 76 ---
 .../horizontalPodAutoscalerSpec.libsonnet | 23 -
 .../_gen/autoscaling/v2beta1/main.libsonnet | 21 -
 .../autoscaling/v2beta1/metricSpec.libsonnet | 95 ----
 .../v2beta1/metricStatus.libsonnet | 95 ----
 .../v2beta1/objectMetricSource.libsonnet | 32 --
 .../v2beta1/objectMetricStatus.libsonnet | 32 --
 .../v2beta1/podsMetricSource.libsonnet | 21 -
 .../v2beta1/podsMetricStatus.libsonnet | 21 -
 .../v2beta1/resourceMetricSource.libsonnet | 12 -
 .../v2beta1/resourceMetricStatus.libsonnet | 12 -
 .../containerResourceMetricSource.libsonnet | 21 -
 .../containerResourceMetricStatus.libsonnet | 19 -
 .../crossVersionObjectReference.libsonnet | 10 -
 .../v2beta2/externalMetricSource.libsonnet | 33 --
 .../v2beta2/externalMetricStatus.libsonnet | 31 --
 .../autoscaling/v2beta2/metricSpec.libsonnet | 141 ------
 .../v2beta2/metricStatus.libsonnet | 131 -----
.../v2beta2/metricTarget.libsonnet | 14 - .../v2beta2/metricValueStatus.libsonnet | 12 - .../v2beta2/objectMetricSource.libsonnet | 42 -- .../v2beta2/objectMetricStatus.libsonnet | 40 -- .../v2beta2/podsMetricSource.libsonnet | 33 -- .../v2beta2/podsMetricStatus.libsonnet | 31 -- .../v2beta2/resourceMetricSource.libsonnet | 19 - .../v2beta2/resourceMetricStatus.libsonnet | 17 - .../_gen/batch/v1/jobTemplateSpec.libsonnet | 320 ------------ .../1.21/_gen/batch/v1beta1/cronJob.libsonnet | 388 -------------- .../_gen/batch/v1beta1/cronJobSpec.libsonnet | 335 ------------ .../1.21/_gen/batch/v1beta1/main.libsonnet | 8 - .../certificateSigningRequest.libsonnet | 79 --- ...rtificateSigningRequestCondition.libsonnet | 16 - .../certificateSigningRequestSpec.libsonnet | 26 - .../certificateSigningRequestStatus.libsonnet | 12 - .../_gen/certificates/v1beta1/main.libsonnet | 8 - .../_gen/coordination/v1beta1/main.libsonnet | 6 - ...awsElasticBlockStoreVolumeSource.libsonnet | 14 - .../core/v1/azureDiskVolumeSource.libsonnet | 18 - .../azureFilePersistentVolumeSource.libsonnet | 14 - .../core/v1/azureFileVolumeSource.libsonnet | 12 - .../v1/cephFSPersistentVolumeSource.libsonnet | 25 - .../_gen/core/v1/cephFSVolumeSource.libsonnet | 23 - .../core/v1/configMapProjection.libsonnet | 14 - .../core/v1/configMapVolumeSource.libsonnet | 16 - .../core/v1/downwardAPIVolumeFile.libsonnet | 26 - .../core/v1/emptyDirVolumeSource.libsonnet | 10 - .../1.21/_gen/core/v1/endpointPort.libsonnet | 14 - .../core/v1/ephemeralContainers.libsonnet | 60 --- .../_gen/core/v1/fcVolumeSource.libsonnet | 20 - .../gcePersistentDiskVolumeSource.libsonnet | 14 - .../v1/iscsiPersistentVolumeSource.libsonnet | 35 -- .../_gen/core/v1/iscsiVolumeSource.libsonnet | 33 -- .../1.21/_gen/core/v1/keyToPath.libsonnet | 12 - .../_gen/core/v1/localVolumeSource.libsonnet | 10 - .../v1/persistentVolumeClaimStatus.libsonnet | 20 - .../core/v1/persistentVolumeStatus.libsonnet | 12 - ...photonPersistentDiskVolumeSource.libsonnet | 10 - .../1.21/_gen/core/v1/podIP.libsonnet | 8 - .../core/v1/projectedVolumeSource.libsonnet | 12 - .../v1/rbdPersistentVolumeSource.libsonnet | 29 -- .../_gen/core/v1/rbdVolumeSource.libsonnet | 27 - .../core/v1/resourceFieldSelector.libsonnet | 12 - .../scaleIOPersistentVolumeSource.libsonnet | 31 -- .../core/v1/scaleIOVolumeSource.libsonnet | 29 -- .../_gen/core/v1/secretProjection.libsonnet | 14 - .../_gen/core/v1/secretVolumeSource.libsonnet | 16 - .../v1/topologySpreadConstraint.libsonnet | 23 - .../_gen/core/v1/volumeProjection.libsonnet | 44 -- .../vsphereVirtualDiskVolumeSource.libsonnet | 14 - .../_gen/discovery/v1/endpointPort.libsonnet | 14 - .../_gen/discovery/v1beta1/endpoint.libsonnet | 51 -- .../discovery/v1beta1/endpointHints.libsonnet | 10 - .../discovery/v1beta1/endpointPort.libsonnet | 14 - .../discovery/v1beta1/endpointSlice.libsonnet | 66 --- .../_gen/discovery/v1beta1/main.libsonnet | 10 - .../1.21/_gen/events/v1beta1/event.libsonnet | 124 ----- .../_gen/events/v1beta1/eventSeries.libsonnet | 10 - .../1.21/_gen/events/v1beta1/main.libsonnet | 6 - .../1.21/_gen/extensions/main.libsonnet | 5 - .../v1beta1/httpIngressPath.libsonnet | 26 - .../_gen/extensions/v1beta1/ingress.libsonnet | 85 ---- .../v1beta1/ingressBackend.libsonnet | 19 - .../extensions/v1beta1/ingressSpec.libsonnet | 32 -- .../v1beta1/ingressStatus.libsonnet | 13 - .../extensions/v1beta1/ingressTLS.libsonnet | 12 - .../_gen/extensions/v1beta1/main.libsonnet | 12 - .../priorityLevelConfigurationSpec.libsonnet | 27 - 
.../v1/httpIngressRuleValue.libsonnet | 10 - .../networking/v1/ingressStatus.libsonnet | 13 - .../_gen/networking/v1/ingressTLS.libsonnet | 12 - .../1.21/_gen/networking/v1/ipBlock.libsonnet | 12 - .../v1/networkPolicyEgressRule.libsonnet | 14 - .../v1/networkPolicyIngressRule.libsonnet | 14 - .../networking/v1/networkPolicyPort.libsonnet | 12 - .../networking/v1/networkPolicySpec.libsonnet | 29 -- .../v1beta1/httpIngressPath.libsonnet | 26 - .../v1beta1/httpIngressRuleValue.libsonnet | 10 - .../_gen/networking/v1beta1/ingress.libsonnet | 85 ---- .../v1beta1/ingressBackend.libsonnet | 19 - .../networking/v1beta1/ingressClass.libsonnet | 74 --- .../networking/v1beta1/ingressRule.libsonnet | 15 - .../networking/v1beta1/ingressSpec.libsonnet | 32 -- .../v1beta1/ingressStatus.libsonnet | 13 - .../networking/v1beta1/ingressTLS.libsonnet | 12 - .../_gen/networking/v1beta1/main.libsonnet | 15 - .../1.21/_gen/node/v1alpha1/main.libsonnet | 8 - .../_gen/node/v1alpha1/runtimeClass.libsonnet | 79 --- .../node/v1alpha1/runtimeClassSpec.libsonnet | 26 - .../1.21/_gen/node/v1beta1/main.libsonnet | 7 - .../1.21/_gen/node/v1beta1/overhead.libsonnet | 10 - .../_gen/node/v1beta1/runtimeClass.libsonnet | 76 --- .../_gen/node/v1beta1/scheduling.libsonnet | 14 - .../policy/v1beta1/allowedCSIDriver.libsonnet | 8 - .../v1beta1/allowedFlexVolume.libsonnet | 8 - .../policy/v1beta1/allowedHostPath.libsonnet | 10 - .../v1beta1/fsGroupStrategyOptions.libsonnet | 12 - .../policy/v1beta1/hostPortRange.libsonnet | 10 - .../_gen/policy/v1beta1/idRange.libsonnet | 10 - .../1.21/_gen/policy/v1beta1/main.libsonnet | 21 - .../v1beta1/podDisruptionBudget.libsonnet | 74 --- .../v1beta1/podSecurityPolicy.libsonnet | 178 ------- .../v1beta1/podSecurityPolicySpec.libsonnet | 125 ----- .../runAsGroupStrategyOptions.libsonnet | 12 - .../runAsUserStrategyOptions.libsonnet | 12 - .../runtimeClassStrategyOptions.libsonnet | 12 - .../v1beta1/seLinuxStrategyOptions.libsonnet | 19 - ...upplementalGroupsStrategyOptions.libsonnet | 12 - .../_gen/rbac/v1alpha1/clusterRole.libsonnet | 67 --- .../1.21/_gen/rbac/v1alpha1/main.libsonnet | 12 - .../_gen/rbac/v1alpha1/policyRule.libsonnet | 26 - .../1.21/_gen/rbac/v1alpha1/role.libsonnet | 60 --- .../1.21/_gen/rbac/v1alpha1/subject.libsonnet | 12 - .../rbac/v1beta1/aggregationRule.libsonnet | 10 - .../_gen/rbac/v1beta1/clusterRole.libsonnet | 67 --- .../rbac/v1beta1/clusterRoleBinding.libsonnet | 69 --- .../1.21/_gen/rbac/v1beta1/main.libsonnet | 12 - .../_gen/rbac/v1beta1/policyRule.libsonnet | 26 - .../1.21/_gen/rbac/v1beta1/role.libsonnet | 60 --- .../_gen/rbac/v1beta1/roleBinding.libsonnet | 69 --- .../1.21/_gen/rbac/v1beta1/roleRef.libsonnet | 12 - .../v1alpha1/priorityClass.libsonnet | 64 --- .../_gen/scheduling/v1beta1/main.libsonnet | 5 - .../v1beta1/priorityClass.libsonnet | 64 --- .../_gen/storage/v1/csiDriverSpec.libsonnet | 24 - .../1.21/_gen/storage/v1/main.libsonnet | 17 - .../v1/volumeAttachmentSource.libsonnet | 419 --------------- .../storage/v1/volumeAttachmentSpec.libsonnet | 426 ---------------- .../v1/volumeAttachmentStatus.libsonnet | 26 - .../storage/v1/volumeNodeResources.libsonnet | 8 - .../v1alpha1/csiStorageCapacity.libsonnet | 73 --- .../1.21/_gen/storage/v1alpha1/main.libsonnet | 10 - .../v1alpha1/volumeAttachment.libsonnet | 479 ------------------ .../v1alpha1/volumeAttachmentSpec.libsonnet | 426 ---------------- .../v1alpha1/volumeAttachmentStatus.libsonnet | 26 - .../storage/v1alpha1/volumeError.libsonnet | 10 - .../_gen/storage/v1beta1/csiDriver.libsonnet | 77 --- 
.../storage/v1beta1/csiDriverSpec.libsonnet | 24 - .../_gen/storage/v1beta1/csiNode.libsonnet | 63 --- .../storage/v1beta1/storageClass.libsonnet | 76 --- .../storage/v1beta1/tokenRequest.libsonnet | 10 - .../v1beta1/volumeAttachment.libsonnet | 479 ------------------ .../v1beta1/volumeAttachmentStatus.libsonnet | 26 - .../storage/v1beta1/volumeError.libsonnet | 10 - .../v1beta1/volumeNodeResources.libsonnet | 8 - .../{1.21 => 1.29}/_custom/apps.libsonnet | 8 +- .../1.29}/_custom/autoscaling.libsonnet | 2 - .../1.29}/_custom/batch.libsonnet | 2 - .../1.29}/_custom/core.libsonnet | 0 .../{1.21 => 1.29}/_custom/list.libsonnet | 0 .../1.29}/_custom/mapContainers.libsonnet | 29 +- .../{1.21 => 1.29}/_custom/rbac.libsonnet | 2 - .../1.29}/_custom/volumeMounts.libsonnet | 17 +- .../_gen/admissionregistration/main.libsonnet | 1 + .../admissionregistration/v1/main.libsonnet | 1 + .../v1/matchCondition.libsonnet | 10 + .../v1/mutatingWebhook.libsonnet | 4 + .../v1/mutatingWebhookConfiguration.libsonnet | 20 +- .../v1/ruleWithOperations.libsonnet | 0 .../v1/serviceReference.libsonnet | 0 .../v1/validatingWebhook.libsonnet | 4 + .../validatingWebhookConfiguration.libsonnet | 20 +- .../v1/webhookClientConfig.libsonnet | 0 .../v1alpha1/auditAnnotation.libsonnet | 10 + .../v1alpha1/expressionWarning.libsonnet | 10 + .../v1alpha1/main.libsonnet | 19 + .../v1alpha1/matchCondition.libsonnet | 10 + .../v1alpha1/matchResources.libsonnet | 38 ++ .../namedRuleWithOperations.libsonnet | 6 +- .../v1alpha1/paramKind.libsonnet | 8 + .../v1alpha1/paramRef.libsonnet} | 12 +- .../v1alpha1/typeChecking.libsonnet | 10 + .../validatingAdmissionPolicy.libsonnet | 117 +++++ ...validatingAdmissionPolicyBinding.libsonnet | 118 +++++ ...datingAdmissionPolicyBindingSpec.libsonnet | 67 +++ .../validatingAdmissionPolicySpec.libsonnet | 66 +++ .../validatingAdmissionPolicyStatus.libsonnet | 19 + .../v1alpha1/validation.libsonnet | 14 + .../v1alpha1/variable.libsonnet | 10 + .../v1beta1/auditAnnotation.libsonnet | 10 + .../v1beta1/expressionWarning.libsonnet | 10 + .../v1beta1/main.libsonnet | 19 + .../v1beta1/matchCondition.libsonnet | 10 + .../v1beta1/matchResources.libsonnet | 38 ++ .../v1beta1/namedRuleWithOperations.libsonnet | 6 +- .../v1beta1/paramKind.libsonnet | 8 + .../v1beta1/paramRef.libsonnet | 23 + .../v1beta1/typeChecking.libsonnet | 10 + .../validatingAdmissionPolicy.libsonnet | 117 +++++ ...validatingAdmissionPolicyBinding.libsonnet | 118 +++++ ...datingAdmissionPolicyBindingSpec.libsonnet | 67 +++ .../validatingAdmissionPolicySpec.libsonnet | 66 +++ .../validatingAdmissionPolicyStatus.libsonnet | 19 + .../v1beta1/validation.libsonnet | 14 + .../v1beta1/variable.libsonnet | 10 + .../1.29}/_gen/apiregistration/main.libsonnet | 1 - .../apiregistration/v1/apiService.libsonnet | 22 +- .../v1/apiServiceCondition.libsonnet | 0 .../v1/apiServiceSpec.libsonnet | 2 +- .../v1/apiServiceStatus.libsonnet | 0 .../_gen/apiregistration/v1/main.libsonnet | 0 .../v1/serviceReference.libsonnet | 0 .../_gen/apiserverinternal/main.libsonnet | 0 .../apiserverinternal/v1alpha1/main.libsonnet | 0 .../v1alpha1/serverStorageVersion.libsonnet | 4 + .../v1alpha1/storageVersion.libsonnet | 22 +- .../storageVersionCondition.libsonnet | 0 .../v1alpha1/storageVersionSpec.libsonnet | 0 .../v1alpha1/storageVersionStatus.libsonnet | 0 .../{1.21 => 1.29}/_gen/apps/main.libsonnet | 0 .../_gen/apps/v1/controllerRevision.libsonnet | 24 +- .../_gen/apps/v1/daemonSet.libsonnet | 97 ++-- .../_gen/apps/v1/daemonSetCondition.libsonnet | 0 
.../_gen/apps/v1/daemonSetSpec.libsonnet | 77 +-- .../_gen/apps/v1/daemonSetStatus.libsonnet | 2 +- .../apps/v1/daemonSetUpdateStrategy.libsonnet | 0 .../1.29}/_gen/apps/v1/deployment.libsonnet | 97 ++-- .../apps/v1/deploymentCondition.libsonnet | 0 .../_gen/apps/v1/deploymentSpec.libsonnet | 77 +-- .../_gen/apps/v1/deploymentStatus.libsonnet | 2 +- .../_gen/apps/v1/deploymentStrategy.libsonnet | 0 .../_gen/apps/v1/main.libsonnet | 2 + .../_gen/apps/v1/replicaSet.libsonnet | 97 ++-- .../apps/v1/replicaSetCondition.libsonnet | 0 .../_gen/apps/v1/replicaSetSpec.libsonnet | 77 +-- .../_gen/apps/v1/replicaSetStatus.libsonnet | 4 +- .../apps/v1/rollingUpdateDaemonSet.libsonnet | 0 .../apps/v1/rollingUpdateDeployment.libsonnet | 0 ...rollingUpdateStatefulSetStrategy.libsonnet | 10 + .../1.29}/_gen/apps/v1/statefulSet.libsonnet | 117 +++-- .../apps/v1/statefulSetCondition.libsonnet | 0 .../apps/v1/statefulSetOrdinals.libsonnet | 8 + ...istentVolumeClaimRetentionPolicy.libsonnet | 10 + .../_gen/apps/v1/statefulSetSpec.libsonnet | 95 ++-- .../_gen/apps/v1/statefulSetStatus.libsonnet | 4 +- .../v1/statefulSetUpdateStrategy.libsonnet | 4 +- .../1.29}/_gen/authentication/main.libsonnet | 1 + .../v1/boundObjectReference.libsonnet | 0 .../_gen/authentication/v1/main.libsonnet | 2 + .../v1/selfSubjectReview.libsonnet | 54 ++ .../v1/selfSubjectReviewStatus.libsonnet | 21 + .../authentication/v1/tokenRequest.libsonnet | 24 +- .../v1/tokenRequestSpec.libsonnet | 4 +- .../v1/tokenRequestStatus.libsonnet | 0 .../authentication/v1/tokenReview.libsonnet | 20 +- .../v1/tokenReviewSpec.libsonnet | 0 .../v1/tokenReviewStatus.libsonnet | 0 .../_gen/authentication/v1/userInfo.libsonnet | 0 .../authentication/v1alpha1/main.libsonnet | 6 + .../v1alpha1/selfSubjectReview.libsonnet | 54 ++ .../selfSubjectReviewStatus.libsonnet | 21 + .../authentication/v1beta1/main.libsonnet | 6 + .../v1beta1/selfSubjectReview.libsonnet | 54 ++ .../v1beta1/selfSubjectReviewStatus.libsonnet | 21 + .../1.29}/_gen/authorization/main.libsonnet | 1 - .../v1/localSubjectAccessReview.libsonnet | 20 +- .../_gen/authorization/v1/main.libsonnet | 0 .../v1/nonResourceAttributes.libsonnet | 0 .../v1/nonResourceRule.libsonnet | 0 .../v1/resourceAttributes.libsonnet | 0 .../authorization/v1/resourceRule.libsonnet | 0 .../v1/selfSubjectAccessReview.libsonnet | 20 +- .../v1/selfSubjectAccessReviewSpec.libsonnet | 0 .../v1/selfSubjectRulesReview.libsonnet | 22 +- .../v1}/selfSubjectRulesReviewSpec.libsonnet | 2 +- .../v1/subjectAccessReview.libsonnet | 20 +- .../v1/subjectAccessReviewSpec.libsonnet | 0 .../v1/subjectAccessReviewStatus.libsonnet | 0 .../v1/subjectRulesReviewStatus.libsonnet | 0 .../_gen/autoscaling/main.libsonnet | 3 +- .../v1/crossVersionObjectReference.libsonnet | 10 + .../v1/horizontalPodAutoscaler.libsonnet | 30 +- .../v1/horizontalPodAutoscalerSpec.libsonnet | 10 +- .../horizontalPodAutoscalerStatus.libsonnet | 8 +- .../_gen/autoscaling/v1/main.libsonnet | 0 .../1.29}/_gen/autoscaling/v1/scale.libsonnet | 22 +- .../_gen/autoscaling/v1/scaleSpec.libsonnet | 2 +- .../_gen/autoscaling/v1/scaleStatus.libsonnet | 10 + .../containerResourceMetricSource.libsonnet | 21 + .../containerResourceMetricStatus.libsonnet | 19 + .../v2/crossVersionObjectReference.libsonnet | 10 + .../v2/externalMetricSource.libsonnet | 33 ++ .../v2/externalMetricStatus.libsonnet | 31 ++ .../v2}/horizontalPodAutoscaler.libsonnet | 36 +- .../horizontalPodAutoscalerBehavior.libsonnet | 8 +- ...horizontalPodAutoscalerCondition.libsonnet | 0 
.../v2}/horizontalPodAutoscalerSpec.libsonnet | 14 +- .../horizontalPodAutoscalerStatus.libsonnet | 0 .../v2}/hpaScalingPolicy.libsonnet | 6 +- .../autoscaling/v2}/hpaScalingRules.libsonnet | 4 +- .../1.29/_gen/autoscaling/v2}/main.libsonnet | 2 +- .../v2}/metricIdentifier.libsonnet | 0 .../_gen/autoscaling/v2/metricSpec.libsonnet | 141 ++++++ .../autoscaling/v2/metricStatus.libsonnet | 131 +++++ .../autoscaling/v2/metricTarget.libsonnet | 14 + .../v2/metricValueStatus.libsonnet | 12 + .../v2/objectMetricSource.libsonnet | 42 ++ .../v2/objectMetricStatus.libsonnet | 40 ++ .../autoscaling/v2/podsMetricSource.libsonnet | 33 ++ .../autoscaling/v2/podsMetricStatus.libsonnet | 31 ++ .../v2/resourceMetricSource.libsonnet | 19 + .../v2/resourceMetricStatus.libsonnet | 17 + .../{1.21 => 1.29}/_gen/batch/main.libsonnet | 1 - .../1.29}/_gen/batch/v1/cronJob.libsonnet | 142 +++--- .../_gen/batch/v1/cronJobSpec.libsonnet | 122 +++-- .../_gen/batch/v1/cronJobStatus.libsonnet | 0 .../1.29}/_gen/batch/v1/job.libsonnet | 118 +++-- .../_gen/batch/v1/jobCondition.libsonnet | 0 .../_gen/batch/v1/jobSpec.libsonnet | 98 ++-- .../1.29}/_gen/batch/v1/jobStatus.libsonnet | 21 +- .../_gen/batch/v1}/jobTemplateSpec.libsonnet | 118 +++-- .../_gen/batch/v1/main.libsonnet | 5 + .../_gen/batch/v1/podFailurePolicy.libsonnet | 10 + ...lurePolicyOnExitCodesRequirement.libsonnet | 14 + ...lurePolicyOnPodConditionsPattern.libsonnet | 8 + .../batch/v1/podFailurePolicyRule.libsonnet | 23 + .../v1/uncountedTerminatedPods.libsonnet | 14 + .../1.29}/_gen/certificates/main.libsonnet | 2 +- .../v1/certificateSigningRequest.libsonnet | 22 +- ...rtificateSigningRequestCondition.libsonnet | 0 .../certificateSigningRequestSpec.libsonnet | 2 + .../certificateSigningRequestStatus.libsonnet | 0 .../_gen/certificates/v1/main.libsonnet | 0 .../v1alpha1/clusterTrustBundle.libsonnet | 61 +++ .../v1alpha1/clusterTrustBundleSpec.libsonnet | 10 + .../_gen/certificates/v1alpha1/main.libsonnet | 6 + .../_gen/coordination/main.libsonnet | 1 - .../_gen/coordination/v1/lease.libsonnet | 22 +- .../_gen/coordination/v1/leaseSpec.libsonnet | 2 +- .../_gen/coordination/v1/main.libsonnet | 0 .../{1.21 => 1.29}/_gen/core/main.libsonnet | 0 .../_gen/core/v1/affinity.libsonnet | 0 .../_gen/core/v1/attachedVolume.libsonnet | 0 ...awsElasticBlockStoreVolumeSource.libsonnet | 14 + .../core/v1/azureDiskVolumeSource.libsonnet | 18 + .../azureFilePersistentVolumeSource.libsonnet | 14 + .../core/v1/azureFileVolumeSource.libsonnet | 12 + .../_gen/core/v1/binding.libsonnet | 20 +- .../_gen/core/v1/capabilities.libsonnet | 0 .../v1/cephFSPersistentVolumeSource.libsonnet | 25 + .../_gen/core/v1/cephFSVolumeSource.libsonnet | 23 + .../v1/cinderPersistentVolumeSource.libsonnet | 10 +- .../_gen/core/v1/cinderVolumeSource.libsonnet | 6 +- .../1.29/_gen/core/v1/claimSource.libsonnet | 10 + .../_gen/core/v1/clientIPConfig.libsonnet | 0 .../v1/clusterTrustBundleProjection.libsonnet | 25 + .../_gen/core/v1/componentCondition.libsonnet | 0 .../_gen/core/v1/componentStatus.libsonnet | 20 +- .../1.29}/_gen/core/v1/configMap.libsonnet | 20 +- .../_gen/core/v1/configMapEnvSource.libsonnet | 0 .../core/v1/configMapKeySelector.libsonnet | 0 .../v1/configMapNodeConfigSource.libsonnet | 2 +- .../core/v1/configMapProjection.libsonnet | 14 + .../core/v1/configMapVolumeSource.libsonnet | 16 + .../_gen/core/v1/container.libsonnet | 85 +++- .../_gen/core/v1/containerImage.libsonnet | 4 +- .../_gen/core/v1/containerPort.libsonnet | 0 .../core/v1/containerResizePolicy.libsonnet | 10 + 
.../_gen/core/v1/containerState.libsonnet | 2 +- .../core/v1/containerStateRunning.libsonnet | 0 .../v1/containerStateTerminated.libsonnet | 2 +- .../core/v1/containerStateWaiting.libsonnet | 0 .../_gen/core/v1/containerStatus.libsonnet | 37 +- .../v1/csiPersistentVolumeSource.libsonnet | 35 +- .../_gen/core/v1/csiVolumeSource.libsonnet | 10 +- .../_gen/core/v1/daemonEndpoint.libsonnet | 0 .../core/v1/downwardAPIProjection.libsonnet | 0 .../core/v1/downwardAPIVolumeFile.libsonnet | 26 + .../core/v1/downwardAPIVolumeSource.libsonnet | 0 .../core/v1/emptyDirVolumeSource.libsonnet | 10 + .../_gen/core/v1/endpointAddress.libsonnet | 2 +- .../1.29/_gen/core/v1/endpointPort.libsonnet | 14 + .../_gen/core/v1/endpointSubset.libsonnet | 2 +- .../1.29}/_gen/core/v1/endpoints.libsonnet | 22 +- .../_gen/core/v1/envFromSource.libsonnet | 0 .../_gen/core/v1/envVar.libsonnet | 4 +- .../_gen/core/v1/envVarSource.libsonnet | 2 +- .../_gen/core/v1/ephemeralContainer.libsonnet | 89 +++- .../core/v1/ephemeralVolumeSource.libsonnet | 47 +- .../1.29}/_gen/core/v1/event.libsonnet | 20 +- .../_gen/core/v1/eventSeries.libsonnet | 0 .../_gen/core/v1/eventSource.libsonnet | 0 .../_gen/core/v1/execAction.libsonnet | 0 .../_gen/core/v1/fcVolumeSource.libsonnet | 20 + .../v1/flexPersistentVolumeSource.libsonnet | 14 +- .../_gen/core/v1/flexVolumeSource.libsonnet | 10 +- .../core/v1/flockerVolumeSource.libsonnet | 4 +- .../gcePersistentDiskVolumeSource.libsonnet | 14 + .../core/v1/gitRepoVolumeSource.libsonnet | 6 +- .../glusterfsPersistentVolumeSource.libsonnet | 8 +- .../core/v1/glusterfsVolumeSource.libsonnet | 6 +- .../1.29/_gen/core/v1/grpcAction.libsonnet | 10 + .../_gen/core/v1/hostAlias.libsonnet | 0 .../1.29/_gen/core/v1/hostIP.libsonnet | 8 + .../core/v1/hostPathVolumeSource.libsonnet | 4 +- .../_gen/core/v1/httpGetAction.libsonnet | 0 .../1.29}/_gen/core/v1/httpHeader.libsonnet | 2 +- .../v1/iscsiPersistentVolumeSource.libsonnet | 35 ++ .../_gen/core/v1/iscsiVolumeSource.libsonnet | 33 ++ .../1.29/_gen/core/v1/keyToPath.libsonnet | 12 + .../_gen/core/v1/lifecycle.libsonnet | 14 +- .../_gen/core/v1/lifecycleHandler.libsonnet | 7 +- .../_gen/core/v1/limitRange.libsonnet | 20 +- .../_gen/core/v1/limitRangeItem.libsonnet | 0 .../_gen/core/v1/limitRangeSpec.libsonnet | 0 .../core/v1/loadBalancerIngress.libsonnet | 2 + .../_gen/core/v1/loadBalancerStatus.libsonnet | 0 .../core/v1/localObjectReference.libsonnet | 0 .../_gen/core/v1/localVolumeSource.libsonnet | 10 + .../1.29}/_gen/core/v1/main.libsonnet | 17 +- .../_gen/core/v1/modifyVolumeStatus.libsonnet | 8 + .../_gen/core/v1/namespace.libsonnet | 20 +- .../_gen/core/v1/namespaceCondition.libsonnet | 0 .../_gen/core/v1/namespaceSpec.libsonnet | 0 .../_gen/core/v1/namespaceStatus.libsonnet | 0 .../_gen/core/v1/nfsVolumeSource.libsonnet | 6 +- .../1.29}/_gen/core/v1/node.libsonnet | 24 +- .../_gen/core/v1/nodeAddress.libsonnet | 0 .../_gen/core/v1/nodeAffinity.libsonnet | 0 .../_gen/core/v1/nodeCondition.libsonnet | 0 .../_gen/core/v1/nodeConfigSource.libsonnet | 4 +- .../_gen/core/v1/nodeConfigStatus.libsonnet | 12 +- .../core/v1/nodeDaemonEndpoints.libsonnet | 0 .../_gen/core/v1/nodeSelector.libsonnet | 0 .../core/v1/nodeSelectorRequirement.libsonnet | 0 .../_gen/core/v1/nodeSelectorTerm.libsonnet | 0 .../1.29}/_gen/core/v1/nodeSpec.libsonnet | 4 +- .../_gen/core/v1/nodeStatus.libsonnet | 18 +- .../_gen/core/v1/nodeSystemInfo.libsonnet | 2 +- .../core/v1/objectFieldSelector.libsonnet | 0 .../_gen/core/v1/objectReference.libsonnet | 0 
.../_gen/core/v1/persistentVolume.libsonnet | 295 +++++------ .../core/v1/persistentVolumeClaim.libsonnet | 47 +- .../persistentVolumeClaimCondition.libsonnet | 6 +- .../v1/persistentVolumeClaimSpec.libsonnet | 27 +- .../v1/persistentVolumeClaimStatus.libsonnet | 35 ++ .../persistentVolumeClaimTemplate.libsonnet | 47 +- ...ersistentVolumeClaimVolumeSource.libsonnet | 4 +- .../core/v1/persistentVolumeSpec.libsonnet | 275 +++++----- .../core/v1/persistentVolumeStatus.libsonnet | 14 + ...photonPersistentDiskVolumeSource.libsonnet | 10 + .../1.29}/_gen/core/v1/pod.libsonnet | 77 +-- .../_gen/core/v1/podAffinity.libsonnet | 0 .../_gen/core/v1/podAffinityTerm.libsonnet | 12 +- .../_gen/core/v1/podAntiAffinity.libsonnet | 0 .../_gen/core/v1/podCondition.libsonnet | 0 .../_gen/core/v1/podDNSConfig.libsonnet | 0 .../_gen/core/v1/podDNSConfigOption.libsonnet | 0 .../1.29/_gen/core/v1/podIP.libsonnet | 8 + .../1.29/_gen/core/v1/podOS.libsonnet | 8 + .../_gen/core/v1/podReadinessGate.libsonnet | 0 .../_gen/core/v1/podResourceClaim.libsonnet | 15 + .../core/v1/podResourceClaimStatus.libsonnet | 10 + .../_gen/core/v1/podSchedulingGate.libsonnet | 8 + .../_gen/core/v1/podSecurityContext.libsonnet | 20 +- .../1.29}/_gen/core/v1/podSpec.libsonnet | 57 ++- .../1.29}/_gen/core/v1/podStatus.libsonnet | 24 +- .../_gen/core/v1/podTemplate.libsonnet | 97 ++-- .../_gen/core/v1/podTemplateSpec.libsonnet | 77 +-- .../_gen/core/v1/portStatus.libsonnet | 0 .../core/v1/portworxVolumeSource.libsonnet | 6 +- .../core/v1/preferredSchedulingTerm.libsonnet | 0 .../1.29}/_gen/core/v1/probe.libsonnet | 9 +- .../core/v1/projectedVolumeSource.libsonnet | 12 + .../core/v1/quobyteVolumeSource.libsonnet | 12 +- .../v1/rbdPersistentVolumeSource.libsonnet | 29 ++ .../_gen/core/v1/rbdVolumeSource.libsonnet | 27 + .../core/v1/replicationController.libsonnet | 97 ++-- .../replicationControllerCondition.libsonnet | 0 .../v1/replicationControllerSpec.libsonnet | 77 +-- .../v1/replicationControllerStatus.libsonnet | 2 +- .../1.29/_gen/core/v1/resourceClaim.libsonnet | 8 + .../core/v1/resourceFieldSelector.libsonnet | 12 + .../_gen/core/v1/resourceQuota.libsonnet | 20 +- .../_gen/core/v1/resourceQuotaSpec.libsonnet | 0 .../core/v1/resourceQuotaStatus.libsonnet | 0 .../core/v1/resourceRequirements.libsonnet | 18 + .../scaleIOPersistentVolumeSource.libsonnet | 31 ++ .../core/v1/scaleIOVolumeSource.libsonnet | 29 ++ .../_gen/core/v1/scopeSelector.libsonnet | 0 ...copedResourceSelectorRequirement.libsonnet | 0 .../_gen/core/v1/seLinuxOptions.libsonnet | 0 .../_gen/core/v1/seccompProfile.libsonnet | 2 +- .../1.29}/_gen/core/v1/secret.libsonnet | 22 +- .../_gen/core/v1/secretEnvSource.libsonnet | 0 .../_gen/core/v1/secretKeySelector.libsonnet | 0 .../_gen/core/v1/secretProjection.libsonnet | 14 + .../_gen/core/v1/secretReference.libsonnet | 4 +- .../_gen/core/v1/secretVolumeSource.libsonnet | 16 + .../_gen/core/v1/securityContext.libsonnet | 16 +- .../_gen/core/v1/service.libsonnet | 48 +- .../_gen/core/v1/serviceAccount.libsonnet | 24 +- .../serviceAccountTokenProjection.libsonnet | 6 +- .../_gen/core/v1/servicePort.libsonnet | 2 +- .../1.29}/_gen/core/v1/serviceSpec.libsonnet | 28 +- .../_gen/core/v1/serviceStatus.libsonnet | 0 .../core/v1/sessionAffinityConfig.libsonnet | 0 .../1.29/_gen/core/v1/sleepAction.libsonnet | 8 + .../storageOSPersistentVolumeSource.libsonnet | 8 +- .../core/v1/storageOSVolumeSource.libsonnet | 8 +- .../_gen/core/v1/sysctl.libsonnet | 0 .../_gen/core/v1/taint.libsonnet | 0 .../_gen/core/v1/tcpSocketAction.libsonnet 
| 0 .../_gen/core/v1/toleration.libsonnet | 0 ...topologySelectorLabelRequirement.libsonnet | 0 .../core/v1/topologySelectorTerm.libsonnet | 0 .../v1/topologySpreadConstraint.libsonnet | 33 ++ .../v1/typedLocalObjectReference.libsonnet | 0 .../core/v1/typedObjectReference.libsonnet | 14 + .../_gen/core/v1/volume.libsonnet | 287 ++++++----- .../_gen/core/v1/volumeDevice.libsonnet | 0 .../_gen/core/v1/volumeMount.libsonnet | 0 .../_gen/core/v1/volumeNodeAffinity.libsonnet | 0 .../_gen/core/v1/volumeProjection.libsonnet | 66 +++ .../v1/volumeResourceRequirements.libsonnet} | 6 +- .../vsphereVirtualDiskVolumeSource.libsonnet | 14 + .../core/v1/weightedPodAffinityTerm.libsonnet | 12 +- .../windowsSecurityContextOptions.libsonnet | 2 + .../_gen/discovery/main.libsonnet | 1 - .../_gen/discovery/v1/endpoint.libsonnet | 12 +- .../discovery/v1/endpointConditions.libsonnet | 6 +- .../_gen/discovery/v1/endpointHints.libsonnet | 0 .../_gen/discovery/v1/endpointPort.libsonnet | 14 + .../_gen/discovery/v1/endpointSlice.libsonnet | 20 +- .../_gen/discovery/v1/forZone.libsonnet | 0 .../_gen/discovery/v1/main.libsonnet | 0 .../{1.21 => 1.29}/_gen/events/main.libsonnet | 1 - .../1.29}/_gen/events/v1/event.libsonnet | 20 +- .../_gen/events/v1/eventSeries.libsonnet | 0 .../_gen/events/v1/main.libsonnet | 0 .../1.29}/_gen/flowcontrol/main.libsonnet | 3 +- ...exemptPriorityLevelConfiguration.libsonnet | 10 + .../v1}/flowDistinguisherMethod.libsonnet | 0 .../_gen/flowcontrol/v1}/flowSchema.libsonnet | 22 +- .../v1}/flowSchemaCondition.libsonnet | 0 .../flowcontrol/v1}/flowSchemaSpec.libsonnet | 0 .../v1}/flowSchemaStatus.libsonnet | 0 .../flowcontrol/v1}/groupSubject.libsonnet | 0 .../flowcontrol/v1}/limitResponse.libsonnet | 0 ...imitedPriorityLevelConfiguration.libsonnet | 26 + .../1.29/_gen/flowcontrol/v1}/main.libsonnet | 3 +- .../v1}/nonResourcePolicyRule.libsonnet | 0 .../v1}/policyRulesWithSubjects.libsonnet | 0 .../v1/priorityLevelConfiguration.libsonnet | 89 ++++ ...orityLevelConfigurationCondition.libsonnet | 0 ...orityLevelConfigurationReference.libsonnet | 0 .../priorityLevelConfigurationSpec.libsonnet | 38 ++ ...priorityLevelConfigurationStatus.libsonnet | 0 .../v1}/queuingConfiguration.libsonnet | 0 .../v1}/resourcePolicyRule.libsonnet | 2 +- .../v1}/serviceAccountSubject.libsonnet | 0 .../_gen/flowcontrol/v1}/subject.libsonnet | 2 +- .../flowcontrol/v1}/userSubject.libsonnet | 0 ...exemptPriorityLevelConfiguration.libsonnet | 10 + .../flowDistinguisherMethod.libsonnet | 0 .../flowcontrol/v1beta3}/flowSchema.libsonnet | 22 +- .../v1beta3}/flowSchemaCondition.libsonnet | 0 .../v1beta3}/flowSchemaSpec.libsonnet | 0 .../v1beta3}/flowSchemaStatus.libsonnet | 0 .../v1beta3}/groupSubject.libsonnet | 0 .../v1beta3}/limitResponse.libsonnet | 0 ...imitedPriorityLevelConfiguration.libsonnet | 10 +- .../_gen/flowcontrol/v1beta3/main.libsonnet | 25 + .../v1beta3}/nonResourcePolicyRule.libsonnet | 0 .../policyRulesWithSubjects.libsonnet | 0 .../priorityLevelConfiguration.libsonnet | 39 +- ...orityLevelConfigurationCondition.libsonnet | 0 ...orityLevelConfigurationReference.libsonnet | 0 .../priorityLevelConfigurationSpec.libsonnet | 38 ++ ...priorityLevelConfigurationStatus.libsonnet | 0 .../v1beta3}/queuingConfiguration.libsonnet | 0 .../v1beta3}/resourcePolicyRule.libsonnet | 2 +- .../v1beta3}/serviceAccountSubject.libsonnet | 0 .../flowcontrol/v1beta3}/subject.libsonnet | 2 +- .../v1beta3}/userSubject.libsonnet | 0 .../1.29}/_gen/meta/main.libsonnet | 0 .../1.29}/_gen/meta/v1/apiGroup.libsonnet | 0 
.../1.29}/_gen/meta/v1/apiGroupList.libsonnet | 0 .../1.29}/_gen/meta/v1/apiResource.libsonnet | 0 .../_gen/meta/v1/apiResourceList.libsonnet | 0 .../1.29}/_gen/meta/v1/apiVersions.libsonnet | 0 .../1.29}/_gen/meta/v1/condition.libsonnet | 0 .../_gen/meta/v1/deleteOptions.libsonnet | 0 .../1.29}/_gen/meta/v1/fieldsV1.libsonnet | 0 .../v1/groupVersionForDiscovery.libsonnet | 0 .../_gen/meta/v1/labelSelector.libsonnet | 0 .../v1/labelSelectorRequirement.libsonnet | 0 .../1.29}/_gen/meta/v1/listMeta.libsonnet | 2 +- .../1.29}/_gen/meta/v1/main.libsonnet | 0 .../_gen/meta/v1/managedFieldsEntry.libsonnet | 2 + .../1.29}/_gen/meta/v1/microTime.libsonnet | 0 .../1.29}/_gen/meta/v1/objectMeta.libsonnet | 20 +- .../_gen/meta/v1/ownerReference.libsonnet | 6 +- .../1.29}/_gen/meta/v1/patch.libsonnet | 0 .../_gen/meta/v1/preconditions.libsonnet | 0 .../v1/serverAddressByClientCIDR.libsonnet | 0 .../1.29}/_gen/meta/v1/statusCause.libsonnet | 0 .../_gen/meta/v1/statusDetails.libsonnet | 2 +- .../1.29}/_gen/meta/v1/time.libsonnet | 0 .../1.29/_gen/meta/v1/watchEvent.libsonnet | 17 + .../_gen/networking/main.libsonnet | 2 +- .../networking/v1/httpIngressPath.libsonnet | 10 +- .../v1}/httpIngressRuleValue.libsonnet | 4 +- .../_gen/networking/v1/ingress.libsonnet | 36 +- .../networking/v1/ingressBackend.libsonnet | 6 +- .../_gen/networking/v1/ingressClass.libsonnet | 32 +- .../ingressClassParametersReference.libsonnet | 10 +- .../networking/v1}/ingressClassSpec.libsonnet | 12 +- .../v1/ingressLoadBalancerIngress.libsonnet | 14 + .../v1/ingressLoadBalancerStatus.libsonnet | 10 + .../networking/v1/ingressPortStatus.libsonnet | 12 + .../_gen/networking/v1}/ingressRule.libsonnet | 6 +- .../v1/ingressServiceBackend.libsonnet | 6 +- .../_gen/networking/v1/ingressSpec.libsonnet | 16 +- .../networking/v1/ingressStatus.libsonnet | 13 + .../_gen/networking/v1/ingressTLS.libsonnet | 12 + .../1.29/_gen/networking/v1/ipBlock.libsonnet | 12 + .../_gen/networking/v1/main.libsonnet | 3 + .../networking/v1/networkPolicy.libsonnet | 32 +- .../v1/networkPolicyEgressRule.libsonnet | 14 + .../v1/networkPolicyIngressRule.libsonnet | 14 + .../networking/v1/networkPolicyPeer.libsonnet | 8 +- .../networking/v1/networkPolicyPort.libsonnet | 12 + .../networking/v1/networkPolicySpec.libsonnet | 29 ++ .../v1/serviceBackendPort.libsonnet | 4 +- .../networking/v1alpha1/ipAddress.libsonnet | 68 +++ .../v1alpha1/ipAddressSpec.libsonnet | 17 + .../_gen/networking/v1alpha1/main.libsonnet | 10 + .../v1alpha1/parentReference.libsonnet | 14 + .../networking/v1alpha1/serviceCIDR.libsonnet | 61 +++ .../v1alpha1/serviceCIDRSpec.libsonnet | 10 + .../v1alpha1/serviceCIDRStatus.libsonnet | 10 + .../1.29}/_gen/node/main.libsonnet | 2 - .../_gen/node/v1/main.libsonnet | 0 .../_gen/node/v1/overhead.libsonnet | 4 +- .../1.29}/_gen/node/v1/runtimeClass.libsonnet | 26 +- .../_gen/node/v1/scheduling.libsonnet | 0 .../1.29}/_gen/policy/main.libsonnet | 1 - .../1.29/_gen/policy/v1}/eviction.libsonnet | 22 +- .../1.29}/_gen/policy/v1/main.libsonnet | 1 + .../policy/v1/podDisruptionBudget.libsonnet | 22 +- .../v1}/podDisruptionBudgetSpec.libsonnet | 2 + .../v1/podDisruptionBudgetStatus.libsonnet | 0 .../{1.21 => 1.29}/_gen/rbac/main.libsonnet | 2 - .../_gen/rbac/v1/aggregationRule.libsonnet | 0 .../_gen/rbac/v1/clusterRole.libsonnet | 20 +- .../_gen/rbac/v1/clusterRoleBinding.libsonnet | 20 +- .../_gen/rbac/v1/main.libsonnet | 0 .../1.29}/_gen/rbac/v1/policyRule.libsonnet | 12 +- .../_gen/rbac/v1/role.libsonnet | 20 +- 
.../1.29}/_gen/rbac/v1/roleBinding.libsonnet | 20 +- .../_gen/rbac/v1/roleRef.libsonnet | 0 .../_gen/rbac/v1/subject.libsonnet | 0 .../1.29/_gen/resource/main.libsonnet | 5 + .../v1alpha2/allocationResult.libsonnet | 19 + .../_gen/resource/v1alpha2/main.libsonnet | 19 + .../v1alpha2/podSchedulingContext.libsonnet | 63 +++ .../podSchedulingContextSpec.libsonnet | 12 + .../podSchedulingContextStatus.libsonnet | 10 + .../v1alpha2/resourceClaim.libsonnet} | 53 +- .../resourceClaimConsumerReference.libsonnet | 14 + ...resourceClaimParametersReference.libsonnet | 12 + .../resourceClaimSchedulingStatus.libsonnet | 12 + .../v1alpha2/resourceClaimSpec.libsonnet | 19 + .../v1alpha2/resourceClaimStatus.libsonnet | 30 ++ .../v1alpha2/resourceClaimTemplate.libsonnet | 116 +++++ .../resourceClaimTemplateSpec.libsonnet} | 44 +- .../resource/v1alpha2/resourceClass.libsonnet | 74 +++ ...resourceClassParametersReference.libsonnet | 14 + .../v1alpha2/resourceHandle.libsonnet | 10 + .../_gen/scheduling/main.libsonnet | 2 - .../_gen/scheduling/v1/main.libsonnet | 0 .../scheduling/v1/priorityClass.libsonnet | 24 +- .../1.29}/_gen/storage/main.libsonnet | 1 - .../_gen/storage/v1/csiDriver.libsonnet | 38 +- .../_gen/storage/v1/csiDriverSpec.libsonnet | 26 + .../1.29}/_gen/storage/v1/csiNode.libsonnet | 20 +- .../_gen/storage/v1/csiNodeDriver.libsonnet | 4 +- .../_gen/storage/v1/csiNodeSpec.libsonnet | 0 .../storage/v1}/csiStorageCapacity.libsonnet | 30 +- .../1.29/_gen/storage/v1}/main.libsonnet | 2 +- .../_gen/storage/v1/storageClass.libsonnet | 40 +- .../_gen/storage/v1/tokenRequest.libsonnet | 4 +- .../storage/v1/volumeAttachment.libsonnet | 301 +++++------ .../v1}/volumeAttachmentSource.libsonnet | 277 +++++----- .../v1}/volumeAttachmentSpec.libsonnet | 281 +++++----- .../v1/volumeAttachmentStatus.libsonnet | 26 + .../_gen/storage/v1/volumeError.libsonnet | 2 +- .../storage/v1/volumeNodeResources.libsonnet | 8 + .../_gen/storage}/v1alpha1/main.libsonnet | 2 +- .../v1alpha1/volumeAttributesClass.libsonnet} | 47 +- .../k8s-libsonnet/1.29}/gen.libsonnet | 4 +- .../{1.21 => 1.29}/main.libsonnet | 0 .../jsonnet-compiled/util/jsonnetfile.json | 2 +- .../util/jsonnetfile.lock.json | 10 +- .../jsonnet-compiled/util/lib/k.libsonnet | 2 +- operations/jsonnet-compiled/util/vendor/1.21 | 1 - operations/jsonnet-compiled/util/vendor/1.29 | 1 + .../v1/serviceReference.libsonnet | 14 - .../v1/webhookClientConfig.libsonnet | 21 - .../v1beta1/main.libsonnet | 11 - .../v1beta1/mutatingWebhook.libsonnet | 66 --- .../mutatingWebhookConfiguration.libsonnet | 60 --- .../v1beta1/serviceReference.libsonnet | 14 - .../v1beta1/validatingWebhook.libsonnet | 64 --- .../validatingWebhookConfiguration.libsonnet | 60 --- .../v1beta1/webhookClientConfig.libsonnet | 21 - .../v1/apiServiceCondition.libsonnet | 14 - .../v1/apiServiceStatus.libsonnet | 10 - .../v1/serviceReference.libsonnet | 12 - .../v1beta1/apiService.libsonnet | 80 --- .../v1beta1/apiServiceCondition.libsonnet | 14 - .../v1beta1/apiServiceStatus.libsonnet | 10 - .../apiregistration/v1beta1/main.libsonnet | 9 - .../v1beta1/serviceReference.libsonnet | 12 - ...rollingUpdateStatefulSetStrategy.libsonnet | 8 - .../v1/tokenReviewSpec.libsonnet | 12 - .../v1/tokenReviewStatus.libsonnet | 29 -- .../_gen/authentication/v1/userInfo.libsonnet | 18 - .../authentication/v1beta1/main.libsonnet | 8 - .../v1beta1/tokenReview.libsonnet | 65 --- .../v1beta1/tokenReviewSpec.libsonnet | 12 - .../v1beta1/tokenReviewStatus.libsonnet | 29 -- .../authentication/v1beta1/userInfo.libsonnet | 18 - 
.../v1/nonResourceAttributes.libsonnet | 10 - .../v1/nonResourceRule.libsonnet | 14 - .../v1/resourceAttributes.libsonnet | 20 - .../authorization/v1/resourceRule.libsonnet | 22 - .../v1/selfSubjectAccessReviewSpec.libsonnet | 30 -- .../v1/selfSubjectRulesReviewSpec.libsonnet | 8 - .../v1/subjectAccessReviewStatus.libsonnet | 14 - .../v1/subjectRulesReviewStatus.libsonnet | 18 - .../localSubjectAccessReview.libsonnet | 95 ---- .../_gen/authorization/v1beta1/main.libsonnet | 17 - .../v1beta1/nonResourceAttributes.libsonnet | 10 - .../v1beta1/nonResourceRule.libsonnet | 14 - .../v1beta1/resourceAttributes.libsonnet | 20 - .../v1beta1/resourceRule.libsonnet | 22 - .../v1beta1/selfSubjectAccessReview.libsonnet | 83 --- .../selfSubjectAccessReviewSpec.libsonnet | 30 -- .../v1beta1/selfSubjectRulesReview.libsonnet | 61 --- .../selfSubjectRulesReviewSpec.libsonnet | 8 - .../v1beta1/subjectAccessReview.libsonnet | 95 ---- .../v1beta1/subjectAccessReviewSpec.libsonnet | 42 -- .../subjectAccessReviewStatus.libsonnet | 14 - .../subjectRulesReviewStatus.libsonnet | 18 - .../v1/crossVersionObjectReference.libsonnet | 10 - .../_gen/autoscaling/v1/scaleStatus.libsonnet | 10 - .../containerResourceMetricSource.libsonnet | 14 - .../containerResourceMetricStatus.libsonnet | 14 - .../crossVersionObjectReference.libsonnet | 10 - .../v2beta1/externalMetricSource.libsonnet | 23 - .../v2beta1/externalMetricStatus.libsonnet | 23 - .../v2beta1/horizontalPodAutoscaler.libsonnet | 76 --- .../horizontalPodAutoscalerSpec.libsonnet | 23 - .../horizontalPodAutoscalerStatus.libsonnet | 22 - .../_gen/autoscaling/v2beta1/main.libsonnet | 21 - .../autoscaling/v2beta1/metricSpec.libsonnet | 95 ---- .../v2beta1/metricStatus.libsonnet | 95 ---- .../v2beta1/objectMetricSource.libsonnet | 32 -- .../v2beta1/objectMetricStatus.libsonnet | 32 -- .../v2beta1/podsMetricSource.libsonnet | 21 - .../v2beta1/podsMetricStatus.libsonnet | 21 - .../v2beta1/resourceMetricSource.libsonnet | 12 - .../v2beta1/resourceMetricStatus.libsonnet | 12 - .../containerResourceMetricSource.libsonnet | 21 - .../containerResourceMetricStatus.libsonnet | 19 - .../crossVersionObjectReference.libsonnet | 10 - .../v2beta2/externalMetricSource.libsonnet | 33 -- .../v2beta2/externalMetricStatus.libsonnet | 31 -- .../horizontalPodAutoscalerStatus.libsonnet | 22 - .../autoscaling/v2beta2/metricSpec.libsonnet | 141 ------ .../v2beta2/metricStatus.libsonnet | 131 ----- .../v2beta2/metricTarget.libsonnet | 14 - .../v2beta2/metricValueStatus.libsonnet | 12 - .../v2beta2/objectMetricSource.libsonnet | 42 -- .../v2beta2/objectMetricStatus.libsonnet | 40 -- .../v2beta2/podsMetricSource.libsonnet | 33 -- .../v2beta2/podsMetricStatus.libsonnet | 31 -- .../v2beta2/resourceMetricSource.libsonnet | 19 - .../v2beta2/resourceMetricStatus.libsonnet | 17 - .../_gen/batch/v1/cronJobStatus.libsonnet | 14 - .../1.21/_gen/batch/v1beta1/cronJob.libsonnet | 388 -------------- .../_gen/batch/v1beta1/cronJobSpec.libsonnet | 335 ------------ .../batch/v1beta1/cronJobStatus.libsonnet | 14 - .../batch/v1beta1/jobTemplateSpec.libsonnet | 320 ------------ .../1.21/_gen/batch/v1beta1/main.libsonnet | 8 - .../certificateSigningRequest.libsonnet | 79 --- ...rtificateSigningRequestCondition.libsonnet | 16 - .../certificateSigningRequestSpec.libsonnet | 26 - .../certificateSigningRequestStatus.libsonnet | 12 - .../_gen/certificates/v1beta1/main.libsonnet | 8 - .../_gen/coordination/v1/leaseSpec.libsonnet | 16 - .../coordination/v1beta1/leaseSpec.libsonnet | 16 - 
.../_gen/coordination/v1beta1/main.libsonnet | 6 - ...awsElasticBlockStoreVolumeSource.libsonnet | 14 - .../core/v1/azureDiskVolumeSource.libsonnet | 18 - .../azureFilePersistentVolumeSource.libsonnet | 14 - .../core/v1/azureFileVolumeSource.libsonnet | 12 - .../v1/cephFSPersistentVolumeSource.libsonnet | 25 - .../_gen/core/v1/cephFSVolumeSource.libsonnet | 23 - .../core/v1/configMapProjection.libsonnet | 14 - .../core/v1/configMapVolumeSource.libsonnet | 16 - .../core/v1/downwardAPIVolumeFile.libsonnet | 26 - .../core/v1/emptyDirVolumeSource.libsonnet | 10 - .../1.21/_gen/core/v1/endpointPort.libsonnet | 14 - .../core/v1/ephemeralContainers.libsonnet | 60 --- .../_gen/core/v1/fcVolumeSource.libsonnet | 20 - .../gcePersistentDiskVolumeSource.libsonnet | 14 - .../v1/iscsiPersistentVolumeSource.libsonnet | 35 -- .../_gen/core/v1/iscsiVolumeSource.libsonnet | 33 -- .../1.21/_gen/core/v1/keyToPath.libsonnet | 12 - .../_gen/core/v1/localVolumeSource.libsonnet | 10 - .../v1/persistentVolumeClaimStatus.libsonnet | 20 - .../core/v1/persistentVolumeStatus.libsonnet | 12 - ...photonPersistentDiskVolumeSource.libsonnet | 10 - .../1.21/_gen/core/v1/podIP.libsonnet | 8 - .../core/v1/projectedVolumeSource.libsonnet | 12 - .../v1/rbdPersistentVolumeSource.libsonnet | 29 -- .../_gen/core/v1/rbdVolumeSource.libsonnet | 27 - .../core/v1/resourceFieldSelector.libsonnet | 12 - .../scaleIOPersistentVolumeSource.libsonnet | 31 -- .../core/v1/scaleIOVolumeSource.libsonnet | 29 -- .../_gen/core/v1/secretProjection.libsonnet | 14 - .../_gen/core/v1/secretVolumeSource.libsonnet | 16 - .../v1/topologySpreadConstraint.libsonnet | 23 - .../_gen/core/v1/volumeProjection.libsonnet | 44 -- .../vsphereVirtualDiskVolumeSource.libsonnet | 14 - .../discovery/v1/endpointConditions.libsonnet | 12 - .../_gen/discovery/v1/endpointPort.libsonnet | 14 - .../1.21/_gen/discovery/v1/forZone.libsonnet | 8 - .../_gen/discovery/v1beta1/endpoint.libsonnet | 51 -- .../v1beta1/endpointConditions.libsonnet | 12 - .../discovery/v1beta1/endpointHints.libsonnet | 10 - .../discovery/v1beta1/endpointPort.libsonnet | 14 - .../discovery/v1beta1/endpointSlice.libsonnet | 66 --- .../_gen/discovery/v1beta1/forZone.libsonnet | 8 - .../_gen/discovery/v1beta1/main.libsonnet | 10 - .../1.21/_gen/events/v1beta1/event.libsonnet | 124 ----- .../_gen/events/v1beta1/eventSeries.libsonnet | 10 - .../1.21/_gen/events/v1beta1/main.libsonnet | 6 - .../1.21/_gen/extensions/main.libsonnet | 5 - .../v1beta1/httpIngressPath.libsonnet | 26 - .../_gen/extensions/v1beta1/ingress.libsonnet | 85 ---- .../v1beta1/ingressBackend.libsonnet | 19 - .../extensions/v1beta1/ingressRule.libsonnet | 15 - .../extensions/v1beta1/ingressSpec.libsonnet | 32 -- .../v1beta1/ingressStatus.libsonnet | 13 - .../extensions/v1beta1/ingressTLS.libsonnet | 12 - .../_gen/extensions/v1beta1/main.libsonnet | 12 - .../priorityLevelConfigurationSpec.libsonnet | 27 - .../1.21/_gen/meta/v1/watchEvent.libsonnet | 17 - .../v1/httpIngressRuleValue.libsonnet | 10 - .../ingressClassParametersReference.libsonnet | 16 - .../networking/v1/ingressClassSpec.libsonnet | 21 - .../_gen/networking/v1/ingressRule.libsonnet | 15 - .../networking/v1/ingressStatus.libsonnet | 13 - .../_gen/networking/v1/ingressTLS.libsonnet | 12 - .../1.21/_gen/networking/v1/ipBlock.libsonnet | 12 - .../v1/networkPolicyEgressRule.libsonnet | 14 - .../v1/networkPolicyIngressRule.libsonnet | 14 - .../networking/v1/networkPolicyPort.libsonnet | 12 - .../networking/v1/networkPolicySpec.libsonnet | 29 -- 
.../v1beta1/httpIngressPath.libsonnet | 26 - .../v1beta1/httpIngressRuleValue.libsonnet | 10 - .../_gen/networking/v1beta1/ingress.libsonnet | 85 ---- .../v1beta1/ingressBackend.libsonnet | 19 - .../networking/v1beta1/ingressClass.libsonnet | 74 --- .../ingressClassParametersReference.libsonnet | 16 - .../v1beta1/ingressClassSpec.libsonnet | 21 - .../networking/v1beta1/ingressRule.libsonnet | 15 - .../networking/v1beta1/ingressSpec.libsonnet | 32 -- .../v1beta1/ingressStatus.libsonnet | 13 - .../networking/v1beta1/ingressTLS.libsonnet | 12 - .../_gen/networking/v1beta1/main.libsonnet | 15 - .../1.21/_gen/node/v1/overhead.libsonnet | 10 - .../1.21/_gen/node/v1/scheduling.libsonnet | 14 - .../1.21/_gen/node/v1alpha1/main.libsonnet | 8 - .../_gen/node/v1alpha1/overhead.libsonnet | 10 - .../_gen/node/v1alpha1/runtimeClass.libsonnet | 79 --- .../node/v1alpha1/runtimeClassSpec.libsonnet | 26 - .../_gen/node/v1alpha1/scheduling.libsonnet | 14 - .../1.21/_gen/node/v1beta1/main.libsonnet | 7 - .../1.21/_gen/node/v1beta1/overhead.libsonnet | 10 - .../_gen/node/v1beta1/runtimeClass.libsonnet | 76 --- .../_gen/node/v1beta1/scheduling.libsonnet | 14 - .../v1/podDisruptionBudgetStatus.libsonnet | 24 - .../policy/v1beta1/allowedCSIDriver.libsonnet | 8 - .../v1beta1/allowedFlexVolume.libsonnet | 8 - .../policy/v1beta1/allowedHostPath.libsonnet | 10 - .../v1beta1/fsGroupStrategyOptions.libsonnet | 12 - .../policy/v1beta1/hostPortRange.libsonnet | 10 - .../_gen/policy/v1beta1/idRange.libsonnet | 10 - .../1.21/_gen/policy/v1beta1/main.libsonnet | 21 - .../v1beta1/podDisruptionBudget.libsonnet | 74 --- .../podDisruptionBudgetStatus.libsonnet | 24 - .../v1beta1/podSecurityPolicy.libsonnet | 178 ------- .../v1beta1/podSecurityPolicySpec.libsonnet | 125 ----- .../runAsGroupStrategyOptions.libsonnet | 12 - .../runAsUserStrategyOptions.libsonnet | 12 - .../runtimeClassStrategyOptions.libsonnet | 12 - .../v1beta1/seLinuxStrategyOptions.libsonnet | 19 - ...upplementalGroupsStrategyOptions.libsonnet | 12 - .../_gen/rbac/v1/aggregationRule.libsonnet | 10 - .../1.21/_gen/rbac/v1/roleRef.libsonnet | 12 - .../1.21/_gen/rbac/v1/subject.libsonnet | 14 - .../rbac/v1alpha1/aggregationRule.libsonnet | 10 - .../_gen/rbac/v1alpha1/clusterRole.libsonnet | 67 --- .../v1alpha1/clusterRoleBinding.libsonnet | 69 --- .../1.21/_gen/rbac/v1alpha1/main.libsonnet | 12 - .../_gen/rbac/v1alpha1/policyRule.libsonnet | 26 - .../1.21/_gen/rbac/v1alpha1/role.libsonnet | 60 --- .../_gen/rbac/v1alpha1/roleBinding.libsonnet | 69 --- .../1.21/_gen/rbac/v1alpha1/roleRef.libsonnet | 12 - .../1.21/_gen/rbac/v1alpha1/subject.libsonnet | 12 - .../rbac/v1beta1/aggregationRule.libsonnet | 10 - .../_gen/rbac/v1beta1/clusterRole.libsonnet | 67 --- .../rbac/v1beta1/clusterRoleBinding.libsonnet | 69 --- .../1.21/_gen/rbac/v1beta1/main.libsonnet | 12 - .../_gen/rbac/v1beta1/policyRule.libsonnet | 26 - .../1.21/_gen/rbac/v1beta1/role.libsonnet | 60 --- .../_gen/rbac/v1beta1/roleBinding.libsonnet | 69 --- .../1.21/_gen/rbac/v1beta1/roleRef.libsonnet | 12 - .../1.21/_gen/rbac/v1beta1/subject.libsonnet | 14 - .../v1alpha1/priorityClass.libsonnet | 64 --- .../_gen/scheduling/v1beta1/main.libsonnet | 5 - .../v1beta1/priorityClass.libsonnet | 64 --- .../_gen/storage/v1/csiDriverSpec.libsonnet | 24 - .../_gen/storage/v1/csiNodeDriver.libsonnet | 19 - .../_gen/storage/v1/csiNodeSpec.libsonnet | 10 - .../1.21/_gen/storage/v1/main.libsonnet | 17 - .../v1/volumeAttachmentSource.libsonnet | 419 --------------- .../v1/volumeAttachmentStatus.libsonnet | 26 - 
.../storage/v1/volumeNodeResources.libsonnet | 8 - .../v1alpha1/csiStorageCapacity.libsonnet | 73 --- .../1.21/_gen/storage/v1alpha1/main.libsonnet | 10 - .../v1alpha1/volumeAttachment.libsonnet | 479 ------------------ .../v1alpha1/volumeAttachmentSource.libsonnet | 419 --------------- .../v1alpha1/volumeAttachmentSpec.libsonnet | 426 ---------------- .../v1alpha1/volumeAttachmentStatus.libsonnet | 26 - .../storage/v1alpha1/volumeError.libsonnet | 10 - .../_gen/storage/v1beta1/csiDriver.libsonnet | 77 --- .../storage/v1beta1/csiDriverSpec.libsonnet | 24 - .../_gen/storage/v1beta1/csiNode.libsonnet | 63 --- .../storage/v1beta1/csiNodeDriver.libsonnet | 19 - .../storage/v1beta1/csiNodeSpec.libsonnet | 10 - .../storage/v1beta1/storageClass.libsonnet | 76 --- .../storage/v1beta1/tokenRequest.libsonnet | 10 - .../v1beta1/volumeAttachment.libsonnet | 479 ------------------ .../v1beta1/volumeAttachmentSource.libsonnet | 419 --------------- .../v1beta1/volumeAttachmentSpec.libsonnet | 426 ---------------- .../v1beta1/volumeAttachmentStatus.libsonnet | 26 - .../storage/v1beta1/volumeError.libsonnet | 10 - .../v1beta1/volumeNodeResources.libsonnet | 8 - .../{1.21 => 1.29}/_custom/apps.libsonnet | 7 +- .../1.29}/_custom/autoscaling.libsonnet | 5 +- .../1.29}/_custom/batch.libsonnet | 3 +- .../1.29}/_custom/core.libsonnet | 58 ++- .../{1.21 => 1.29}/_custom/list.libsonnet | 0 .../1.29}/_custom/mapContainers.libsonnet | 42 +- .../{1.21 => 1.29}/_custom/rbac.libsonnet | 2 - .../1.29/_custom/volumeMounts.libsonnet | 323 ++++++++++++ .../_gen/admissionregistration/main.libsonnet | 1 + .../admissionregistration/v1/main.libsonnet | 1 + .../v1/matchCondition.libsonnet | 10 + .../v1/mutatingWebhook.libsonnet | 4 + .../v1/mutatingWebhookConfiguration.libsonnet | 20 +- .../v1}/ruleWithOperations.libsonnet | 0 .../v1}/serviceReference.libsonnet | 0 .../v1/validatingWebhook.libsonnet | 4 + .../validatingWebhookConfiguration.libsonnet | 20 +- .../v1}/webhookClientConfig.libsonnet | 0 .../v1alpha1/auditAnnotation.libsonnet | 10 + .../v1alpha1/expressionWarning.libsonnet | 10 + .../v1alpha1/main.libsonnet | 19 + .../v1alpha1/matchCondition.libsonnet | 10 + .../v1alpha1/matchResources.libsonnet | 38 ++ .../namedRuleWithOperations.libsonnet | 28 + .../v1alpha1/paramKind.libsonnet | 8 + .../v1alpha1/paramRef.libsonnet} | 12 +- .../v1alpha1/typeChecking.libsonnet | 10 + .../validatingAdmissionPolicy.libsonnet | 117 +++++ ...validatingAdmissionPolicyBinding.libsonnet | 118 +++++ ...datingAdmissionPolicyBindingSpec.libsonnet | 67 +++ .../validatingAdmissionPolicySpec.libsonnet | 66 +++ .../validatingAdmissionPolicyStatus.libsonnet | 19 + .../v1alpha1/validation.libsonnet | 14 + .../v1alpha1/variable.libsonnet | 10 + .../v1beta1/auditAnnotation.libsonnet | 10 + .../v1beta1/expressionWarning.libsonnet | 10 + .../v1beta1/main.libsonnet | 19 + .../v1beta1/matchCondition.libsonnet | 10 + .../v1beta1/matchResources.libsonnet | 38 ++ .../v1beta1/namedRuleWithOperations.libsonnet | 28 + .../v1beta1/paramKind.libsonnet | 8 + .../v1beta1/paramRef.libsonnet | 23 + .../v1beta1/typeChecking.libsonnet | 10 + .../validatingAdmissionPolicy.libsonnet | 117 +++++ ...validatingAdmissionPolicyBinding.libsonnet | 118 +++++ ...datingAdmissionPolicyBindingSpec.libsonnet | 67 +++ .../validatingAdmissionPolicySpec.libsonnet | 66 +++ .../validatingAdmissionPolicyStatus.libsonnet | 19 + .../v1beta1/validation.libsonnet | 14 + .../v1beta1/variable.libsonnet | 10 + .../1.29}/_gen/apiregistration/main.libsonnet | 1 - 
.../apiregistration/v1/apiService.libsonnet | 22 +- .../v1}/apiServiceCondition.libsonnet | 0 .../v1}/apiServiceSpec.libsonnet | 2 +- .../v1}/apiServiceStatus.libsonnet | 0 .../_gen/apiregistration/v1/main.libsonnet | 0 .../v1}/serviceReference.libsonnet | 0 .../_gen/apiserverinternal/main.libsonnet | 0 .../apiserverinternal/v1alpha1/main.libsonnet | 0 .../v1alpha1/serverStorageVersion.libsonnet | 4 + .../v1alpha1/storageVersion.libsonnet | 22 +- .../storageVersionCondition.libsonnet | 0 .../v1alpha1/storageVersionSpec.libsonnet | 0 .../v1alpha1/storageVersionStatus.libsonnet | 0 .../{1.21 => 1.29}/_gen/apps/main.libsonnet | 0 .../_gen/apps/v1/controllerRevision.libsonnet | 24 +- .../_gen/apps/v1/daemonSet.libsonnet | 97 ++-- .../_gen/apps/v1/daemonSetCondition.libsonnet | 0 .../_gen/apps/v1/daemonSetSpec.libsonnet | 77 +-- .../_gen/apps/v1/daemonSetStatus.libsonnet | 2 +- .../apps/v1/daemonSetUpdateStrategy.libsonnet | 0 .../1.29}/_gen/apps/v1/deployment.libsonnet | 97 ++-- .../apps/v1/deploymentCondition.libsonnet | 0 .../_gen/apps/v1/deploymentSpec.libsonnet | 77 +-- .../_gen/apps/v1/deploymentStatus.libsonnet | 2 +- .../_gen/apps/v1/deploymentStrategy.libsonnet | 0 .../_gen/apps/v1/main.libsonnet | 2 + .../_gen/apps/v1/replicaSet.libsonnet | 97 ++-- .../apps/v1/replicaSetCondition.libsonnet | 0 .../_gen/apps/v1/replicaSetSpec.libsonnet | 77 +-- .../_gen/apps/v1/replicaSetStatus.libsonnet | 4 +- .../apps/v1/rollingUpdateDaemonSet.libsonnet | 0 .../apps/v1/rollingUpdateDeployment.libsonnet | 0 ...rollingUpdateStatefulSetStrategy.libsonnet | 10 + .../1.29}/_gen/apps/v1/statefulSet.libsonnet | 117 +++-- .../apps/v1/statefulSetCondition.libsonnet | 0 .../apps/v1/statefulSetOrdinals.libsonnet | 8 + ...istentVolumeClaimRetentionPolicy.libsonnet | 10 + .../_gen/apps/v1/statefulSetSpec.libsonnet | 95 ++-- .../_gen/apps/v1/statefulSetStatus.libsonnet | 4 +- .../v1/statefulSetUpdateStrategy.libsonnet | 4 +- .../1.29}/_gen/authentication/main.libsonnet | 1 + .../v1/boundObjectReference.libsonnet | 0 .../_gen/authentication/v1/main.libsonnet | 2 + .../v1/selfSubjectReview.libsonnet | 54 ++ .../v1/selfSubjectReviewStatus.libsonnet | 21 + .../authentication/v1/tokenRequest.libsonnet | 24 +- .../v1/tokenRequestSpec.libsonnet | 4 +- .../v1/tokenRequestStatus.libsonnet | 0 .../authentication/v1/tokenReview.libsonnet | 20 +- .../v1}/tokenReviewSpec.libsonnet | 0 .../v1}/tokenReviewStatus.libsonnet | 0 .../authentication/v1}/userInfo.libsonnet | 0 .../authentication/v1alpha1/main.libsonnet | 6 + .../v1alpha1/selfSubjectReview.libsonnet | 54 ++ .../selfSubjectReviewStatus.libsonnet | 21 + .../authentication/v1beta1/main.libsonnet | 6 + .../v1beta1/selfSubjectReview.libsonnet | 54 ++ .../v1beta1/selfSubjectReviewStatus.libsonnet | 21 + .../1.29}/_gen/authorization/main.libsonnet | 1 - .../v1/localSubjectAccessReview.libsonnet | 20 +- .../_gen/authorization/v1/main.libsonnet | 0 .../v1}/nonResourceAttributes.libsonnet | 0 .../v1}/nonResourceRule.libsonnet | 0 .../v1}/resourceAttributes.libsonnet | 0 .../authorization/v1}/resourceRule.libsonnet | 0 .../v1/selfSubjectAccessReview.libsonnet | 20 +- .../v1}/selfSubjectAccessReviewSpec.libsonnet | 0 .../v1/selfSubjectRulesReview.libsonnet | 22 +- .../v1/selfSubjectRulesReviewSpec.libsonnet | 2 +- .../v1/subjectAccessReview.libsonnet | 20 +- .../v1/subjectAccessReviewSpec.libsonnet | 0 .../v1}/subjectAccessReviewStatus.libsonnet | 0 .../v1}/subjectRulesReviewStatus.libsonnet | 0 .../_gen/autoscaling/main.libsonnet | 3 +- 
.../v1/crossVersionObjectReference.libsonnet | 10 + .../v1/horizontalPodAutoscaler.libsonnet | 30 +- .../v1/horizontalPodAutoscalerSpec.libsonnet | 10 +- .../horizontalPodAutoscalerStatus.libsonnet | 8 +- .../_gen/autoscaling/v1/main.libsonnet | 0 .../1.29}/_gen/autoscaling/v1/scale.libsonnet | 22 +- .../_gen/autoscaling/v1/scaleSpec.libsonnet | 2 +- .../_gen/autoscaling/v1/scaleStatus.libsonnet | 10 + .../containerResourceMetricSource.libsonnet | 21 + .../containerResourceMetricStatus.libsonnet | 19 + .../v2/crossVersionObjectReference.libsonnet | 10 + .../v2/externalMetricSource.libsonnet | 33 ++ .../v2/externalMetricStatus.libsonnet | 31 ++ .../v2}/horizontalPodAutoscaler.libsonnet | 36 +- .../horizontalPodAutoscalerBehavior.libsonnet | 8 +- ...horizontalPodAutoscalerCondition.libsonnet | 0 .../v2}/horizontalPodAutoscalerSpec.libsonnet | 14 +- .../horizontalPodAutoscalerStatus.libsonnet | 0 .../v2}/hpaScalingPolicy.libsonnet | 6 +- .../autoscaling/v2}/hpaScalingRules.libsonnet | 4 +- .../1.29/_gen/autoscaling/v2}/main.libsonnet | 2 +- .../v2}/metricIdentifier.libsonnet | 0 .../_gen/autoscaling/v2/metricSpec.libsonnet | 141 ++++++ .../autoscaling/v2/metricStatus.libsonnet | 131 +++++ .../autoscaling/v2/metricTarget.libsonnet | 14 + .../v2/metricValueStatus.libsonnet | 12 + .../v2/objectMetricSource.libsonnet | 42 ++ .../v2/objectMetricStatus.libsonnet | 40 ++ .../autoscaling/v2/podsMetricSource.libsonnet | 33 ++ .../autoscaling/v2/podsMetricStatus.libsonnet | 31 ++ .../v2/resourceMetricSource.libsonnet | 19 + .../v2/resourceMetricStatus.libsonnet | 17 + .../{1.21 => 1.29}/_gen/batch/main.libsonnet | 1 - .../1.29}/_gen/batch/v1/cronJob.libsonnet | 142 +++--- .../_gen/batch/v1/cronJobSpec.libsonnet | 122 +++-- .../_gen/batch/v1}/cronJobStatus.libsonnet | 0 .../1.29}/_gen/batch/v1/job.libsonnet | 118 +++-- .../_gen/batch/v1/jobCondition.libsonnet | 0 .../_gen/batch/v1/jobSpec.libsonnet | 98 ++-- .../1.29}/_gen/batch/v1/jobStatus.libsonnet | 21 +- .../_gen/batch/v1/jobTemplateSpec.libsonnet | 118 +++-- .../_gen/batch/v1/main.libsonnet | 5 + .../_gen/batch/v1/podFailurePolicy.libsonnet | 10 + ...lurePolicyOnExitCodesRequirement.libsonnet | 14 + ...lurePolicyOnPodConditionsPattern.libsonnet | 8 + .../batch/v1/podFailurePolicyRule.libsonnet | 23 + .../v1/uncountedTerminatedPods.libsonnet | 14 + .../1.29}/_gen/certificates/main.libsonnet | 2 +- .../v1/certificateSigningRequest.libsonnet | 22 +- ...rtificateSigningRequestCondition.libsonnet | 0 .../certificateSigningRequestSpec.libsonnet | 2 + .../certificateSigningRequestStatus.libsonnet | 0 .../_gen/certificates/v1/main.libsonnet | 0 .../v1alpha1/clusterTrustBundle.libsonnet | 61 +++ .../v1alpha1/clusterTrustBundleSpec.libsonnet | 10 + .../_gen/certificates/v1alpha1/main.libsonnet | 6 + .../_gen/coordination/main.libsonnet | 1 - .../_gen/coordination/v1/lease.libsonnet | 22 +- .../_gen/coordination/v1}/leaseSpec.libsonnet | 2 +- .../_gen/coordination/v1/main.libsonnet | 0 .../{1.21 => 1.29}/_gen/core/main.libsonnet | 0 .../_gen/core/v1/affinity.libsonnet | 0 .../_gen/core/v1/attachedVolume.libsonnet | 0 ...awsElasticBlockStoreVolumeSource.libsonnet | 14 + .../core/v1/azureDiskVolumeSource.libsonnet | 18 + .../azureFilePersistentVolumeSource.libsonnet | 14 + .../core/v1/azureFileVolumeSource.libsonnet | 12 + .../_gen/core/v1/binding.libsonnet | 20 +- .../_gen/core/v1/capabilities.libsonnet | 0 .../v1/cephFSPersistentVolumeSource.libsonnet | 25 + .../_gen/core/v1/cephFSVolumeSource.libsonnet | 23 + .../v1/cinderPersistentVolumeSource.libsonnet | 
10 +- .../_gen/core/v1/cinderVolumeSource.libsonnet | 6 +- .../1.29/_gen/core/v1/claimSource.libsonnet | 10 + .../_gen/core/v1/clientIPConfig.libsonnet | 0 .../v1/clusterTrustBundleProjection.libsonnet | 25 + .../_gen/core/v1/componentCondition.libsonnet | 0 .../_gen/core/v1/componentStatus.libsonnet | 20 +- .../1.29}/_gen/core/v1/configMap.libsonnet | 20 +- .../_gen/core/v1/configMapEnvSource.libsonnet | 0 .../core/v1/configMapKeySelector.libsonnet | 0 .../v1/configMapNodeConfigSource.libsonnet | 2 +- .../core/v1/configMapProjection.libsonnet | 14 + .../core/v1/configMapVolumeSource.libsonnet | 16 + .../_gen/core/v1/container.libsonnet | 85 +++- .../_gen/core/v1/containerImage.libsonnet | 4 +- .../_gen/core/v1/containerPort.libsonnet | 0 .../core/v1/containerResizePolicy.libsonnet | 10 + .../_gen/core/v1/containerState.libsonnet | 2 +- .../core/v1/containerStateRunning.libsonnet | 0 .../v1/containerStateTerminated.libsonnet | 2 +- .../core/v1/containerStateWaiting.libsonnet | 0 .../_gen/core/v1/containerStatus.libsonnet | 37 +- .../v1/csiPersistentVolumeSource.libsonnet | 35 +- .../_gen/core/v1/csiVolumeSource.libsonnet | 10 +- .../_gen/core/v1/daemonEndpoint.libsonnet | 0 .../core/v1/downwardAPIProjection.libsonnet | 0 .../core/v1/downwardAPIVolumeFile.libsonnet | 26 + .../core/v1/downwardAPIVolumeSource.libsonnet | 0 .../core/v1/emptyDirVolumeSource.libsonnet | 10 + .../_gen/core/v1/endpointAddress.libsonnet | 2 +- .../1.29/_gen/core/v1/endpointPort.libsonnet | 14 + .../_gen/core/v1/endpointSubset.libsonnet | 2 +- .../1.29}/_gen/core/v1/endpoints.libsonnet | 22 +- .../_gen/core/v1/envFromSource.libsonnet | 0 .../_gen/core/v1/envVar.libsonnet | 4 +- .../_gen/core/v1/envVarSource.libsonnet | 2 +- .../_gen/core/v1/ephemeralContainer.libsonnet | 89 +++- .../core/v1/ephemeralVolumeSource.libsonnet | 47 +- .../1.29}/_gen/core/v1/event.libsonnet | 20 +- .../_gen/core/v1/eventSeries.libsonnet | 0 .../_gen/core/v1/eventSource.libsonnet | 0 .../_gen/core/v1/execAction.libsonnet | 0 .../_gen/core/v1/fcVolumeSource.libsonnet | 20 + .../v1/flexPersistentVolumeSource.libsonnet | 14 +- .../_gen/core/v1/flexVolumeSource.libsonnet | 10 +- .../core/v1/flockerVolumeSource.libsonnet | 4 +- .../gcePersistentDiskVolumeSource.libsonnet | 14 + .../core/v1/gitRepoVolumeSource.libsonnet | 6 +- .../glusterfsPersistentVolumeSource.libsonnet | 8 +- .../core/v1/glusterfsVolumeSource.libsonnet | 6 +- .../1.29/_gen/core/v1/grpcAction.libsonnet | 10 + .../_gen/core/v1/hostAlias.libsonnet | 0 .../1.29/_gen/core/v1/hostIP.libsonnet | 8 + .../core/v1/hostPathVolumeSource.libsonnet | 4 +- .../_gen/core/v1/httpGetAction.libsonnet | 0 .../1.29}/_gen/core/v1/httpHeader.libsonnet | 2 +- .../v1/iscsiPersistentVolumeSource.libsonnet | 35 ++ .../_gen/core/v1/iscsiVolumeSource.libsonnet | 33 ++ .../1.29/_gen/core/v1/keyToPath.libsonnet | 12 + .../_gen/core/v1/lifecycle.libsonnet | 14 +- .../_gen/core/v1/lifecycleHandler.libsonnet | 7 +- .../_gen/core/v1/limitRange.libsonnet | 20 +- .../_gen/core/v1/limitRangeItem.libsonnet | 0 .../_gen/core/v1/limitRangeSpec.libsonnet | 0 .../core/v1/loadBalancerIngress.libsonnet | 2 + .../_gen/core/v1/loadBalancerStatus.libsonnet | 0 .../core/v1/localObjectReference.libsonnet | 0 .../_gen/core/v1/localVolumeSource.libsonnet | 10 + .../1.29}/_gen/core/v1/main.libsonnet | 17 +- .../_gen/core/v1/modifyVolumeStatus.libsonnet | 8 + .../_gen/core/v1/namespace.libsonnet | 20 +- .../_gen/core/v1/namespaceCondition.libsonnet | 0 .../_gen/core/v1/namespaceSpec.libsonnet | 0 
.../_gen/core/v1/namespaceStatus.libsonnet | 0 .../_gen/core/v1/nfsVolumeSource.libsonnet | 6 +- .../1.29}/_gen/core/v1/node.libsonnet | 24 +- .../_gen/core/v1/nodeAddress.libsonnet | 0 .../_gen/core/v1/nodeAffinity.libsonnet | 0 .../_gen/core/v1/nodeCondition.libsonnet | 0 .../_gen/core/v1/nodeConfigSource.libsonnet | 4 +- .../_gen/core/v1/nodeConfigStatus.libsonnet | 12 +- .../core/v1/nodeDaemonEndpoints.libsonnet | 0 .../_gen/core/v1/nodeSelector.libsonnet | 0 .../core/v1/nodeSelectorRequirement.libsonnet | 0 .../_gen/core/v1/nodeSelectorTerm.libsonnet | 0 .../1.29}/_gen/core/v1/nodeSpec.libsonnet | 4 +- .../_gen/core/v1/nodeStatus.libsonnet | 18 +- .../_gen/core/v1/nodeSystemInfo.libsonnet | 2 +- .../core/v1/objectFieldSelector.libsonnet | 0 .../_gen/core/v1/objectReference.libsonnet | 0 .../_gen/core/v1/persistentVolume.libsonnet | 295 +++++------ .../core/v1/persistentVolumeClaim.libsonnet | 47 +- .../persistentVolumeClaimCondition.libsonnet | 6 +- .../v1/persistentVolumeClaimSpec.libsonnet | 27 +- .../v1/persistentVolumeClaimStatus.libsonnet | 35 ++ .../persistentVolumeClaimTemplate.libsonnet | 47 +- ...ersistentVolumeClaimVolumeSource.libsonnet | 4 +- .../core/v1/persistentVolumeSpec.libsonnet | 275 +++++----- .../core/v1/persistentVolumeStatus.libsonnet | 14 + ...photonPersistentDiskVolumeSource.libsonnet | 10 + .../1.29}/_gen/core/v1/pod.libsonnet | 77 +-- .../_gen/core/v1/podAffinity.libsonnet | 0 .../_gen/core/v1/podAffinityTerm.libsonnet | 12 +- .../_gen/core/v1/podAntiAffinity.libsonnet | 0 .../_gen/core/v1/podCondition.libsonnet | 0 .../_gen/core/v1/podDNSConfig.libsonnet | 0 .../_gen/core/v1/podDNSConfigOption.libsonnet | 0 .../1.29/_gen/core/v1/podIP.libsonnet | 8 + .../1.29/_gen/core/v1/podOS.libsonnet | 8 + .../_gen/core/v1/podReadinessGate.libsonnet | 0 .../_gen/core/v1/podResourceClaim.libsonnet | 15 + .../core/v1/podResourceClaimStatus.libsonnet | 10 + .../_gen/core/v1/podSchedulingGate.libsonnet | 8 + .../_gen/core/v1/podSecurityContext.libsonnet | 20 +- .../1.29}/_gen/core/v1/podSpec.libsonnet | 57 ++- .../1.29}/_gen/core/v1/podStatus.libsonnet | 24 +- .../_gen/core/v1/podTemplate.libsonnet | 97 ++-- .../_gen/core/v1/podTemplateSpec.libsonnet | 77 +-- .../_gen/core/v1/portStatus.libsonnet | 0 .../core/v1/portworxVolumeSource.libsonnet | 6 +- .../core/v1/preferredSchedulingTerm.libsonnet | 0 .../1.29}/_gen/core/v1/probe.libsonnet | 9 +- .../core/v1/projectedVolumeSource.libsonnet | 12 + .../core/v1/quobyteVolumeSource.libsonnet | 12 +- .../v1/rbdPersistentVolumeSource.libsonnet | 29 ++ .../_gen/core/v1/rbdVolumeSource.libsonnet | 27 + .../core/v1/replicationController.libsonnet | 97 ++-- .../replicationControllerCondition.libsonnet | 0 .../v1/replicationControllerSpec.libsonnet | 77 +-- .../v1/replicationControllerStatus.libsonnet | 2 +- .../1.29/_gen/core/v1/resourceClaim.libsonnet | 8 + .../core/v1/resourceFieldSelector.libsonnet | 12 + .../_gen/core/v1/resourceQuota.libsonnet | 20 +- .../_gen/core/v1/resourceQuotaSpec.libsonnet | 0 .../core/v1/resourceQuotaStatus.libsonnet | 0 .../core/v1/resourceRequirements.libsonnet | 18 + .../scaleIOPersistentVolumeSource.libsonnet | 31 ++ .../core/v1/scaleIOVolumeSource.libsonnet | 29 ++ .../_gen/core/v1/scopeSelector.libsonnet | 0 ...copedResourceSelectorRequirement.libsonnet | 0 .../_gen/core/v1/seLinuxOptions.libsonnet | 0 .../_gen/core/v1/seccompProfile.libsonnet | 2 +- .../1.29}/_gen/core/v1/secret.libsonnet | 22 +- .../_gen/core/v1/secretEnvSource.libsonnet | 0 .../_gen/core/v1/secretKeySelector.libsonnet | 0 
.../_gen/core/v1/secretProjection.libsonnet | 14 + .../_gen/core/v1/secretReference.libsonnet | 4 +- .../_gen/core/v1/secretVolumeSource.libsonnet | 16 + .../_gen/core/v1/securityContext.libsonnet | 16 +- .../_gen/core/v1/service.libsonnet | 48 +- .../_gen/core/v1/serviceAccount.libsonnet | 24 +- .../serviceAccountTokenProjection.libsonnet | 6 +- .../_gen/core/v1/servicePort.libsonnet | 2 +- .../1.29}/_gen/core/v1/serviceSpec.libsonnet | 28 +- .../_gen/core/v1/serviceStatus.libsonnet | 0 .../core/v1/sessionAffinityConfig.libsonnet | 0 .../1.29/_gen/core/v1/sleepAction.libsonnet | 8 + .../storageOSPersistentVolumeSource.libsonnet | 8 +- .../core/v1/storageOSVolumeSource.libsonnet | 8 +- .../_gen/core/v1/sysctl.libsonnet | 0 .../_gen/core/v1/taint.libsonnet | 0 .../_gen/core/v1/tcpSocketAction.libsonnet | 0 .../_gen/core/v1/toleration.libsonnet | 0 ...topologySelectorLabelRequirement.libsonnet | 0 .../core/v1/topologySelectorTerm.libsonnet | 0 .../v1/topologySpreadConstraint.libsonnet | 33 ++ .../v1/typedLocalObjectReference.libsonnet | 0 .../core/v1/typedObjectReference.libsonnet | 14 + .../_gen/core/v1/volume.libsonnet | 287 ++++++----- .../_gen/core/v1/volumeDevice.libsonnet | 0 .../_gen/core/v1/volumeMount.libsonnet | 0 .../_gen/core/v1/volumeNodeAffinity.libsonnet | 0 .../_gen/core/v1/volumeProjection.libsonnet | 66 +++ .../v1/volumeResourceRequirements.libsonnet} | 6 +- .../vsphereVirtualDiskVolumeSource.libsonnet | 14 + .../core/v1/weightedPodAffinityTerm.libsonnet | 12 +- .../windowsSecurityContextOptions.libsonnet | 2 + .../_gen/discovery/main.libsonnet | 1 - .../_gen/discovery/v1/endpoint.libsonnet | 12 +- .../v1}/endpointConditions.libsonnet | 6 +- .../_gen/discovery/v1/endpointHints.libsonnet | 0 .../_gen/discovery/v1/endpointPort.libsonnet | 14 + .../_gen/discovery/v1/endpointSlice.libsonnet | 20 +- .../1.29/_gen/discovery/v1}/forZone.libsonnet | 0 .../_gen/discovery/v1/main.libsonnet | 0 .../{1.21 => 1.29}/_gen/events/main.libsonnet | 1 - .../1.29}/_gen/events/v1/event.libsonnet | 20 +- .../_gen/events/v1/eventSeries.libsonnet | 0 .../_gen/events/v1/main.libsonnet | 0 .../1.29}/_gen/flowcontrol/main.libsonnet | 3 +- ...exemptPriorityLevelConfiguration.libsonnet | 10 + .../v1/flowDistinguisherMethod.libsonnet | 8 + .../_gen/flowcontrol/v1/flowSchema.libsonnet | 73 +++ .../v1/flowSchemaCondition.libsonnet} | 8 +- .../flowcontrol/v1/flowSchemaSpec.libsonnet | 22 + .../flowcontrol/v1/flowSchemaStatus.libsonnet | 10 + .../flowcontrol/v1/groupSubject.libsonnet | 8 + .../flowcontrol/v1/limitResponse.libsonnet | 17 + ...imitedPriorityLevelConfiguration.libsonnet | 26 + .../1.29/_gen/flowcontrol/v1}/main.libsonnet | 3 +- .../v1/nonResourcePolicyRule.libsonnet | 14 + .../v1/policyRulesWithSubjects.libsonnet | 18 + .../v1/priorityLevelConfiguration.libsonnet | 89 ++++ ...orityLevelConfigurationCondition.libsonnet | 14 + ...orityLevelConfigurationReference.libsonnet | 8 + .../priorityLevelConfigurationSpec.libsonnet | 38 ++ ...priorityLevelConfigurationStatus.libsonnet | 10 + .../v1/queuingConfiguration.libsonnet | 12 + .../v1/resourcePolicyRule.libsonnet | 24 + .../v1/serviceAccountSubject.libsonnet | 10 + .../_gen/flowcontrol/v1/subject.libsonnet | 25 + .../_gen/flowcontrol/v1/userSubject.libsonnet | 8 + ...exemptPriorityLevelConfiguration.libsonnet | 10 + .../v1beta3/flowDistinguisherMethod.libsonnet | 8 + .../flowcontrol/v1beta3/flowSchema.libsonnet | 73 +++ .../v1beta3/flowSchemaCondition.libsonnet} | 8 +- .../v1beta3/flowSchemaSpec.libsonnet | 22 + 
.../v1beta3/flowSchemaStatus.libsonnet | 10 + .../v1beta3/groupSubject.libsonnet | 8 + .../v1beta3/limitResponse.libsonnet | 17 + ...imitedPriorityLevelConfiguration.libsonnet | 10 +- .../_gen/flowcontrol/v1beta3/main.libsonnet | 25 + .../v1beta3/nonResourcePolicyRule.libsonnet | 14 + .../v1beta3/policyRulesWithSubjects.libsonnet | 18 + .../priorityLevelConfiguration.libsonnet | 39 +- ...orityLevelConfigurationCondition.libsonnet | 14 + ...orityLevelConfigurationReference.libsonnet | 8 + .../priorityLevelConfigurationSpec.libsonnet | 38 ++ ...priorityLevelConfigurationStatus.libsonnet | 10 + .../v1beta3/queuingConfiguration.libsonnet | 12 + .../v1beta3/resourcePolicyRule.libsonnet | 24 + .../v1beta3/serviceAccountSubject.libsonnet | 10 + .../flowcontrol/v1beta3/subject.libsonnet | 25 + .../flowcontrol/v1beta3/userSubject.libsonnet | 8 + .../1.29/_gen/meta/main.libsonnet | 5 + .../1.29/_gen/meta/v1/apiGroup.libsonnet | 28 + .../1.29/_gen/meta/v1/apiGroupList.libsonnet | 15 + .../1.29/_gen/meta/v1/apiResource.libsonnet | 32 ++ .../_gen/meta/v1/apiResourceList.libsonnet | 17 + .../1.29/_gen/meta/v1/apiVersions.libsonnet | 19 + .../1.29/_gen/meta/v1/condition.libsonnet | 16 + .../1.29/_gen/meta/v1/deleteOptions.libsonnet | 28 + .../1.29/_gen/meta/v1/fieldsV1.libsonnet | 6 + .../v1/groupVersionForDiscovery.libsonnet | 10 + .../1.29/_gen/meta/v1/labelSelector.libsonnet | 14 + .../v1/labelSelectorRequirement.libsonnet | 14 + .../1.29/_gen/meta/v1/listMeta.libsonnet | 14 + .../1.29/_gen/meta/v1/main.libsonnet | 27 + .../_gen/meta/v1/managedFieldsEntry.libsonnet | 20 + .../1.29/_gen/meta/v1/microTime.libsonnet | 6 + .../1.29/_gen/meta/v1/objectMeta.libsonnet | 46 ++ .../_gen/meta/v1/ownerReference.libsonnet | 16 + .../1.29/_gen/meta/v1/patch.libsonnet | 6 + .../1.29/_gen/meta/v1/preconditions.libsonnet | 10 + .../v1/serverAddressByClientCIDR.libsonnet | 10 + .../1.29/_gen/meta/v1/statusCause.libsonnet | 12 + .../1.29/_gen/meta/v1/statusDetails.libsonnet | 20 + .../1.29/_gen/meta/v1/time.libsonnet | 6 + .../1.29/_gen/meta/v1/watchEvent.libsonnet | 17 + .../_gen/networking/main.libsonnet | 2 +- .../networking/v1/httpIngressPath.libsonnet | 10 +- .../v1}/httpIngressRuleValue.libsonnet | 4 +- .../_gen/networking/v1/ingress.libsonnet | 36 +- .../networking/v1/ingressBackend.libsonnet | 6 +- .../_gen/networking/v1/ingressClass.libsonnet | 32 +- .../ingressClassParametersReference.libsonnet | 10 +- .../networking/v1/ingressClassSpec.libsonnet | 12 +- .../v1/ingressLoadBalancerIngress.libsonnet | 14 + .../v1/ingressLoadBalancerStatus.libsonnet | 10 + .../networking/v1/ingressPortStatus.libsonnet | 12 + .../_gen/networking/v1/ingressRule.libsonnet | 6 +- .../v1/ingressServiceBackend.libsonnet | 6 +- .../_gen/networking/v1/ingressSpec.libsonnet | 16 +- .../networking/v1/ingressStatus.libsonnet | 13 + .../_gen/networking/v1/ingressTLS.libsonnet | 12 + .../1.29/_gen/networking/v1/ipBlock.libsonnet | 12 + .../_gen/networking/v1/main.libsonnet | 3 + .../networking/v1/networkPolicy.libsonnet | 32 +- .../v1/networkPolicyEgressRule.libsonnet | 14 + .../v1/networkPolicyIngressRule.libsonnet | 14 + .../networking/v1/networkPolicyPeer.libsonnet | 8 +- .../networking/v1/networkPolicyPort.libsonnet | 12 + .../networking/v1/networkPolicySpec.libsonnet | 29 ++ .../v1/serviceBackendPort.libsonnet | 4 +- .../networking/v1alpha1/ipAddress.libsonnet | 68 +++ .../v1alpha1/ipAddressSpec.libsonnet | 17 + .../_gen/networking/v1alpha1/main.libsonnet | 10 + .../v1alpha1/parentReference.libsonnet | 14 + 
.../networking/v1alpha1/serviceCIDR.libsonnet | 61 +++ .../v1alpha1/serviceCIDRSpec.libsonnet | 10 + .../v1alpha1/serviceCIDRStatus.libsonnet | 10 + .../1.29}/_gen/node/main.libsonnet | 2 - .../_gen/node/v1/main.libsonnet | 0 .../1.29/_gen/node/v1}/overhead.libsonnet | 4 +- .../1.29}/_gen/node/v1/runtimeClass.libsonnet | 26 +- .../1.29/_gen/node/v1}/scheduling.libsonnet | 0 .../1.29}/_gen/policy/main.libsonnet | 1 - .../1.29/_gen/policy/v1}/eviction.libsonnet | 22 +- .../1.29}/_gen/policy/v1/main.libsonnet | 1 + .../policy/v1/podDisruptionBudget.libsonnet | 22 +- .../v1/podDisruptionBudgetSpec.libsonnet | 2 + .../v1}/podDisruptionBudgetStatus.libsonnet | 0 .../{1.21 => 1.29}/_gen/rbac/main.libsonnet | 2 - .../_gen/rbac/v1}/aggregationRule.libsonnet | 0 .../_gen/rbac/v1/clusterRole.libsonnet | 20 +- .../_gen/rbac/v1/clusterRoleBinding.libsonnet | 20 +- .../_gen/rbac/v1/main.libsonnet | 0 .../1.29}/_gen/rbac/v1/policyRule.libsonnet | 12 +- .../_gen/rbac/v1/role.libsonnet | 20 +- .../1.29}/_gen/rbac/v1/roleBinding.libsonnet | 20 +- .../1.29/_gen/rbac/v1}/roleRef.libsonnet | 0 .../1.29/_gen/rbac/v1}/subject.libsonnet | 0 .../1.29/_gen/resource/main.libsonnet | 5 + .../v1alpha2/allocationResult.libsonnet | 19 + .../_gen/resource/v1alpha2/main.libsonnet | 19 + .../v1alpha2/podSchedulingContext.libsonnet | 63 +++ .../podSchedulingContextSpec.libsonnet | 12 + .../podSchedulingContextStatus.libsonnet | 10 + .../v1alpha2/resourceClaim.libsonnet} | 53 +- .../resourceClaimConsumerReference.libsonnet | 14 + ...resourceClaimParametersReference.libsonnet | 12 + .../resourceClaimSchedulingStatus.libsonnet | 12 + .../v1alpha2/resourceClaimSpec.libsonnet | 19 + .../v1alpha2/resourceClaimStatus.libsonnet | 30 ++ .../v1alpha2/resourceClaimTemplate.libsonnet | 116 +++++ .../resourceClaimTemplateSpec.libsonnet | 65 +++ .../resource/v1alpha2/resourceClass.libsonnet | 74 +++ ...resourceClassParametersReference.libsonnet | 14 + .../v1alpha2/resourceHandle.libsonnet | 10 + .../_gen/scheduling/main.libsonnet | 2 - .../_gen/scheduling/v1/main.libsonnet | 0 .../scheduling/v1/priorityClass.libsonnet | 24 +- .../1.29}/_gen/storage/main.libsonnet | 1 - .../_gen/storage/v1/csiDriver.libsonnet | 38 +- .../_gen/storage/v1/csiDriverSpec.libsonnet | 26 + .../1.29}/_gen/storage/v1/csiNode.libsonnet | 20 +- .../_gen/storage/v1}/csiNodeDriver.libsonnet | 4 +- .../_gen/storage/v1}/csiNodeSpec.libsonnet | 0 .../storage/v1}/csiStorageCapacity.libsonnet | 30 +- .../1.29/_gen/storage/v1}/main.libsonnet | 2 +- .../_gen/storage/v1/storageClass.libsonnet | 40 +- .../_gen/storage/v1/tokenRequest.libsonnet | 4 +- .../storage/v1/volumeAttachment.libsonnet | 301 +++++------ .../v1}/volumeAttachmentSource.libsonnet | 277 +++++----- .../storage/v1/volumeAttachmentSpec.libsonnet | 281 +++++----- .../v1/volumeAttachmentStatus.libsonnet | 26 + .../_gen/storage/v1/volumeError.libsonnet | 2 +- .../storage/v1/volumeNodeResources.libsonnet | 8 + .../_gen/storage}/v1alpha1/main.libsonnet | 2 +- .../v1alpha1/volumeAttributesClass.libsonnet | 47 +- .../k8s-libsonnet/1.29}/gen.libsonnet | 5 +- .../{1.21 => 1.29}/main.libsonnet | 0 1572 files changed, 17138 insertions(+), 25512 deletions(-) delete mode 120000 example/tk/vendor/1.21 create mode 120000 example/tk/vendor/1.29 create mode 100644 example/tk/vendor/github.com/jsonnet-libs/docsonnet/doc-util/render.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_custom/volumeMounts.libsonnet delete mode 100644 
example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/admissionregistration/v1beta1/main.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/admissionregistration/v1beta1/mutatingWebhook.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/admissionregistration/v1beta1/mutatingWebhookConfiguration.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/admissionregistration/v1beta1/validatingWebhook.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/admissionregistration/v1beta1/validatingWebhookConfiguration.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apiregistration/v1/apiServiceSpec.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apiregistration/v1beta1/apiService.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apiregistration/v1beta1/apiServiceSpec.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apiregistration/v1beta1/main.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apps/v1/rollingUpdateStatefulSetStrategy.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authentication/v1beta1/main.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authentication/v1beta1/tokenReview.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1beta1/localSubjectAccessReview.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1beta1/main.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1beta1/selfSubjectAccessReview.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1beta1/subjectAccessReview.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1beta1/subjectAccessReviewSpec.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v1/crossVersionObjectReference.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v1/scaleStatus.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta1/containerResourceMetricSource.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta1/containerResourceMetricStatus.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta1/crossVersionObjectReference.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta1/externalMetricSource.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta1/externalMetricStatus.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta1/horizontalPodAutoscaler.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta1/horizontalPodAutoscalerSpec.libsonnet delete mode 
100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta1/main.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta1/metricSpec.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta1/metricStatus.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta1/objectMetricSource.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta1/objectMetricStatus.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta1/podsMetricSource.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta1/podsMetricStatus.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta1/resourceMetricSource.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta1/resourceMetricStatus.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/containerResourceMetricSource.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/containerResourceMetricStatus.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/crossVersionObjectReference.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/externalMetricSource.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/externalMetricStatus.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/metricSpec.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/metricStatus.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/metricTarget.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/metricValueStatus.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/objectMetricSource.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/objectMetricStatus.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/podsMetricSource.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/podsMetricStatus.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/resourceMetricSource.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/resourceMetricStatus.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/batch/v1/jobTemplateSpec.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/batch/v1beta1/cronJob.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/batch/v1beta1/cronJobSpec.libsonnet delete 
mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/batch/v1beta1/main.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/certificates/v1beta1/certificateSigningRequest.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/certificates/v1beta1/certificateSigningRequestCondition.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/certificates/v1beta1/certificateSigningRequestSpec.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/certificates/v1beta1/certificateSigningRequestStatus.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/certificates/v1beta1/main.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/coordination/v1beta1/main.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/awsElasticBlockStoreVolumeSource.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/azureDiskVolumeSource.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/azureFilePersistentVolumeSource.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/azureFileVolumeSource.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/cephFSPersistentVolumeSource.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/cephFSVolumeSource.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/configMapProjection.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/configMapVolumeSource.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/downwardAPIVolumeFile.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/emptyDirVolumeSource.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/endpointPort.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/ephemeralContainers.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/fcVolumeSource.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/gcePersistentDiskVolumeSource.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/iscsiPersistentVolumeSource.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/iscsiVolumeSource.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/keyToPath.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/localVolumeSource.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/persistentVolumeClaimStatus.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/persistentVolumeStatus.libsonnet delete mode 100644 
example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/photonPersistentDiskVolumeSource.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/podIP.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/projectedVolumeSource.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/rbdPersistentVolumeSource.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/rbdVolumeSource.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/resourceFieldSelector.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/scaleIOPersistentVolumeSource.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/scaleIOVolumeSource.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/secretProjection.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/secretVolumeSource.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/topologySpreadConstraint.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/volumeProjection.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/vsphereVirtualDiskVolumeSource.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/discovery/v1/endpointPort.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/discovery/v1beta1/endpoint.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/discovery/v1beta1/endpointHints.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/discovery/v1beta1/endpointPort.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/discovery/v1beta1/endpointSlice.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/discovery/v1beta1/main.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/events/v1beta1/event.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/events/v1beta1/eventSeries.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/events/v1beta1/main.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/extensions/main.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/extensions/v1beta1/httpIngressPath.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/extensions/v1beta1/ingress.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/extensions/v1beta1/ingressBackend.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/extensions/v1beta1/ingressSpec.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/extensions/v1beta1/ingressStatus.libsonnet delete mode 100644 
example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/extensions/v1beta1/ingressTLS.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/extensions/v1beta1/main.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/flowcontrol/v1beta1/priorityLevelConfigurationSpec.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1/httpIngressRuleValue.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1/ingressStatus.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1/ingressTLS.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1/ipBlock.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1/networkPolicyEgressRule.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1/networkPolicyIngressRule.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1/networkPolicyPort.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1/networkPolicySpec.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1beta1/httpIngressPath.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1beta1/httpIngressRuleValue.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1beta1/ingress.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1beta1/ingressBackend.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1beta1/ingressClass.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1beta1/ingressRule.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1beta1/ingressSpec.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1beta1/ingressStatus.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1beta1/ingressTLS.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1beta1/main.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/node/v1alpha1/main.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/node/v1alpha1/runtimeClass.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/node/v1alpha1/runtimeClassSpec.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/node/v1beta1/main.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/node/v1beta1/overhead.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/node/v1beta1/runtimeClass.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/node/v1beta1/scheduling.libsonnet delete mode 100644 
example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1beta1/allowedCSIDriver.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1beta1/allowedFlexVolume.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1beta1/allowedHostPath.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1beta1/fsGroupStrategyOptions.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1beta1/hostPortRange.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1beta1/idRange.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1beta1/main.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1beta1/podDisruptionBudget.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1beta1/podSecurityPolicy.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1beta1/podSecurityPolicySpec.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1beta1/runAsGroupStrategyOptions.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1beta1/runAsUserStrategyOptions.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1beta1/runtimeClassStrategyOptions.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1beta1/seLinuxStrategyOptions.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1beta1/supplementalGroupsStrategyOptions.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1alpha1/clusterRole.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1alpha1/main.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1alpha1/policyRule.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1alpha1/role.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1alpha1/subject.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1beta1/aggregationRule.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1beta1/clusterRole.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1beta1/clusterRoleBinding.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1beta1/main.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1beta1/policyRule.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1beta1/role.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1beta1/roleBinding.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1beta1/roleRef.libsonnet delete mode 100644 
example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/scheduling/v1alpha1/priorityClass.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/scheduling/v1beta1/main.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/scheduling/v1beta1/priorityClass.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1/csiDriverSpec.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1/main.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1/volumeAttachmentSource.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1/volumeAttachmentSpec.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1/volumeAttachmentStatus.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1/volumeNodeResources.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1alpha1/csiStorageCapacity.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1alpha1/main.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1alpha1/volumeAttachment.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1alpha1/volumeAttachmentSpec.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1alpha1/volumeAttachmentStatus.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1alpha1/volumeError.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1beta1/csiDriver.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1beta1/csiDriverSpec.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1beta1/csiNode.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1beta1/storageClass.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1beta1/tokenRequest.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1beta1/volumeAttachment.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1beta1/volumeAttachmentStatus.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1beta1/volumeError.libsonnet delete mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1beta1/volumeNodeResources.libsonnet rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_custom/apps.libsonnet (93%) rename {operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_custom/autoscaling.libsonnet (94%) rename {operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_custom/batch.libsonnet (92%) rename 
{operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_custom/core.libsonnet (100%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_custom/list.libsonnet (100%) rename {operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_custom/mapContainers.libsonnet (85%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_custom/rbac.libsonnet (95%) rename {operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_custom/volumeMounts.libsonnet (98%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/admissionregistration/main.libsonnet (80%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/admissionregistration/v1/main.libsonnet (91%) create mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1/matchCondition.libsonnet rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/admissionregistration/v1/mutatingWebhook.libsonnet (86%) rename {operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/admissionregistration/v1/mutatingWebhookConfiguration.libsonnet (84%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/admissionregistration/v1/ruleWithOperations.libsonnet (100%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/admissionregistration/v1/serviceReference.libsonnet (100%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/admissionregistration/v1/validatingWebhook.libsonnet (85%) rename {operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/admissionregistration/v1/validatingWebhookConfiguration.libsonnet (84%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/admissionregistration/v1/webhookClientConfig.libsonnet (100%) create mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1alpha1/auditAnnotation.libsonnet create mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1alpha1/expressionWarning.libsonnet create mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1alpha1/main.libsonnet create mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1alpha1/matchCondition.libsonnet create mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1alpha1/matchResources.libsonnet rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/admissionregistration/v1/ruleWithOperations.libsonnet => example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1alpha1/namedRuleWithOperations.libsonnet (83%) create mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1alpha1/paramKind.libsonnet rename 
example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21/_gen/policy/v1/podDisruptionBudgetSpec.libsonnet => 1.29/_gen/admissionregistration/v1alpha1/paramRef.libsonnet} (51%) create mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1alpha1/typeChecking.libsonnet create mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1alpha1/validatingAdmissionPolicy.libsonnet create mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1alpha1/validatingAdmissionPolicyBinding.libsonnet create mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1alpha1/validatingAdmissionPolicyBindingSpec.libsonnet create mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1alpha1/validatingAdmissionPolicySpec.libsonnet create mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1alpha1/validatingAdmissionPolicyStatus.libsonnet create mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1alpha1/validation.libsonnet create mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1alpha1/variable.libsonnet create mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1beta1/auditAnnotation.libsonnet create mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1beta1/expressionWarning.libsonnet create mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1beta1/main.libsonnet create mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1beta1/matchCondition.libsonnet create mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1beta1/matchResources.libsonnet rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/admissionregistration/v1beta1/ruleWithOperations.libsonnet => example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1beta1/namedRuleWithOperations.libsonnet (83%) create mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1beta1/paramKind.libsonnet create mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1beta1/paramRef.libsonnet create mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1beta1/typeChecking.libsonnet create mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1beta1/validatingAdmissionPolicy.libsonnet create mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1beta1/validatingAdmissionPolicyBinding.libsonnet create mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1beta1/validatingAdmissionPolicyBindingSpec.libsonnet create mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1beta1/validatingAdmissionPolicySpec.libsonnet create mode 100644 
example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1beta1/validatingAdmissionPolicyStatus.libsonnet create mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1beta1/validation.libsonnet create mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1beta1/variable.libsonnet rename {operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/apiregistration/main.libsonnet (75%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/apiregistration/v1/apiService.libsonnet (86%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/apiregistration/v1/apiServiceCondition.libsonnet (100%) rename {operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/apiregistration/v1/apiServiceSpec.libsonnet (96%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/apiregistration/v1/apiServiceStatus.libsonnet (100%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/apiregistration/v1/main.libsonnet (100%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/apiregistration/v1/serviceReference.libsonnet (100%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/apiserverinternal/main.libsonnet (100%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/apiserverinternal/v1alpha1/main.libsonnet (100%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/apiserverinternal/v1alpha1/serverStorageVersion.libsonnet (68%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/apiserverinternal/v1alpha1/storageVersion.libsonnet (83%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/apiserverinternal/v1alpha1/storageVersionCondition.libsonnet (100%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/apiserverinternal/v1alpha1/storageVersionSpec.libsonnet (100%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/apiserverinternal/v1alpha1/storageVersionStatus.libsonnet (100%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/apps/main.libsonnet (100%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/apps/v1/controllerRevision.libsonnet (71%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/apps/v1/daemonSet.libsonnet (86%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/apps/v1/daemonSetCondition.libsonnet (100%) rename {operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/apps/v1/daemonSetSpec.libsonnet (85%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/apps/v1/daemonSetStatus.libsonnet (93%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/apps/v1/daemonSetUpdateStrategy.libsonnet (100%) rename {operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => 
example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/apps/v1/deployment.libsonnet (86%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/apps/v1/deploymentCondition.libsonnet (100%) rename {operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/apps/v1/deploymentSpec.libsonnet (85%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/apps/v1/deploymentStatus.libsonnet (93%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/apps/v1/deploymentStrategy.libsonnet (100%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/apps/v1/main.libsonnet (89%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/apps/v1/replicaSet.libsonnet (85%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/apps/v1/replicaSetCondition.libsonnet (100%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/apps/v1/replicaSetSpec.libsonnet (85%) rename {operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/apps/v1/replicaSetStatus.libsonnet (83%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/apps/v1/rollingUpdateDaemonSet.libsonnet (100%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/apps/v1/rollingUpdateDeployment.libsonnet (100%) create mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apps/v1/rollingUpdateStatefulSetStrategy.libsonnet rename {operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/apps/v1/statefulSet.libsonnet (83%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/apps/v1/statefulSetCondition.libsonnet (100%) create mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apps/v1/statefulSetOrdinals.libsonnet create mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apps/v1/statefulSetPersistentVolumeClaimRetentionPolicy.libsonnet rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/apps/v1/statefulSetSpec.libsonnet (82%) rename {operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/apps/v1/statefulSetStatus.libsonnet (88%) rename {operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/apps/v1/statefulSetUpdateStrategy.libsonnet (52%) rename {operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/authentication/main.libsonnet (79%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/authentication/v1/boundObjectReference.libsonnet (100%) rename {operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/authentication/v1/main.libsonnet (80%) create mode 100644 
example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authentication/v1/selfSubjectReview.libsonnet create mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authentication/v1/selfSubjectReviewStatus.libsonnet rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/authentication/v1/tokenRequest.libsonnet (80%) rename {operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/authentication/v1/tokenRequestSpec.libsonnet (68%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/authentication/v1/tokenRequestStatus.libsonnet (100%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/authentication/v1/tokenReview.libsonnet (85%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/authentication/v1/tokenReviewSpec.libsonnet (100%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/authentication/v1/tokenReviewStatus.libsonnet (100%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/authentication/v1/userInfo.libsonnet (100%) create mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authentication/v1alpha1/main.libsonnet create mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authentication/v1alpha1/selfSubjectReview.libsonnet create mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authentication/v1alpha1/selfSubjectReviewStatus.libsonnet create mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authentication/v1beta1/main.libsonnet create mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authentication/v1beta1/selfSubjectReview.libsonnet create mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authentication/v1beta1/selfSubjectReviewStatus.libsonnet rename {operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/authorization/main.libsonnet (75%) rename {operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/authorization/v1/localSubjectAccessReview.libsonnet (87%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/authorization/v1/main.libsonnet (100%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/authorization/v1/nonResourceAttributes.libsonnet (100%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/authorization/v1/nonResourceRule.libsonnet (100%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/authorization/v1/resourceAttributes.libsonnet (100%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/authorization/v1/resourceRule.libsonnet (100%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/authorization/v1/selfSubjectAccessReview.libsonnet (86%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/authorization/v1/selfSubjectAccessReviewSpec.libsonnet (100%) rename {operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => 
example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/authorization/v1/selfSubjectRulesReview.libsonnet (84%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21/_gen/authorization/v1beta1 => 1.29/_gen/authorization/v1}/selfSubjectRulesReviewSpec.libsonnet (64%) rename {operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/authorization/v1/subjectAccessReview.libsonnet (87%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/authorization/v1/subjectAccessReviewSpec.libsonnet (100%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/authorization/v1/subjectAccessReviewStatus.libsonnet (100%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/authorization/v1/subjectRulesReviewStatus.libsonnet (100%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/autoscaling/main.libsonnet (60%) create mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v1/crossVersionObjectReference.libsonnet rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/autoscaling/v1/horizontalPodAutoscaler.libsonnet (79%) rename {operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/autoscaling/v1/horizontalPodAutoscalerSpec.libsonnet (53%) rename {operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/autoscaling/v1/horizontalPodAutoscalerStatus.libsonnet (51%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/autoscaling/v1/main.libsonnet (100%) rename {operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/autoscaling/v1/scale.libsonnet (82%) rename {operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/autoscaling/v1/scaleSpec.libsonnet (62%) create mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v1/scaleStatus.libsonnet create mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/containerResourceMetricSource.libsonnet create mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/containerResourceMetricStatus.libsonnet create mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/crossVersionObjectReference.libsonnet create mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/externalMetricSource.libsonnet create mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/externalMetricStatus.libsonnet rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21/_gen/autoscaling/v2beta2 => 1.29/_gen/autoscaling/v2}/horizontalPodAutoscaler.libsonnet (86%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21/_gen/autoscaling/v2beta2 => 1.29/_gen/autoscaling/v2}/horizontalPodAutoscalerBehavior.libsonnet (93%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21/_gen/autoscaling/v2beta1 => 
1.29/_gen/autoscaling/v2}/horizontalPodAutoscalerCondition.libsonnet (100%) rename {operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2 => example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2}/horizontalPodAutoscalerSpec.libsonnet (90%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21/_gen/autoscaling/v2beta1 => 1.29/_gen/autoscaling/v2}/horizontalPodAutoscalerStatus.libsonnet (100%) rename {operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2 => example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2}/hpaScalingPolicy.libsonnet (79%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21/_gen/autoscaling/v2beta2 => 1.29/_gen/autoscaling/v2}/hpaScalingRules.libsonnet (93%) rename {operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2 => example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2}/main.libsonnet (97%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21/_gen/autoscaling/v2beta2 => 1.29/_gen/autoscaling/v2}/metricIdentifier.libsonnet (100%) create mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/metricSpec.libsonnet create mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/metricStatus.libsonnet create mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/metricTarget.libsonnet create mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/metricValueStatus.libsonnet create mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/objectMetricSource.libsonnet create mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/objectMetricStatus.libsonnet create mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/podsMetricSource.libsonnet create mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/podsMetricStatus.libsonnet create mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/resourceMetricSource.libsonnet create mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/resourceMetricStatus.libsonnet rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/batch/main.libsonnet (74%) rename {operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/batch/v1/cronJob.libsonnet (82%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/batch/v1/cronJobSpec.libsonnet (81%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/batch/v1/cronJobStatus.libsonnet (100%) rename {operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/batch/v1/job.libsonnet (81%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/batch/v1/jobCondition.libsonnet (100%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/batch/v1/jobSpec.libsonnet (80%) rename 
{operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/batch/v1/jobStatus.libsonnet (53%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21/_gen/batch/v1beta1 => 1.29/_gen/batch/v1}/jobTemplateSpec.libsonnet (81%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/batch/v1/main.libsonnet (53%) create mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/batch/v1/podFailurePolicy.libsonnet create mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/batch/v1/podFailurePolicyOnExitCodesRequirement.libsonnet create mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/batch/v1/podFailurePolicyOnPodConditionsPattern.libsonnet create mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/batch/v1/podFailurePolicyRule.libsonnet create mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/batch/v1/uncountedTerminatedPods.libsonnet rename {operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/certificates/main.libsonnet (74%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/certificates/v1/certificateSigningRequest.libsonnet (86%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/certificates/v1/certificateSigningRequestCondition.libsonnet (100%) rename {operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/certificates/v1/certificateSigningRequestSpec.libsonnet (84%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/certificates/v1/certificateSigningRequestStatus.libsonnet (100%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/certificates/v1/main.libsonnet (100%) create mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/certificates/v1alpha1/clusterTrustBundle.libsonnet create mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/certificates/v1alpha1/clusterTrustBundleSpec.libsonnet create mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/certificates/v1alpha1/main.libsonnet rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/coordination/main.libsonnet (75%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/coordination/v1/lease.libsonnet (84%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/coordination/v1/leaseSpec.libsonnet (95%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/coordination/v1/main.libsonnet (100%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/main.libsonnet (100%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/affinity.libsonnet (100%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/attachedVolume.libsonnet (100%) create mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/awsElasticBlockStoreVolumeSource.libsonnet create mode 100644 
example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/azureDiskVolumeSource.libsonnet create mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/azureFilePersistentVolumeSource.libsonnet create mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/azureFileVolumeSource.libsonnet rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/binding.libsonnet (86%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/capabilities.libsonnet (100%) create mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/cephFSPersistentVolumeSource.libsonnet create mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/cephFSVolumeSource.libsonnet rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/cinderPersistentVolumeSource.libsonnet (52%) rename {operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/core/v1/cinderVolumeSource.libsonnet (54%) create mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/claimSource.libsonnet rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/clientIPConfig.libsonnet (100%) create mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/clusterTrustBundleProjection.libsonnet rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/componentCondition.libsonnet (100%) rename {operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/core/v1/componentStatus.libsonnet (84%) rename {operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/core/v1/configMap.libsonnet (85%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/configMapEnvSource.libsonnet (100%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/configMapKeySelector.libsonnet (100%) rename {operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/core/v1/configMapNodeConfigSource.libsonnet (90%) create mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/configMapProjection.libsonnet create mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/configMapVolumeSource.libsonnet rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/container.libsonnet (78%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/containerImage.libsonnet (61%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/containerPort.libsonnet (100%) create mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/containerResizePolicy.libsonnet rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/containerState.libsonnet (96%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/containerStateRunning.libsonnet (100%) rename 
example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/containerStateTerminated.libsonnet (94%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/containerStateWaiting.libsonnet (100%) rename {operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/core/v1/containerStatus.libsonnet (51%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/csiPersistentVolumeSource.libsonnet (59%) rename {operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/core/v1/csiVolumeSource.libsonnet (71%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/daemonEndpoint.libsonnet (100%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/downwardAPIProjection.libsonnet (100%) create mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/downwardAPIVolumeFile.libsonnet rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/downwardAPIVolumeSource.libsonnet (100%) create mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/emptyDirVolumeSource.libsonnet rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/endpointAddress.libsonnet (92%) create mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/endpointPort.libsonnet rename {operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/core/v1/endpointSubset.libsonnet (87%) rename {operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/core/v1/endpoints.libsonnet (82%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/envFromSource.libsonnet (100%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/envVar.libsonnet (53%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/envVarSource.libsonnet (53%) rename {operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/core/v1/ephemeralContainer.libsonnet (78%) rename {operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/core/v1/ephemeralVolumeSource.libsonnet (76%) rename {operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/core/v1/event.libsonnet (90%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/eventSeries.libsonnet (100%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/eventSource.libsonnet (100%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/execAction.libsonnet (100%) create mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/fcVolumeSource.libsonnet rename 
{operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/core/v1/flexPersistentVolumeSource.libsonnet (50%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/flexVolumeSource.libsonnet (51%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/flockerVolumeSource.libsonnet (54%) create mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/gcePersistentDiskVolumeSource.libsonnet rename {operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/core/v1/gitRepoVolumeSource.libsonnet (53%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/glusterfsPersistentVolumeSource.libsonnet (72%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/glusterfsVolumeSource.libsonnet (67%) create mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/grpcAction.libsonnet rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/hostAlias.libsonnet (100%) create mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/hostIP.libsonnet rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/hostPathVolumeSource.libsonnet (83%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/httpGetAction.libsonnet (100%) rename {operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/core/v1/httpHeader.libsonnet (65%) create mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/iscsiPersistentVolumeSource.libsonnet create mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/iscsiVolumeSource.libsonnet create mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/keyToPath.libsonnet rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/lifecycle.libsonnet (89%) rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/handler.libsonnet => example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/lifecycleHandler.libsonnet (88%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/limitRange.libsonnet (84%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/limitRangeItem.libsonnet (100%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/limitRangeSpec.libsonnet (100%) rename {operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/core/v1/loadBalancerIngress.libsonnet (68%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/loadBalancerStatus.libsonnet (100%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/localObjectReference.libsonnet (100%) create mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/localVolumeSource.libsonnet rename 
{operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/core/v1/main.libsonnet (92%) create mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/modifyVolumeStatus.libsonnet rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/namespace.libsonnet (84%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/namespaceCondition.libsonnet (100%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/namespaceSpec.libsonnet (100%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/namespaceStatus.libsonnet (100%) rename {operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/core/v1/nfsVolumeSource.libsonnet (81%) rename {operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/core/v1/node.libsonnet (87%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/nodeAddress.libsonnet (100%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/nodeAffinity.libsonnet (100%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/nodeCondition.libsonnet (100%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/nodeConfigSource.libsonnet (89%) rename {operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/core/v1/nodeConfigStatus.libsonnet (92%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/nodeDaemonEndpoints.libsonnet (100%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/nodeSelector.libsonnet (100%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/nodeSelectorRequirement.libsonnet (100%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/nodeSelectorTerm.libsonnet (100%) rename {operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/core/v1/nodeSpec.libsonnet (95%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/nodeStatus.libsonnet (86%) rename {operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/core/v1/nodeSystemInfo.libsonnet (94%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/objectFieldSelector.libsonnet (100%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/objectReference.libsonnet (100%) rename {operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/core/v1/persistentVolume.libsonnet (61%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/persistentVolumeClaim.libsonnet (75%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 
1.29}/_gen/core/v1/persistentVolumeClaimCondition.libsonnet (66%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/persistentVolumeClaimSpec.libsonnet (60%) create mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/persistentVolumeClaimStatus.libsonnet rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/persistentVolumeClaimTemplate.libsonnet (75%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/persistentVolumeClaimVolumeSource.libsonnet (77%) rename {operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/core/v1/persistentVolumeSpec.libsonnet (54%) create mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/persistentVolumeStatus.libsonnet create mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/photonPersistentDiskVolumeSource.libsonnet rename {operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/core/v1/pod.libsonnet (84%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/podAffinity.libsonnet (100%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/podAffinityTerm.libsonnet (59%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/podAntiAffinity.libsonnet (100%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/podCondition.libsonnet (100%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/podDNSConfig.libsonnet (100%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/podDNSConfigOption.libsonnet (100%) create mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/podIP.libsonnet create mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/podOS.libsonnet rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/podReadinessGate.libsonnet (100%) create mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/podResourceClaim.libsonnet create mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/podResourceClaimStatus.libsonnet create mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/podSchedulingGate.libsonnet rename {operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/core/v1/podSecurityContext.libsonnet (71%) rename {operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/core/v1/podSpec.libsonnet (83%) rename {operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/core/v1/podStatus.libsonnet (67%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/podTemplate.libsonnet (85%) rename {operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => 
example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/core/v1/podTemplateSpec.libsonnet (84%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/portStatus.libsonnet (100%) rename {operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/core/v1/portworxVolumeSource.libsonnet (66%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/preferredSchedulingTerm.libsonnet (100%) rename {operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/core/v1/probe.libsonnet (88%) create mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/projectedVolumeSource.libsonnet rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/quobyteVolumeSource.libsonnet (76%) create mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/rbdPersistentVolumeSource.libsonnet create mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/rbdVolumeSource.libsonnet rename {operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/core/v1/replicationController.libsonnet (85%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/replicationControllerCondition.libsonnet (100%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/replicationControllerSpec.libsonnet (85%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/replicationControllerStatus.libsonnet (91%) create mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/resourceClaim.libsonnet create mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/resourceFieldSelector.libsonnet rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/resourceQuota.libsonnet (86%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/resourceQuotaSpec.libsonnet (100%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/resourceQuotaStatus.libsonnet (100%) create mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/resourceRequirements.libsonnet create mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/scaleIOPersistentVolumeSource.libsonnet create mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/scaleIOVolumeSource.libsonnet rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/scopeSelector.libsonnet (100%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/scopedResourceSelectorRequirement.libsonnet (100%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/seLinuxOptions.libsonnet (100%) rename {operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/core/v1/seccompProfile.libsonnet (86%) rename {operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => 
example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/core/v1/secret.libsonnet (85%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/secretEnvSource.libsonnet (100%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/secretKeySelector.libsonnet (100%) create mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/secretProjection.libsonnet rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/secretReference.libsonnet (80%) create mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/secretVolumeSource.libsonnet rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/securityContext.libsonnet (81%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/service.libsonnet (70%) rename {operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/core/v1/serviceAccount.libsonnet (81%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/serviceAccountTokenProjection.libsonnet (87%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/servicePort.libsonnet (68%) rename {operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/core/v1/serviceSpec.libsonnet (63%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/serviceStatus.libsonnet (100%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/sessionAffinityConfig.libsonnet (100%) create mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/sleepAction.libsonnet rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/storageOSPersistentVolumeSource.libsonnet (85%) rename {operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/core/v1/storageOSVolumeSource.libsonnet (70%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/sysctl.libsonnet (100%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/taint.libsonnet (100%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/tcpSocketAction.libsonnet (100%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/toleration.libsonnet (100%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/topologySelectorLabelRequirement.libsonnet (100%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/topologySelectorTerm.libsonnet (100%) create mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/topologySpreadConstraint.libsonnet rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/typedLocalObjectReference.libsonnet (100%) create mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/typedObjectReference.libsonnet rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/volume.libsonnet 
(57%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/volumeDevice.libsonnet (100%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/volumeMount.libsonnet (100%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/volumeNodeAffinity.libsonnet (100%) create mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/volumeProjection.libsonnet rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21/_gen/core/v1/resourceRequirements.libsonnet => 1.29/_gen/core/v1/volumeResourceRequirements.libsonnet} (64%) create mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/vsphereVirtualDiskVolumeSource.libsonnet rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/weightedPodAffinityTerm.libsonnet (61%) rename {operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/core/v1/windowsSecurityContextOptions.libsonnet (74%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/discovery/main.libsonnet (75%) rename {operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/discovery/v1/endpoint.libsonnet (89%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/discovery/v1/endpointConditions.libsonnet (74%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/discovery/v1/endpointHints.libsonnet (100%) create mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/discovery/v1/endpointPort.libsonnet rename {operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/discovery/v1/endpointSlice.libsonnet (85%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/discovery/v1/forZone.libsonnet (100%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/discovery/v1/main.libsonnet (100%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/events/main.libsonnet (74%) rename {operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/events/v1/event.libsonnet (90%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/events/v1/eventSeries.libsonnet (100%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/events/v1/main.libsonnet (100%) rename {operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/flowcontrol/main.libsonnet (55%) create mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1/exemptPriorityLevelConfiguration.libsonnet rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21/_gen/flowcontrol/v1beta1 => 1.29/_gen/flowcontrol/v1}/flowDistinguisherMethod.libsonnet (100%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21/_gen/flowcontrol/v1beta1 => 1.29/_gen/flowcontrol/v1}/flowSchema.libsonnet (85%) rename 
example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21/_gen/flowcontrol/v1beta1 => 1.29/_gen/flowcontrol/v1}/flowSchemaCondition.libsonnet (100%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21/_gen/flowcontrol/v1beta1 => 1.29/_gen/flowcontrol/v1}/flowSchemaSpec.libsonnet (100%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21/_gen/flowcontrol/v1beta1 => 1.29/_gen/flowcontrol/v1}/flowSchemaStatus.libsonnet (100%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21/_gen/flowcontrol/v1beta1 => 1.29/_gen/flowcontrol/v1}/groupSubject.libsonnet (100%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21/_gen/flowcontrol/v1beta1 => 1.29/_gen/flowcontrol/v1}/limitResponse.libsonnet (100%) create mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1/limitedPriorityLevelConfiguration.libsonnet rename {operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/flowcontrol/v1beta1 => example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1}/main.libsonnet (91%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21/_gen/flowcontrol/v1beta1 => 1.29/_gen/flowcontrol/v1}/nonResourcePolicyRule.libsonnet (100%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21/_gen/flowcontrol/v1beta1 => 1.29/_gen/flowcontrol/v1}/policyRulesWithSubjects.libsonnet (100%) create mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1/priorityLevelConfiguration.libsonnet rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21/_gen/flowcontrol/v1beta1 => 1.29/_gen/flowcontrol/v1}/priorityLevelConfigurationCondition.libsonnet (100%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21/_gen/flowcontrol/v1beta1 => 1.29/_gen/flowcontrol/v1}/priorityLevelConfigurationReference.libsonnet (100%) create mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1/priorityLevelConfigurationSpec.libsonnet rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21/_gen/flowcontrol/v1beta1 => 1.29/_gen/flowcontrol/v1}/priorityLevelConfigurationStatus.libsonnet (100%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21/_gen/flowcontrol/v1beta1 => 1.29/_gen/flowcontrol/v1}/queuingConfiguration.libsonnet (100%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21/_gen/flowcontrol/v1beta1 => 1.29/_gen/flowcontrol/v1}/resourcePolicyRule.libsonnet (94%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21/_gen/flowcontrol/v1beta1 => 1.29/_gen/flowcontrol/v1}/serviceAccountSubject.libsonnet (100%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21/_gen/flowcontrol/v1beta1 => 1.29/_gen/flowcontrol/v1}/subject.libsonnet (92%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21/_gen/flowcontrol/v1beta1 => 1.29/_gen/flowcontrol/v1}/userSubject.libsonnet (100%) create mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1beta3/exemptPriorityLevelConfiguration.libsonnet rename {operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/flowcontrol/v1beta1 => example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1beta3}/flowDistinguisherMethod.libsonnet (100%) rename 
{operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/flowcontrol/v1beta1 => example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1beta3}/flowSchema.libsonnet (85%) rename {operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/flowcontrol/v1beta1 => example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1beta3}/flowSchemaCondition.libsonnet (100%) rename {operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/flowcontrol/v1beta1 => example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1beta3}/flowSchemaSpec.libsonnet (100%) rename {operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/flowcontrol/v1beta1 => example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1beta3}/flowSchemaStatus.libsonnet (100%) rename {operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/flowcontrol/v1beta1 => example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1beta3}/groupSubject.libsonnet (100%) rename {operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/flowcontrol/v1beta1 => example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1beta3}/limitResponse.libsonnet (100%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21/_gen/flowcontrol/v1beta1 => 1.29/_gen/flowcontrol/v1beta3}/limitedPriorityLevelConfiguration.libsonnet (50%) create mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1beta3/main.libsonnet rename {operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/flowcontrol/v1beta1 => example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1beta3}/nonResourcePolicyRule.libsonnet (100%) rename {operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/flowcontrol/v1beta1 => example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1beta3}/policyRulesWithSubjects.libsonnet (100%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21/_gen/flowcontrol/v1beta1 => 1.29/_gen/flowcontrol/v1beta3}/priorityLevelConfiguration.libsonnet (69%) rename {operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/flowcontrol/v1beta1 => example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1beta3}/priorityLevelConfigurationCondition.libsonnet (100%) rename {operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/flowcontrol/v1beta1 => example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1beta3}/priorityLevelConfigurationReference.libsonnet (100%) create mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1beta3/priorityLevelConfigurationSpec.libsonnet rename {operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/flowcontrol/v1beta1 => example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1beta3}/priorityLevelConfigurationStatus.libsonnet (100%) rename {operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/flowcontrol/v1beta1 => example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1beta3}/queuingConfiguration.libsonnet (100%) rename 
{operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/flowcontrol/v1beta1 => example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1beta3}/resourcePolicyRule.libsonnet (94%) rename {operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/flowcontrol/v1beta1 => example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1beta3}/serviceAccountSubject.libsonnet (100%) rename {operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/flowcontrol/v1beta1 => example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1beta3}/subject.libsonnet (92%) rename {operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/flowcontrol/v1beta1 => example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1beta3}/userSubject.libsonnet (100%) rename {operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/meta/main.libsonnet (100%) rename {operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/meta/v1/apiGroup.libsonnet (100%) rename {operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/meta/v1/apiGroupList.libsonnet (100%) rename {operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/meta/v1/apiResource.libsonnet (100%) rename {operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/meta/v1/apiResourceList.libsonnet (100%) rename {operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/meta/v1/apiVersions.libsonnet (100%) rename {operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/meta/v1/condition.libsonnet (100%) rename {operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/meta/v1/deleteOptions.libsonnet (100%) rename {operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/meta/v1/fieldsV1.libsonnet (100%) rename {operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/meta/v1/groupVersionForDiscovery.libsonnet (100%) rename {operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/meta/v1/labelSelector.libsonnet (100%) rename {operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/meta/v1/labelSelectorRequirement.libsonnet (100%) rename {operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/meta/v1/listMeta.libsonnet (89%) rename 
{operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/meta/v1/main.libsonnet (100%) rename {operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/meta/v1/managedFieldsEntry.libsonnet (84%) rename {operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/meta/v1/microTime.libsonnet (100%) rename {operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/meta/v1/objectMeta.libsonnet (82%) rename {operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/meta/v1/ownerReference.libsonnet (62%) rename {operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/meta/v1/patch.libsonnet (100%) rename {operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/meta/v1/preconditions.libsonnet (100%) rename {operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/meta/v1/serverAddressByClientCIDR.libsonnet (100%) rename {operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/meta/v1/statusCause.libsonnet (100%) rename {operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/meta/v1/statusDetails.libsonnet (93%) rename {operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/meta/v1/time.libsonnet (100%) create mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/meta/v1/watchEvent.libsonnet rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/networking/main.libsonnet (74%) rename {operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/networking/v1/httpIngressPath.libsonnet (85%) rename {operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/extensions/v1beta1 => example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1}/httpIngressRuleValue.libsonnet (64%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/networking/v1/ingress.libsonnet (75%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/networking/v1/ingressBackend.libsonnet (90%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/networking/v1/ingressClass.libsonnet (82%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/networking/v1/ingressClassParametersReference.libsonnet (69%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21/_gen/networking/v1beta1 => 1.29/_gen/networking/v1}/ingressClassSpec.libsonnet (73%) create mode 100644 
example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/ingressLoadBalancerIngress.libsonnet create mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/ingressLoadBalancerStatus.libsonnet create mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/ingressPortStatus.libsonnet rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21/_gen/extensions/v1beta1 => 1.29/_gen/networking/v1}/ingressRule.libsonnet (78%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/networking/v1/ingressServiceBackend.libsonnet (81%) rename {operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/networking/v1/ingressSpec.libsonnet (51%) create mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/ingressStatus.libsonnet create mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/ingressTLS.libsonnet create mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/ipBlock.libsonnet rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/networking/v1/main.libsonnet (85%) rename {operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/networking/v1/networkPolicy.libsonnet (68%) create mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/networkPolicyEgressRule.libsonnet create mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/networkPolicyIngressRule.libsonnet rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/networking/v1/networkPolicyPeer.libsonnet (84%) create mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/networkPolicyPort.libsonnet create mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/networkPolicySpec.libsonnet rename {operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/networking/v1/serviceBackendPort.libsonnet (80%) create mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1alpha1/ipAddress.libsonnet create mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1alpha1/ipAddressSpec.libsonnet create mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1alpha1/main.libsonnet create mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1alpha1/parentReference.libsonnet create mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1alpha1/serviceCIDR.libsonnet create mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1alpha1/serviceCIDRSpec.libsonnet create mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1alpha1/serviceCIDRStatus.libsonnet rename {operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/node/main.libsonnet (58%) rename 
example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/node/v1/main.libsonnet (100%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/node/v1/overhead.libsonnet (82%) rename {operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/node/v1/runtimeClass.libsonnet (86%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/node/v1/scheduling.libsonnet (100%) rename {operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/policy/main.libsonnet (74%) rename {operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1beta1 => example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/policy/v1}/eviction.libsonnet (87%) rename {operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/policy/v1/main.libsonnet (87%) rename {operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/policy/v1/podDisruptionBudget.libsonnet (81%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21/_gen/policy/v1beta1 => 1.29/_gen/policy/v1}/podDisruptionBudgetSpec.libsonnet (65%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/policy/v1/podDisruptionBudgetStatus.libsonnet (100%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/rbac/main.libsonnet (58%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/rbac/v1/aggregationRule.libsonnet (100%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/rbac/v1/clusterRole.libsonnet (85%) rename {operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/rbac/v1/clusterRoleBinding.libsonnet (85%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/rbac/v1/main.libsonnet (100%) rename {operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/rbac/v1/policyRule.libsonnet (73%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/rbac/v1/role.libsonnet (84%) rename {operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/rbac/v1/roleBinding.libsonnet (85%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/rbac/v1/roleRef.libsonnet (100%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/rbac/v1/subject.libsonnet (100%) create mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/resource/main.libsonnet create mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/resource/v1alpha2/allocationResult.libsonnet create mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/resource/v1alpha2/main.libsonnet create mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/resource/v1alpha2/podSchedulingContext.libsonnet create mode 100644 
example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/resource/v1alpha2/podSchedulingContextSpec.libsonnet create mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/resource/v1alpha2/podSchedulingContextStatus.libsonnet rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21/_gen/coordination/v1beta1/lease.libsonnet => 1.29/_gen/resource/v1alpha2/resourceClaim.libsonnet} (73%) create mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/resource/v1alpha2/resourceClaimConsumerReference.libsonnet create mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/resource/v1alpha2/resourceClaimParametersReference.libsonnet create mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/resource/v1alpha2/resourceClaimSchedulingStatus.libsonnet create mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/resource/v1alpha2/resourceClaimSpec.libsonnet create mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/resource/v1alpha2/resourceClaimStatus.libsonnet create mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/resource/v1alpha2/resourceClaimTemplate.libsonnet rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21/_gen/authorization/v1beta1/selfSubjectRulesReview.libsonnet => 1.29/_gen/resource/v1alpha2/resourceClaimTemplateSpec.libsonnet} (74%) create mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/resource/v1alpha2/resourceClass.libsonnet create mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/resource/v1alpha2/resourceClassParametersReference.libsonnet create mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/resource/v1alpha2/resourceHandle.libsonnet rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/scheduling/main.libsonnet (59%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/scheduling/v1/main.libsonnet (100%) rename {operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/scheduling/v1/priorityClass.libsonnet (81%) rename {operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/storage/main.libsonnet (80%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/storage/v1/csiDriver.libsonnet (69%) create mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/storage/v1/csiDriverSpec.libsonnet rename {operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/storage/v1/csiNode.libsonnet (85%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/storage/v1/csiNodeDriver.libsonnet (80%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/storage/v1/csiNodeSpec.libsonnet (100%) rename {operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1beta1 => example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/storage/v1}/csiStorageCapacity.libsonnet (63%) rename {operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1beta1 => 
example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/storage/v1}/main.libsonnet (94%) rename {operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/storage/v1/storageClass.libsonnet (74%) rename {operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/storage/v1/tokenRequest.libsonnet (84%) rename {operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/storage/v1/volumeAttachment.libsonnet (65%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21/_gen/storage/v1beta1 => 1.29/_gen/storage/v1}/volumeAttachmentSource.libsonnet (57%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21/_gen/storage/v1beta1 => 1.29/_gen/storage/v1}/volumeAttachmentSpec.libsonnet (59%) create mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/storage/v1/volumeAttachmentStatus.libsonnet rename {operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/storage/v1/volumeError.libsonnet (69%) create mode 100644 example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/storage/v1/volumeNodeResources.libsonnet rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21/_gen/scheduling => 1.29/_gen/storage}/v1alpha1/main.libsonnet (59%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21/_gen/rbac/v1alpha1/roleBinding.libsonnet => 1.29/_gen/storage/v1alpha1/volumeAttributesClass.libsonnet} (71%) rename {operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/gen.libsonnet (90%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/main.libsonnet (100%) delete mode 120000 operations/jsonnet-compiled/util/vendor/1.21 create mode 120000 operations/jsonnet-compiled/util/vendor/1.29 delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/admissionregistration/v1/serviceReference.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/admissionregistration/v1/webhookClientConfig.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/admissionregistration/v1beta1/main.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/admissionregistration/v1beta1/mutatingWebhook.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/admissionregistration/v1beta1/mutatingWebhookConfiguration.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/admissionregistration/v1beta1/serviceReference.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/admissionregistration/v1beta1/validatingWebhook.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/admissionregistration/v1beta1/validatingWebhookConfiguration.libsonnet delete mode 100644 
operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/admissionregistration/v1beta1/webhookClientConfig.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apiregistration/v1/apiServiceCondition.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apiregistration/v1/apiServiceStatus.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apiregistration/v1/serviceReference.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apiregistration/v1beta1/apiService.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apiregistration/v1beta1/apiServiceCondition.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apiregistration/v1beta1/apiServiceStatus.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apiregistration/v1beta1/main.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apiregistration/v1beta1/serviceReference.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apps/v1/rollingUpdateStatefulSetStrategy.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authentication/v1/tokenReviewSpec.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authentication/v1/tokenReviewStatus.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authentication/v1/userInfo.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authentication/v1beta1/main.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authentication/v1beta1/tokenReview.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authentication/v1beta1/tokenReviewSpec.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authentication/v1beta1/tokenReviewStatus.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authentication/v1beta1/userInfo.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1/nonResourceAttributes.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1/nonResourceRule.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1/resourceAttributes.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1/resourceRule.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1/selfSubjectAccessReviewSpec.libsonnet delete mode 100644 
operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1/selfSubjectRulesReviewSpec.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1/subjectAccessReviewStatus.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1/subjectRulesReviewStatus.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1beta1/localSubjectAccessReview.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1beta1/main.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1beta1/nonResourceAttributes.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1beta1/nonResourceRule.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1beta1/resourceAttributes.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1beta1/resourceRule.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1beta1/selfSubjectAccessReview.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1beta1/selfSubjectAccessReviewSpec.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1beta1/selfSubjectRulesReview.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1beta1/selfSubjectRulesReviewSpec.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1beta1/subjectAccessReview.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1beta1/subjectAccessReviewSpec.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1beta1/subjectAccessReviewStatus.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1beta1/subjectRulesReviewStatus.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v1/crossVersionObjectReference.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v1/scaleStatus.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta1/containerResourceMetricSource.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta1/containerResourceMetricStatus.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta1/crossVersionObjectReference.libsonnet delete mode 100644 
operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta1/externalMetricSource.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta1/externalMetricStatus.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta1/horizontalPodAutoscaler.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta1/horizontalPodAutoscalerSpec.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta1/horizontalPodAutoscalerStatus.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta1/main.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta1/metricSpec.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta1/metricStatus.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta1/objectMetricSource.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta1/objectMetricStatus.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta1/podsMetricSource.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta1/podsMetricStatus.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta1/resourceMetricSource.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta1/resourceMetricStatus.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/containerResourceMetricSource.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/containerResourceMetricStatus.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/crossVersionObjectReference.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/externalMetricSource.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/externalMetricStatus.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/horizontalPodAutoscalerStatus.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/metricSpec.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/metricStatus.libsonnet delete mode 100644 
operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/metricTarget.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/metricValueStatus.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/objectMetricSource.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/objectMetricStatus.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/podsMetricSource.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/podsMetricStatus.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/resourceMetricSource.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/resourceMetricStatus.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/batch/v1/cronJobStatus.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/batch/v1beta1/cronJob.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/batch/v1beta1/cronJobSpec.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/batch/v1beta1/cronJobStatus.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/batch/v1beta1/jobTemplateSpec.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/batch/v1beta1/main.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/certificates/v1beta1/certificateSigningRequest.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/certificates/v1beta1/certificateSigningRequestCondition.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/certificates/v1beta1/certificateSigningRequestSpec.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/certificates/v1beta1/certificateSigningRequestStatus.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/certificates/v1beta1/main.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/coordination/v1/leaseSpec.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/coordination/v1beta1/leaseSpec.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/coordination/v1beta1/main.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/awsElasticBlockStoreVolumeSource.libsonnet delete mode 100644 
operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/azureDiskVolumeSource.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/azureFilePersistentVolumeSource.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/azureFileVolumeSource.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/cephFSPersistentVolumeSource.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/cephFSVolumeSource.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/configMapProjection.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/configMapVolumeSource.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/downwardAPIVolumeFile.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/emptyDirVolumeSource.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/endpointPort.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/ephemeralContainers.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/fcVolumeSource.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/gcePersistentDiskVolumeSource.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/iscsiPersistentVolumeSource.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/iscsiVolumeSource.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/keyToPath.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/localVolumeSource.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/persistentVolumeClaimStatus.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/persistentVolumeStatus.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/photonPersistentDiskVolumeSource.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/podIP.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/projectedVolumeSource.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/rbdPersistentVolumeSource.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/rbdVolumeSource.libsonnet delete mode 100644 
operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/resourceFieldSelector.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/scaleIOPersistentVolumeSource.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/scaleIOVolumeSource.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/secretProjection.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/secretVolumeSource.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/topologySpreadConstraint.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/volumeProjection.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/vsphereVirtualDiskVolumeSource.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/discovery/v1/endpointConditions.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/discovery/v1/endpointPort.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/discovery/v1/forZone.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/discovery/v1beta1/endpoint.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/discovery/v1beta1/endpointConditions.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/discovery/v1beta1/endpointHints.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/discovery/v1beta1/endpointPort.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/discovery/v1beta1/endpointSlice.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/discovery/v1beta1/forZone.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/discovery/v1beta1/main.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/events/v1beta1/event.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/events/v1beta1/eventSeries.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/events/v1beta1/main.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/extensions/main.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/extensions/v1beta1/httpIngressPath.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/extensions/v1beta1/ingress.libsonnet delete mode 100644 
operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/extensions/v1beta1/ingressBackend.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/extensions/v1beta1/ingressRule.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/extensions/v1beta1/ingressSpec.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/extensions/v1beta1/ingressStatus.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/extensions/v1beta1/ingressTLS.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/extensions/v1beta1/main.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/flowcontrol/v1beta1/priorityLevelConfigurationSpec.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/meta/v1/watchEvent.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1/httpIngressRuleValue.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1/ingressClassParametersReference.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1/ingressClassSpec.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1/ingressRule.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1/ingressStatus.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1/ingressTLS.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1/ipBlock.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1/networkPolicyEgressRule.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1/networkPolicyIngressRule.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1/networkPolicyPort.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1/networkPolicySpec.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1beta1/httpIngressPath.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1beta1/httpIngressRuleValue.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1beta1/ingress.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1beta1/ingressBackend.libsonnet delete mode 100644 
operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1beta1/ingressClass.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1beta1/ingressClassParametersReference.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1beta1/ingressClassSpec.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1beta1/ingressRule.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1beta1/ingressSpec.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1beta1/ingressStatus.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1beta1/ingressTLS.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1beta1/main.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/node/v1/overhead.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/node/v1/scheduling.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/node/v1alpha1/main.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/node/v1alpha1/overhead.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/node/v1alpha1/runtimeClass.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/node/v1alpha1/runtimeClassSpec.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/node/v1alpha1/scheduling.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/node/v1beta1/main.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/node/v1beta1/overhead.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/node/v1beta1/runtimeClass.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/node/v1beta1/scheduling.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1/podDisruptionBudgetStatus.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1beta1/allowedCSIDriver.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1beta1/allowedFlexVolume.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1beta1/allowedHostPath.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1beta1/fsGroupStrategyOptions.libsonnet delete mode 100644 
operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1beta1/hostPortRange.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1beta1/idRange.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1beta1/main.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1beta1/podDisruptionBudget.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1beta1/podDisruptionBudgetStatus.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1beta1/podSecurityPolicy.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1beta1/podSecurityPolicySpec.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1beta1/runAsGroupStrategyOptions.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1beta1/runAsUserStrategyOptions.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1beta1/runtimeClassStrategyOptions.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1beta1/seLinuxStrategyOptions.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1beta1/supplementalGroupsStrategyOptions.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1/aggregationRule.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1/roleRef.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1/subject.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1alpha1/aggregationRule.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1alpha1/clusterRole.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1alpha1/clusterRoleBinding.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1alpha1/main.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1alpha1/policyRule.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1alpha1/role.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1alpha1/roleBinding.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1alpha1/roleRef.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1alpha1/subject.libsonnet delete mode 100644 
operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1beta1/aggregationRule.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1beta1/clusterRole.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1beta1/clusterRoleBinding.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1beta1/main.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1beta1/policyRule.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1beta1/role.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1beta1/roleBinding.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1beta1/roleRef.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1beta1/subject.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/scheduling/v1alpha1/priorityClass.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/scheduling/v1beta1/main.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/scheduling/v1beta1/priorityClass.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1/csiDriverSpec.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1/csiNodeDriver.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1/csiNodeSpec.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1/main.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1/volumeAttachmentSource.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1/volumeAttachmentStatus.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1/volumeNodeResources.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1alpha1/csiStorageCapacity.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1alpha1/main.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1alpha1/volumeAttachment.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1alpha1/volumeAttachmentSource.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1alpha1/volumeAttachmentSpec.libsonnet delete mode 100644 
operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1alpha1/volumeAttachmentStatus.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1alpha1/volumeError.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1beta1/csiDriver.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1beta1/csiDriverSpec.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1beta1/csiNode.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1beta1/csiNodeDriver.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1beta1/csiNodeSpec.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1beta1/storageClass.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1beta1/tokenRequest.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1beta1/volumeAttachment.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1beta1/volumeAttachmentSource.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1beta1/volumeAttachmentSpec.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1beta1/volumeAttachmentStatus.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1beta1/volumeError.libsonnet delete mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1beta1/volumeNodeResources.libsonnet rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_custom/apps.libsonnet (91%) rename {example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_custom/autoscaling.libsonnet (88%) rename {example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_custom/batch.libsonnet (92%) rename {example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_custom/core.libsonnet (81%) rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_custom/list.libsonnet (100%) rename {example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_custom/mapContainers.libsonnet (61%) rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_custom/rbac.libsonnet (95%) create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_custom/volumeMounts.libsonnet rename 
operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/admissionregistration/main.libsonnet (80%) rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/admissionregistration/v1/main.libsonnet (91%) create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1/matchCondition.libsonnet rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/admissionregistration/v1/mutatingWebhook.libsonnet (86%) rename {example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/admissionregistration/v1/mutatingWebhookConfiguration.libsonnet (84%) rename {example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/admissionregistration/v1beta1 => operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1}/ruleWithOperations.libsonnet (100%) rename {example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/admissionregistration/v1beta1 => operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1}/serviceReference.libsonnet (100%) rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/admissionregistration/v1/validatingWebhook.libsonnet (85%) rename {example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/admissionregistration/v1/validatingWebhookConfiguration.libsonnet (84%) rename {example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/admissionregistration/v1beta1 => operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1}/webhookClientConfig.libsonnet (100%) create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1alpha1/auditAnnotation.libsonnet create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1alpha1/expressionWarning.libsonnet create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1alpha1/main.libsonnet create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1alpha1/matchCondition.libsonnet create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1alpha1/matchResources.libsonnet create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1alpha1/namedRuleWithOperations.libsonnet create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1alpha1/paramKind.libsonnet rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21/_gen/policy/v1beta1/podDisruptionBudgetSpec.libsonnet => 1.29/_gen/admissionregistration/v1alpha1/paramRef.libsonnet} (51%) create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1alpha1/typeChecking.libsonnet create mode 100644 
operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1alpha1/validatingAdmissionPolicy.libsonnet create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1alpha1/validatingAdmissionPolicyBinding.libsonnet create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1alpha1/validatingAdmissionPolicyBindingSpec.libsonnet create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1alpha1/validatingAdmissionPolicySpec.libsonnet create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1alpha1/validatingAdmissionPolicyStatus.libsonnet create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1alpha1/validation.libsonnet create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1alpha1/variable.libsonnet create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1beta1/auditAnnotation.libsonnet create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1beta1/expressionWarning.libsonnet create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1beta1/main.libsonnet create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1beta1/matchCondition.libsonnet create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1beta1/matchResources.libsonnet create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1beta1/namedRuleWithOperations.libsonnet create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1beta1/paramKind.libsonnet create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1beta1/paramRef.libsonnet create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1beta1/typeChecking.libsonnet create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1beta1/validatingAdmissionPolicy.libsonnet create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1beta1/validatingAdmissionPolicyBinding.libsonnet create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1beta1/validatingAdmissionPolicyBindingSpec.libsonnet create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1beta1/validatingAdmissionPolicySpec.libsonnet create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1beta1/validatingAdmissionPolicyStatus.libsonnet create mode 100644 
operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1beta1/validation.libsonnet
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1beta1/variable.libsonnet
rename {example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/apiregistration/main.libsonnet (75%)
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/apiregistration/v1/apiService.libsonnet (86%)
rename {example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apiregistration/v1beta1 => operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apiregistration/v1}/apiServiceCondition.libsonnet (100%)
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21/_gen/apiregistration/v1beta1 => 1.29/_gen/apiregistration/v1}/apiServiceSpec.libsonnet (96%)
rename {example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apiregistration/v1beta1 => operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apiregistration/v1}/apiServiceStatus.libsonnet (100%)
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/apiregistration/v1/main.libsonnet (100%)
rename {example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apiregistration/v1beta1 => operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apiregistration/v1}/serviceReference.libsonnet (100%)
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/apiserverinternal/main.libsonnet (100%)
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/apiserverinternal/v1alpha1/main.libsonnet (100%)
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/apiserverinternal/v1alpha1/serverStorageVersion.libsonnet (68%)
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/apiserverinternal/v1alpha1/storageVersion.libsonnet (83%)
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/apiserverinternal/v1alpha1/storageVersionCondition.libsonnet (100%)
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/apiserverinternal/v1alpha1/storageVersionSpec.libsonnet (100%)
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/apiserverinternal/v1alpha1/storageVersionStatus.libsonnet (100%)
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/apps/main.libsonnet (100%)
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/apps/v1/controllerRevision.libsonnet (71%)
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/apps/v1/daemonSet.libsonnet (86%)
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/apps/v1/daemonSetCondition.libsonnet (100%)
rename {example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/apps/v1/daemonSetSpec.libsonnet (85%)
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/apps/v1/daemonSetStatus.libsonnet (93%)
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/apps/v1/daemonSetUpdateStrategy.libsonnet (100%)
rename {example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/apps/v1/deployment.libsonnet (86%)
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/apps/v1/deploymentCondition.libsonnet (100%)
rename {example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/apps/v1/deploymentSpec.libsonnet (85%)
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/apps/v1/deploymentStatus.libsonnet (93%)
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/apps/v1/deploymentStrategy.libsonnet (100%)
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/apps/v1/main.libsonnet (89%)
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/apps/v1/replicaSet.libsonnet (85%)
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/apps/v1/replicaSetCondition.libsonnet (100%)
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/apps/v1/replicaSetSpec.libsonnet (85%)
rename {example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/apps/v1/replicaSetStatus.libsonnet (83%)
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/apps/v1/rollingUpdateDaemonSet.libsonnet (100%)
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/apps/v1/rollingUpdateDeployment.libsonnet (100%)
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apps/v1/rollingUpdateStatefulSetStrategy.libsonnet
rename {example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/apps/v1/statefulSet.libsonnet (83%)
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/apps/v1/statefulSetCondition.libsonnet (100%)
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apps/v1/statefulSetOrdinals.libsonnet
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apps/v1/statefulSetPersistentVolumeClaimRetentionPolicy.libsonnet
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/apps/v1/statefulSetSpec.libsonnet (82%)
rename {example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/apps/v1/statefulSetStatus.libsonnet (88%)
rename {example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/apps/v1/statefulSetUpdateStrategy.libsonnet (52%)
rename {example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/authentication/main.libsonnet (79%)
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/authentication/v1/boundObjectReference.libsonnet (100%)
rename {example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/authentication/v1/main.libsonnet (80%)
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authentication/v1/selfSubjectReview.libsonnet
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authentication/v1/selfSubjectReviewStatus.libsonnet
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/authentication/v1/tokenRequest.libsonnet (80%)
rename {example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/authentication/v1/tokenRequestSpec.libsonnet (68%)
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/authentication/v1/tokenRequestStatus.libsonnet (100%)
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/authentication/v1/tokenReview.libsonnet (85%)
rename {example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authentication/v1beta1 => operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authentication/v1}/tokenReviewSpec.libsonnet (100%)
rename {example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authentication/v1beta1 => operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authentication/v1}/tokenReviewStatus.libsonnet (100%)
rename {example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authentication/v1beta1 => operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authentication/v1}/userInfo.libsonnet (100%)
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authentication/v1alpha1/main.libsonnet
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authentication/v1alpha1/selfSubjectReview.libsonnet
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authentication/v1alpha1/selfSubjectReviewStatus.libsonnet
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authentication/v1beta1/main.libsonnet
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authentication/v1beta1/selfSubjectReview.libsonnet
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authentication/v1beta1/selfSubjectReviewStatus.libsonnet
rename {example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/authorization/main.libsonnet (75%)
rename {example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/authorization/v1/localSubjectAccessReview.libsonnet (87%)
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/authorization/v1/main.libsonnet (100%)
rename {example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1beta1 => operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authorization/v1}/nonResourceAttributes.libsonnet (100%)
rename {example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1beta1 => operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authorization/v1}/nonResourceRule.libsonnet (100%)
rename {example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1beta1 => operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authorization/v1}/resourceAttributes.libsonnet (100%)
rename {example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1beta1 => operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authorization/v1}/resourceRule.libsonnet (100%)
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/authorization/v1/selfSubjectAccessReview.libsonnet (86%)
rename {example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1beta1 => operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authorization/v1}/selfSubjectAccessReviewSpec.libsonnet (100%)
rename {example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/authorization/v1/selfSubjectRulesReview.libsonnet (84%)
rename {example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/authorization/v1/selfSubjectRulesReviewSpec.libsonnet (64%)
rename {example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/authorization/v1/subjectAccessReview.libsonnet (87%)
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/authorization/v1/subjectAccessReviewSpec.libsonnet (100%)
rename {example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1beta1 => operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authorization/v1}/subjectAccessReviewStatus.libsonnet (100%)
rename {example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1beta1 => operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authorization/v1}/subjectRulesReviewStatus.libsonnet (100%)
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/autoscaling/main.libsonnet (60%)
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v1/crossVersionObjectReference.libsonnet
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/autoscaling/v1/horizontalPodAutoscaler.libsonnet (79%)
rename {example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/autoscaling/v1/horizontalPodAutoscalerSpec.libsonnet (53%)
rename {example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/autoscaling/v1/horizontalPodAutoscalerStatus.libsonnet (51%)
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/autoscaling/v1/main.libsonnet (100%)
rename {example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/autoscaling/v1/scale.libsonnet (82%)
rename {example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/autoscaling/v1/scaleSpec.libsonnet (62%)
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v1/scaleStatus.libsonnet
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/containerResourceMetricSource.libsonnet
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/containerResourceMetricStatus.libsonnet
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/crossVersionObjectReference.libsonnet
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/externalMetricSource.libsonnet
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/externalMetricStatus.libsonnet
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21/_gen/autoscaling/v2beta2 => 1.29/_gen/autoscaling/v2}/horizontalPodAutoscaler.libsonnet (86%)
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21/_gen/autoscaling/v2beta2 => 1.29/_gen/autoscaling/v2}/horizontalPodAutoscalerBehavior.libsonnet (93%)
rename {example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2 => operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2}/horizontalPodAutoscalerCondition.libsonnet (100%)
rename {example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2 => operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2}/horizontalPodAutoscalerSpec.libsonnet (90%)
rename {example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2 => operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2}/horizontalPodAutoscalerStatus.libsonnet (100%)
rename {example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2 => operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2}/hpaScalingPolicy.libsonnet (79%)
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21/_gen/autoscaling/v2beta2 => 1.29/_gen/autoscaling/v2}/hpaScalingRules.libsonnet (93%)
rename {example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2 => operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2}/main.libsonnet (97%)
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21/_gen/autoscaling/v2beta2 => 1.29/_gen/autoscaling/v2}/metricIdentifier.libsonnet (100%)
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/metricSpec.libsonnet
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/metricStatus.libsonnet
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/metricTarget.libsonnet
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/metricValueStatus.libsonnet
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/objectMetricSource.libsonnet
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/objectMetricStatus.libsonnet
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/podsMetricSource.libsonnet
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/podsMetricStatus.libsonnet
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/resourceMetricSource.libsonnet
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/resourceMetricStatus.libsonnet
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/batch/main.libsonnet (74%)
rename {example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/batch/v1/cronJob.libsonnet (82%)
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/batch/v1/cronJobSpec.libsonnet (81%)
rename {example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/batch/v1beta1 => operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/batch/v1}/cronJobStatus.libsonnet (100%)
rename {example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/batch/v1/job.libsonnet (81%)
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/batch/v1/jobCondition.libsonnet (100%)
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/batch/v1/jobSpec.libsonnet (80%)
rename {example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/batch/v1/jobStatus.libsonnet (53%)
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/batch/v1/jobTemplateSpec.libsonnet (81%)
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/batch/v1/main.libsonnet (53%)
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/batch/v1/podFailurePolicy.libsonnet
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/batch/v1/podFailurePolicyOnExitCodesRequirement.libsonnet
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/batch/v1/podFailurePolicyOnPodConditionsPattern.libsonnet
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/batch/v1/podFailurePolicyRule.libsonnet
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/batch/v1/uncountedTerminatedPods.libsonnet
rename {example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/certificates/main.libsonnet (74%)
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/certificates/v1/certificateSigningRequest.libsonnet (86%)
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/certificates/v1/certificateSigningRequestCondition.libsonnet (100%)
rename {example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/certificates/v1/certificateSigningRequestSpec.libsonnet (84%)
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/certificates/v1/certificateSigningRequestStatus.libsonnet (100%)
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/certificates/v1/main.libsonnet (100%)
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/certificates/v1alpha1/clusterTrustBundle.libsonnet
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/certificates/v1alpha1/clusterTrustBundleSpec.libsonnet
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/certificates/v1alpha1/main.libsonnet
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/coordination/main.libsonnet (75%)
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/coordination/v1/lease.libsonnet (84%)
rename {example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/coordination/v1beta1 => operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/coordination/v1}/leaseSpec.libsonnet (95%)
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/coordination/v1/main.libsonnet (100%)
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/main.libsonnet (100%)
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/affinity.libsonnet (100%)
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/attachedVolume.libsonnet (100%)
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/awsElasticBlockStoreVolumeSource.libsonnet
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/azureDiskVolumeSource.libsonnet
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/azureFilePersistentVolumeSource.libsonnet
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/azureFileVolumeSource.libsonnet
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/binding.libsonnet (86%)
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/capabilities.libsonnet (100%)
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/cephFSPersistentVolumeSource.libsonnet
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/cephFSVolumeSource.libsonnet
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/cinderPersistentVolumeSource.libsonnet (52%)
rename {example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/core/v1/cinderVolumeSource.libsonnet (54%)
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/claimSource.libsonnet
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/clientIPConfig.libsonnet (100%)
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/clusterTrustBundleProjection.libsonnet
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/componentCondition.libsonnet (100%)
rename {example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/core/v1/componentStatus.libsonnet (84%)
rename {example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/core/v1/configMap.libsonnet (85%)
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/configMapEnvSource.libsonnet (100%)
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/configMapKeySelector.libsonnet (100%)
rename {example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/core/v1/configMapNodeConfigSource.libsonnet (90%)
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/configMapProjection.libsonnet
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/configMapVolumeSource.libsonnet
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/container.libsonnet (78%)
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/containerImage.libsonnet (61%)
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/containerPort.libsonnet (100%)
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/containerResizePolicy.libsonnet
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/containerState.libsonnet (96%)
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/containerStateRunning.libsonnet (100%)
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/containerStateTerminated.libsonnet (94%)
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/containerStateWaiting.libsonnet (100%)
rename {example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/core/v1/containerStatus.libsonnet (51%)
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/csiPersistentVolumeSource.libsonnet (59%)
rename {example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/core/v1/csiVolumeSource.libsonnet (71%)
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/daemonEndpoint.libsonnet (100%)
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/downwardAPIProjection.libsonnet (100%)
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/downwardAPIVolumeFile.libsonnet
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/downwardAPIVolumeSource.libsonnet (100%)
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/emptyDirVolumeSource.libsonnet
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/endpointAddress.libsonnet (92%)
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/endpointPort.libsonnet
rename {example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/core/v1/endpointSubset.libsonnet (87%)
rename {example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/core/v1/endpoints.libsonnet (82%)
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/envFromSource.libsonnet (100%)
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/envVar.libsonnet (53%)
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/envVarSource.libsonnet (53%)
rename {example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/core/v1/ephemeralContainer.libsonnet (78%)
rename {example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/core/v1/ephemeralVolumeSource.libsonnet (76%)
rename {example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/core/v1/event.libsonnet (90%)
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/eventSeries.libsonnet (100%)
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/eventSource.libsonnet (100%)
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/execAction.libsonnet (100%)
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/fcVolumeSource.libsonnet
rename {example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/core/v1/flexPersistentVolumeSource.libsonnet (50%)
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/flexVolumeSource.libsonnet (51%)
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/flockerVolumeSource.libsonnet (54%)
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/gcePersistentDiskVolumeSource.libsonnet
rename {example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/core/v1/gitRepoVolumeSource.libsonnet (53%)
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/glusterfsPersistentVolumeSource.libsonnet (72%)
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/glusterfsVolumeSource.libsonnet (67%)
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/grpcAction.libsonnet
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/hostAlias.libsonnet (100%)
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/hostIP.libsonnet
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/hostPathVolumeSource.libsonnet (83%)
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/httpGetAction.libsonnet (100%)
rename {example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/core/v1/httpHeader.libsonnet (65%)
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/iscsiPersistentVolumeSource.libsonnet
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/iscsiVolumeSource.libsonnet
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/keyToPath.libsonnet
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/lifecycle.libsonnet (89%)
rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/handler.libsonnet => operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/lifecycleHandler.libsonnet (88%)
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/limitRange.libsonnet (84%)
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/limitRangeItem.libsonnet (100%)
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/limitRangeSpec.libsonnet (100%)
rename {example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/core/v1/loadBalancerIngress.libsonnet (68%)
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/loadBalancerStatus.libsonnet (100%)
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/localObjectReference.libsonnet (100%)
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/localVolumeSource.libsonnet
rename {example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/core/v1/main.libsonnet (92%)
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/modifyVolumeStatus.libsonnet
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/namespace.libsonnet (84%)
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/namespaceCondition.libsonnet (100%)
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/namespaceSpec.libsonnet (100%)
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/namespaceStatus.libsonnet (100%)
rename {example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/core/v1/nfsVolumeSource.libsonnet (81%)
rename {example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/core/v1/node.libsonnet (87%)
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/nodeAddress.libsonnet (100%)
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/nodeAffinity.libsonnet (100%)
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/nodeCondition.libsonnet (100%)
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/nodeConfigSource.libsonnet (89%)
rename {example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/core/v1/nodeConfigStatus.libsonnet (92%)
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/nodeDaemonEndpoints.libsonnet (100%)
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/nodeSelector.libsonnet (100%)
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/nodeSelectorRequirement.libsonnet (100%)
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/nodeSelectorTerm.libsonnet (100%)
rename {example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/core/v1/nodeSpec.libsonnet (95%)
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/nodeStatus.libsonnet (86%)
rename {example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/core/v1/nodeSystemInfo.libsonnet (94%)
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/objectFieldSelector.libsonnet (100%)
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/objectReference.libsonnet (100%)
rename {example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/core/v1/persistentVolume.libsonnet (61%)
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/persistentVolumeClaim.libsonnet (75%)
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/persistentVolumeClaimCondition.libsonnet (66%)
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/persistentVolumeClaimSpec.libsonnet (60%)
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/persistentVolumeClaimStatus.libsonnet
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/persistentVolumeClaimTemplate.libsonnet (75%)
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/persistentVolumeClaimVolumeSource.libsonnet (77%)
rename {example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/core/v1/persistentVolumeSpec.libsonnet (54%)
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/persistentVolumeStatus.libsonnet
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/photonPersistentDiskVolumeSource.libsonnet
rename {example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/core/v1/pod.libsonnet (84%)
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/podAffinity.libsonnet (100%)
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/podAffinityTerm.libsonnet (59%)
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/podAntiAffinity.libsonnet (100%)
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/podCondition.libsonnet (100%)
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/podDNSConfig.libsonnet (100%)
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/podDNSConfigOption.libsonnet (100%)
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/podIP.libsonnet
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/podOS.libsonnet
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/podReadinessGate.libsonnet (100%)
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/podResourceClaim.libsonnet
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/podResourceClaimStatus.libsonnet
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/podSchedulingGate.libsonnet
rename {example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/core/v1/podSecurityContext.libsonnet (71%)
rename {example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/core/v1/podSpec.libsonnet (83%)
rename {example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/core/v1/podStatus.libsonnet (67%)
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/podTemplate.libsonnet (85%)
rename {example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/core/v1/podTemplateSpec.libsonnet (84%)
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/portStatus.libsonnet (100%)
rename {example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/core/v1/portworxVolumeSource.libsonnet (66%)
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/preferredSchedulingTerm.libsonnet (100%)
rename {example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/core/v1/probe.libsonnet (88%)
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/projectedVolumeSource.libsonnet
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/quobyteVolumeSource.libsonnet (76%)
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/rbdPersistentVolumeSource.libsonnet
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/rbdVolumeSource.libsonnet
rename {example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/core/v1/replicationController.libsonnet (85%)
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/replicationControllerCondition.libsonnet (100%)
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/replicationControllerSpec.libsonnet (85%)
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/replicationControllerStatus.libsonnet (91%)
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/resourceClaim.libsonnet
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/resourceFieldSelector.libsonnet
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/resourceQuota.libsonnet (86%)
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/resourceQuotaSpec.libsonnet (100%)
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/resourceQuotaStatus.libsonnet (100%)
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/resourceRequirements.libsonnet
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/scaleIOPersistentVolumeSource.libsonnet
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/scaleIOVolumeSource.libsonnet
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/scopeSelector.libsonnet (100%)
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/scopedResourceSelectorRequirement.libsonnet (100%)
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/seLinuxOptions.libsonnet (100%)
rename {example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/core/v1/seccompProfile.libsonnet (86%)
rename {example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/core/v1/secret.libsonnet (85%)
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/secretEnvSource.libsonnet (100%)
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/secretKeySelector.libsonnet (100%)
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/secretProjection.libsonnet
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/secretReference.libsonnet (80%)
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/secretVolumeSource.libsonnet
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/securityContext.libsonnet (81%)
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/service.libsonnet (70%)
rename {example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/core/v1/serviceAccount.libsonnet (81%)
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/serviceAccountTokenProjection.libsonnet (87%)
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/servicePort.libsonnet (68%)
rename {example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/core/v1/serviceSpec.libsonnet (63%)
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/serviceStatus.libsonnet (100%)
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/sessionAffinityConfig.libsonnet (100%)
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/sleepAction.libsonnet
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/storageOSPersistentVolumeSource.libsonnet (85%)
rename {example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/core/v1/storageOSVolumeSource.libsonnet (70%)
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/sysctl.libsonnet (100%)
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/taint.libsonnet (100%)
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/tcpSocketAction.libsonnet (100%)
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/toleration.libsonnet (100%)
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/topologySelectorLabelRequirement.libsonnet (100%)
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/topologySelectorTerm.libsonnet (100%)
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/topologySpreadConstraint.libsonnet
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/typedLocalObjectReference.libsonnet (100%)
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/typedObjectReference.libsonnet
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/volume.libsonnet (57%)
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/volumeDevice.libsonnet (100%)
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/volumeMount.libsonnet (100%)
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/volumeNodeAffinity.libsonnet (100%)
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/volumeProjection.libsonnet
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21/_gen/core/v1/resourceRequirements.libsonnet => 1.29/_gen/core/v1/volumeResourceRequirements.libsonnet} (64%)
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/vsphereVirtualDiskVolumeSource.libsonnet
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/core/v1/weightedPodAffinityTerm.libsonnet (61%)
rename {example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/core/v1/windowsSecurityContextOptions.libsonnet (74%)
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/discovery/main.libsonnet (75%)
rename {example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/discovery/v1/endpoint.libsonnet (89%)
rename {example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/discovery/v1beta1 => operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/discovery/v1}/endpointConditions.libsonnet (74%)
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/discovery/v1/endpointHints.libsonnet (100%)
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/discovery/v1/endpointPort.libsonnet
rename {example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/discovery/v1/endpointSlice.libsonnet (85%)
rename {example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/discovery/v1beta1 => operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/discovery/v1}/forZone.libsonnet (100%)
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/discovery/v1/main.libsonnet (100%)
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/events/main.libsonnet (74%)
rename {example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/events/v1/event.libsonnet (90%)
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/events/v1/eventSeries.libsonnet (100%)
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/events/v1/main.libsonnet (100%)
rename {example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/flowcontrol/main.libsonnet (55%)
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1/exemptPriorityLevelConfiguration.libsonnet
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1/flowDistinguisherMethod.libsonnet
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1/flowSchema.libsonnet
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21/_gen/autoscaling/v2beta1/horizontalPodAutoscalerCondition.libsonnet => 1.29/_gen/flowcontrol/v1/flowSchemaCondition.libsonnet} (52%)
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1/flowSchemaSpec.libsonnet
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1/flowSchemaStatus.libsonnet
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1/groupSubject.libsonnet
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1/limitResponse.libsonnet
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1/limitedPriorityLevelConfiguration.libsonnet
rename {example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/flowcontrol/v1beta1 => operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1}/main.libsonnet (91%)
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1/nonResourcePolicyRule.libsonnet
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1/policyRulesWithSubjects.libsonnet
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1/priorityLevelConfiguration.libsonnet
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1/priorityLevelConfigurationCondition.libsonnet
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1/priorityLevelConfigurationReference.libsonnet
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1/priorityLevelConfigurationSpec.libsonnet
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1/priorityLevelConfigurationStatus.libsonnet
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1/queuingConfiguration.libsonnet
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1/resourcePolicyRule.libsonnet
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1/serviceAccountSubject.libsonnet
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1/subject.libsonnet
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1/userSubject.libsonnet
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1beta3/exemptPriorityLevelConfiguration.libsonnet
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1beta3/flowDistinguisherMethod.libsonnet
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1beta3/flowSchema.libsonnet
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21/_gen/autoscaling/v2beta2/horizontalPodAutoscalerCondition.libsonnet => 1.29/_gen/flowcontrol/v1beta3/flowSchemaCondition.libsonnet} (52%)
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1beta3/flowSchemaSpec.libsonnet
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1beta3/flowSchemaStatus.libsonnet
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1beta3/groupSubject.libsonnet
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1beta3/limitResponse.libsonnet
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21/_gen/flowcontrol/v1beta1 => 1.29/_gen/flowcontrol/v1beta3}/limitedPriorityLevelConfiguration.libsonnet (50%)
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1beta3/main.libsonnet
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1beta3/nonResourcePolicyRule.libsonnet
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1beta3/policyRulesWithSubjects.libsonnet
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21/_gen/flowcontrol/v1beta1 => 1.29/_gen/flowcontrol/v1beta3}/priorityLevelConfiguration.libsonnet (69%)
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1beta3/priorityLevelConfigurationCondition.libsonnet
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1beta3/priorityLevelConfigurationReference.libsonnet
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1beta3/priorityLevelConfigurationSpec.libsonnet
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1beta3/priorityLevelConfigurationStatus.libsonnet
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1beta3/queuingConfiguration.libsonnet
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1beta3/resourcePolicyRule.libsonnet
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1beta3/serviceAccountSubject.libsonnet
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1beta3/subject.libsonnet
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1beta3/userSubject.libsonnet
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/meta/main.libsonnet
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/meta/v1/apiGroup.libsonnet
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/meta/v1/apiGroupList.libsonnet
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/meta/v1/apiResource.libsonnet
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/meta/v1/apiResourceList.libsonnet
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/meta/v1/apiVersions.libsonnet
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/meta/v1/condition.libsonnet
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/meta/v1/deleteOptions.libsonnet
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/meta/v1/fieldsV1.libsonnet
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/meta/v1/groupVersionForDiscovery.libsonnet
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/meta/v1/labelSelector.libsonnet
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/meta/v1/labelSelectorRequirement.libsonnet
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/meta/v1/listMeta.libsonnet
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/meta/v1/main.libsonnet
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/meta/v1/managedFieldsEntry.libsonnet
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/meta/v1/microTime.libsonnet
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/meta/v1/objectMeta.libsonnet
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/meta/v1/ownerReference.libsonnet
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/meta/v1/patch.libsonnet
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/meta/v1/preconditions.libsonnet
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/meta/v1/serverAddressByClientCIDR.libsonnet
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/meta/v1/statusCause.libsonnet
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/meta/v1/statusDetails.libsonnet
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/meta/v1/time.libsonnet
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/meta/v1/watchEvent.libsonnet
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/networking/main.libsonnet (74%)
rename {example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/networking/v1/httpIngressPath.libsonnet (85%)
rename {example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/extensions/v1beta1 => operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1}/httpIngressRuleValue.libsonnet (64%)
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/networking/v1/ingress.libsonnet (75%)
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/networking/v1/ingressBackend.libsonnet (90%)
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/networking/v1/ingressClass.libsonnet (82%)
rename {example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1beta1 => operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1}/ingressClassParametersReference.libsonnet (69%)
rename {example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/networking/v1/ingressClassSpec.libsonnet (73%)
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/ingressLoadBalancerIngress.libsonnet
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/ingressLoadBalancerStatus.libsonnet
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/ingressPortStatus.libsonnet
rename {example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/networking/v1/ingressRule.libsonnet (78%)
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/networking/v1/ingressServiceBackend.libsonnet (81%)
rename {example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/networking/v1/ingressSpec.libsonnet (51%)
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/ingressStatus.libsonnet
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/ingressTLS.libsonnet
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/ipBlock.libsonnet
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/networking/v1/main.libsonnet (85%)
rename {example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/networking/v1/networkPolicy.libsonnet (68%)
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/networkPolicyEgressRule.libsonnet
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/networkPolicyIngressRule.libsonnet
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/networking/v1/networkPolicyPeer.libsonnet (84%)
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/networkPolicyPort.libsonnet
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/networkPolicySpec.libsonnet
rename {example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/networking/v1/serviceBackendPort.libsonnet (80%)
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1alpha1/ipAddress.libsonnet
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1alpha1/ipAddressSpec.libsonnet
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1alpha1/main.libsonnet
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1alpha1/parentReference.libsonnet
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1alpha1/serviceCIDR.libsonnet
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1alpha1/serviceCIDRSpec.libsonnet
create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1alpha1/serviceCIDRStatus.libsonnet
rename {example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/node/main.libsonnet (58%)
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/node/v1/main.libsonnet (100%)
rename {example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/node/v1alpha1 => operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/node/v1}/overhead.libsonnet (82%)
rename {example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/node/v1/runtimeClass.libsonnet (86%)
rename {example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/node/v1alpha1 => operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/node/v1}/scheduling.libsonnet (100%)
rename {example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/policy/main.libsonnet (74%)
rename {example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1beta1 => operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/policy/v1}/eviction.libsonnet (87%)
rename {example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/policy/v1/main.libsonnet (87%)
rename {example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/policy/v1/podDisruptionBudget.libsonnet (81%)
rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/policy/v1/podDisruptionBudgetSpec.libsonnet (65%)
rename {example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1beta1 => operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/policy/v1}/podDisruptionBudgetStatus.libsonnet (100%)
rename
operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/rbac/main.libsonnet (58%) rename {example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1alpha1 => operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/rbac/v1}/aggregationRule.libsonnet (100%) rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/rbac/v1/clusterRole.libsonnet (85%) rename {example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/rbac/v1/clusterRoleBinding.libsonnet (85%) rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/rbac/v1/main.libsonnet (100%) rename {example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/rbac/v1/policyRule.libsonnet (73%) rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/rbac/v1/role.libsonnet (84%) rename {example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/rbac/v1/roleBinding.libsonnet (85%) rename {example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1alpha1 => operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/rbac/v1}/roleRef.libsonnet (100%) rename {example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1beta1 => operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/rbac/v1}/subject.libsonnet (100%) create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/resource/main.libsonnet create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/resource/v1alpha2/allocationResult.libsonnet create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/resource/v1alpha2/main.libsonnet create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/resource/v1alpha2/podSchedulingContext.libsonnet create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/resource/v1alpha2/podSchedulingContextSpec.libsonnet create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/resource/v1alpha2/podSchedulingContextStatus.libsonnet rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21/_gen/coordination/v1beta1/lease.libsonnet => 1.29/_gen/resource/v1alpha2/resourceClaim.libsonnet} (73%) create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/resource/v1alpha2/resourceClaimConsumerReference.libsonnet create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/resource/v1alpha2/resourceClaimParametersReference.libsonnet create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/resource/v1alpha2/resourceClaimSchedulingStatus.libsonnet create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/resource/v1alpha2/resourceClaimSpec.libsonnet create mode 100644 
operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/resource/v1alpha2/resourceClaimStatus.libsonnet create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/resource/v1alpha2/resourceClaimTemplate.libsonnet create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/resource/v1alpha2/resourceClaimTemplateSpec.libsonnet create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/resource/v1alpha2/resourceClass.libsonnet create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/resource/v1alpha2/resourceClassParametersReference.libsonnet create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/resource/v1alpha2/resourceHandle.libsonnet rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/scheduling/main.libsonnet (59%) rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/scheduling/v1/main.libsonnet (100%) rename {example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/scheduling/v1/priorityClass.libsonnet (81%) rename {example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/storage/main.libsonnet (80%) rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/storage/v1/csiDriver.libsonnet (69%) create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/storage/v1/csiDriverSpec.libsonnet rename {example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/storage/v1/csiNode.libsonnet (85%) rename {example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1beta1 => operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/storage/v1}/csiNodeDriver.libsonnet (80%) rename {example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1beta1 => operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/storage/v1}/csiNodeSpec.libsonnet (100%) rename {example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1beta1 => operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/storage/v1}/csiStorageCapacity.libsonnet (63%) rename {example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1beta1 => operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/storage/v1}/main.libsonnet (94%) rename {example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/storage/v1/storageClass.libsonnet (74%) rename {example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/storage/v1/tokenRequest.libsonnet (84%) rename {example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => 
operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/storage/v1/volumeAttachment.libsonnet (65%) rename {example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1alpha1 => operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/storage/v1}/volumeAttachmentSource.libsonnet (57%) rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/_gen/storage/v1/volumeAttachmentSpec.libsonnet (59%) create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/storage/v1/volumeAttachmentStatus.libsonnet rename {example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/_gen/storage/v1/volumeError.libsonnet (69%) create mode 100644 operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/storage/v1/volumeNodeResources.libsonnet rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21/_gen/scheduling => 1.29/_gen/storage}/v1alpha1/main.libsonnet (59%) rename example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1alpha1/clusterRoleBinding.libsonnet => operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/storage/v1alpha1/volumeAttributesClass.libsonnet (71%) rename {example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21 => operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29}/gen.libsonnet (87%) rename operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/{1.21 => 1.29}/main.libsonnet (100%) diff --git a/example/tk/jsonnetfile.json b/example/tk/jsonnetfile.json index 67c17dafb4e..e96000d6c2c 100644 --- a/example/tk/jsonnetfile.json +++ b/example/tk/jsonnetfile.json @@ -68,7 +68,7 @@ "source": { "git": { "remote": "https://github.com/jsonnet-libs/k8s-libsonnet.git", - "subdir": "1.21" + "subdir": "1.29" } }, "version": "main" diff --git a/example/tk/jsonnetfile.lock.json b/example/tk/jsonnetfile.lock.json index 41111860e7c..ef53d72468e 100644 --- a/example/tk/jsonnetfile.lock.json +++ b/example/tk/jsonnetfile.lock.json @@ -8,8 +8,8 @@ "subdir": "grafana" } }, - "version": "264a5c2078c5930af57fe2d107eff83ab63553af", - "sum": "7rANfqY8ERvoABHbwoGsdGpUeHxxYCSVOcM4Eky4QtQ=" + "version": "84e49c8549fa472c963862f233422c8b368afabe", + "sum": "Y5nheroSOIwmE+djEVPq4OvvTxKenzdHhpEwaR3Ebjs=" }, { "source": { @@ -18,8 +18,8 @@ "subdir": "grafana-builder" } }, - "version": "264a5c2078c5930af57fe2d107eff83ab63553af", - "sum": "0KkygBQd/AFzUvVzezE4qF/uDYgrwUXVpZfINBti0oc=" + "version": "84e49c8549fa472c963862f233422c8b368afabe", + "sum": "B49EzIY2WZsFxNMJcgRxE/gcZ9ltnS8pkOOV6Q5qioc=" }, { "source": { @@ -28,8 +28,8 @@ "subdir": "ksonnet-util" } }, - "version": "264a5c2078c5930af57fe2d107eff83ab63553af", - "sum": "JDsc/bUs5Yv1RkGKcm0hMteqCKZqemxA3qP6eiEATr8=" + "version": "84e49c8549fa472c963862f233422c8b368afabe", + "sum": "0y3AFX9LQSpfWTxWKSwoLgbt0Wc9nnCwhMH2szKzHv0=" }, { "source": { @@ -38,8 +38,8 @@ "subdir": "kube-state-metrics/" } }, - "version": "264a5c2078c5930af57fe2d107eff83ab63553af", - "sum": "kH7gD2rdqRtBujmCObN0ifNF/BkSZU8pleFRI8itkqY=" + "version": "84e49c8549fa472c963862f233422c8b368afabe", + "sum": "q1YzD+I4InDdfQP6k93W9Lw5U1jpll3gVaS9rUzLR7U=" }, { "source": { @@ -48,8 +48,8 @@ "subdir": "memcached" } }, - "version": "264a5c2078c5930af57fe2d107eff83ab63553af", - "sum": 
"dTOeEux3t9bYSqP2L/uCuLo/wUDpCKH4w+4OD9fePUk=" + "version": "84e49c8549fa472c963862f233422c8b368afabe", + "sum": "Cc715Y3rgTuimgDFIw+FaKzXSJGRYwt1pFTMbdrNBD8=" }, { "source": { @@ -58,8 +58,8 @@ "subdir": "prometheus" } }, - "version": "264a5c2078c5930af57fe2d107eff83ab63553af", - "sum": "1/gaGD2Wu6ZML+sXTEKo5zZC/VyBgS30yznbprRyy84=" + "version": "84e49c8549fa472c963862f233422c8b368afabe", + "sum": "b4scQI+UtFxkVgzzD3dfbPMwI7EVtyjv1uETEmizM8M=" }, { "source": { @@ -68,8 +68,8 @@ "subdir": "tanka-util" } }, - "version": "264a5c2078c5930af57fe2d107eff83ab63553af", - "sum": "AGgjH6IJe/1qwNtxFIiG8V1uyOJZascEydQsNrfPQ4c=" + "version": "84e49c8549fa472c963862f233422c8b368afabe", + "sum": "ShSIissXdvCy1izTCDZX6tY7qxCoepE5L+WJ52Hw7ZQ=" }, { "source": { @@ -78,18 +78,18 @@ "subdir": "doc-util" } }, - "version": "fc3f9bca2dff836b0e924a993bdf11bc51af78d4", - "sum": "JUBWG9ybm0TlY3uCWrNoQS00BcfPlCvuK9jPFU0NIj8=" + "version": "6ac6c69685b8c29c54515448eaca583da2d88150", + "sum": "BrAL/k23jq+xy9oA7TWIhUx07dsA/QLm3g7ktCwe//U=" }, { "source": { "git": { "remote": "https://github.com/jsonnet-libs/k8s-libsonnet.git", - "subdir": "1.21" + "subdir": "1.29" } }, - "version": "f8efa81cf15257bd151b97e31599e20b2ba5311b", - "sum": "FYub7WxElJkqjjXA++DemsKHwsPqUFW945BTgpVop6Q=" + "version": "bf9a62cfd32a58c071b8410bfcdec058475dd25e", + "sum": "i2w3hGbgQmaB73t5LJHSioPOVdYv8ZBvivHiDwZJVyI=" } ], "legacyImports": false diff --git a/example/tk/vendor/1.21 b/example/tk/vendor/1.21 deleted file mode 120000 index 406ebf32b16..00000000000 --- a/example/tk/vendor/1.21 +++ /dev/null @@ -1 +0,0 @@ -github.com/jsonnet-libs/k8s-libsonnet/1.21 \ No newline at end of file diff --git a/example/tk/vendor/1.29 b/example/tk/vendor/1.29 new file mode 120000 index 00000000000..70e86069e0f --- /dev/null +++ b/example/tk/vendor/1.29 @@ -0,0 +1 @@ +github.com/jsonnet-libs/k8s-libsonnet/1.29 \ No newline at end of file diff --git a/example/tk/vendor/github.com/grafana/jsonnet-libs/grafana-builder/grafana.libsonnet b/example/tk/vendor/github.com/grafana/jsonnet-libs/grafana-builder/grafana.libsonnet index c98e2b36061..0bd0b339493 100644 --- a/example/tk/vendor/github.com/grafana/jsonnet-libs/grafana-builder/grafana.libsonnet +++ b/example/tk/vendor/github.com/grafana/jsonnet-libs/grafana-builder/grafana.libsonnet @@ -1,5 +1,5 @@ { - dashboard(title, uid='', datasource='default'):: { + dashboard(title, uid='', datasource='default', datasource_regex=''):: { // Stuff that isn't materialised. 
_nextPanel:: 1, addRow(row):: self { @@ -13,7 +13,7 @@ rows+: [row { panels: panels }], }, - addTemplate(name, metric_name, label_name, hide=0, allValue=null):: self { + addTemplate(name, metric_name, label_name, hide=0, allValue=null, includeAll=false, sort=2):: self { templating+: { list+: [{ allValue: allValue, @@ -23,7 +23,7 @@ }, datasource: '$datasource', hide: hide, - includeAll: false, + includeAll: includeAll, label: name, multi: false, name: name, @@ -31,7 +31,7 @@ query: 'label_values(%s, %s)' % [metric_name, label_name], refresh: 1, regex: '', - sort: 2, + sort: sort, tagValuesQuery: '', tags: [], tagsQuery: '', @@ -41,7 +41,7 @@ }, }, - addMultiTemplate(name, metric_name, label_name, hide=0, allValue='.+'):: self { + addMultiTemplate(name, metric_name, label_name, hide=0, allValue='.+', sort=2):: self { templating+: { list+: [{ allValue: allValue, @@ -60,7 +60,7 @@ query: 'label_values(%s, %s)' % [metric_name, label_name], refresh: 1, regex: '', - sort: 2, + sort: sort, tagValuesQuery: '', tags: [], tagsQuery: '', @@ -109,12 +109,12 @@ value: datasource, }, hide: 0, - label: 'Data Source', + label: 'Data source', name: 'datasource', options: [], query: 'prometheus', refresh: 1, - regex: '', + regex: datasource_regex, type: 'datasource', }, ], @@ -178,6 +178,7 @@ titleSize: 'h6', }, + // "graph" type, now deprecated. panel(title):: { aliasColors: {}, bars: false, @@ -228,6 +229,46 @@ yaxes: $.yaxes('short'), }, + // "timeseries" panel, introduced with Grafana 7.4 and made standard in 8.0. + timeseriesPanel(title):: { + datasource: '$datasource', + fieldConfig: { + defaults: { + custom: { + drawStyle: 'line', + fillOpacity: 1, + lineWidth: 1, + pointSize: 5, + showPoints: 'never', + spanNulls: false, + stacking: { + group: 'A', + mode: 'none', + }, + }, + thresholds: { + mode: 'absolute', + steps: [], + }, + unit: 's', + }, + overrides: [], + }, + options: { + legend: { + showLegend: true, + }, + tooltip: { + mode: 'single', + sort: 'none', + }, + }, + links: [], + targets: [], + title: title, + type: 'timeseries', + }, + queryPanel(queries, legends, legendLink=null):: { local qs = @@ -248,9 +289,7 @@ legendLink: legendLink, expr: ql.q, format: 'time_series', - intervalFactor: 2, legendFormat: ql.l, - step: 10, } for ql in qsandls ], @@ -265,7 +304,6 @@ expr: query, format: 'time_series', instant: true, - intervalFactor: 2, refId: 'A', }, ], @@ -328,9 +366,7 @@ expr: qs[i], format: 'table', instant: true, - intervalFactor: 2, legendFormat: '', - step: 10, refId: std.char(65 + i), } for i in std.range(0, std.length(qs) - 1) @@ -391,8 +427,10 @@ '3xx': '#6ED0E0', '4xx': '#EF843C', '5xx': '#E24D42', + OK: '#7EB26D', success: '#7EB26D', 'error': '#E24D42', + cancel: '#A9A9A9', }, targets: [ { @@ -401,13 +439,11 @@ sum by (status) ( label_replace(label_replace(rate(%s[$__rate_interval]), "status", "${1}xx", "%s", "([0-9]).."), - "status", "${1}", "%s", "([a-z]+)")) + "status", "${1}", "%s", "([a-zA-Z]+)")) ||| % [selector, statusLabelName, statusLabelName], format: 'time_series', - intervalFactor: 2, legendFormat: '{{status}}', refId: 'A', - step: 10, }, ], } + $.stack, @@ -418,26 +454,20 @@ { expr: 'histogram_quantile(0.99, sum(rate(%s_bucket%s[$__rate_interval])) by (le)) * %s' % [metricName, selector, multiplier], format: 'time_series', - intervalFactor: 2, legendFormat: '99th Percentile', refId: 'A', - step: 10, }, { expr: 'histogram_quantile(0.50, sum(rate(%s_bucket%s[$__rate_interval])) by (le)) * %s' % [metricName, selector, multiplier], format: 'time_series', - 
intervalFactor: 2, legendFormat: '50th Percentile', refId: 'B', - step: 10, }, { expr: 'sum(rate(%s_sum%s[$__rate_interval])) * %s / sum(rate(%s_count%s[$__rate_interval]))' % [metricName, selector, multiplier, metricName, selector], format: 'time_series', - intervalFactor: 2, legendFormat: 'Average', refId: 'C', - step: 10, }, ], yaxes: $.yaxes('ms'), diff --git a/example/tk/vendor/github.com/grafana/jsonnet-libs/grafana/config.libsonnet b/example/tk/vendor/github.com/grafana/jsonnet-libs/grafana/config.libsonnet index f258bd9f00e..4a1de96a375 100644 --- a/example/tk/vendor/github.com/grafana/jsonnet-libs/grafana/config.libsonnet +++ b/example/tk/vendor/github.com/grafana/jsonnet-libs/grafana/config.libsonnet @@ -9,6 +9,15 @@ provisioningDir: '/etc/grafana/provisioning', port: 80, containerPort: 3000, + + // Split configmaps into multiple files + // 100000 is a good default, because a `kubectl` client-side apply cannot exceed 256kB, + // and the size of the request is doubled when setting the last-applied configuration + // https://github.com/kubernetes/kubectl/issues/712 + // For serverside applies, it can be increased, but keep in mind the 1MB limit of etcd + // https://github.com/kubernetes/kubernetes/issues/19781 + configmap_shard_size: 100000, + labels+: { dashboards: {}, datasources: {}, diff --git a/example/tk/vendor/github.com/grafana/jsonnet-libs/grafana/configmaps.libsonnet b/example/tk/vendor/github.com/grafana/jsonnet-libs/grafana/configmaps.libsonnet index 04140e97dee..0ab5ed70d64 100644 --- a/example/tk/vendor/github.com/grafana/jsonnet-libs/grafana/configmaps.libsonnet +++ b/example/tk/vendor/github.com/grafana/jsonnet-libs/grafana/configmaps.libsonnet @@ -74,7 +74,7 @@ local deployment = k.apps.v1.deployment; // Shard configmaps at around 100kB per shard local totalCharacters = std.foldl(function(x, y) x + y, [std.length(d.content) for d in dashboards], 0); - local shardCount = std.min(count, std.ceil(totalCharacters / 100000)); + local shardCount = std.min(count, std.ceil(totalCharacters / $._config.configmap_shard_size)); { // Calculate the number of dashboards per shard // This is skewed towards tail dashboards (smallest ones) diff --git a/example/tk/vendor/github.com/grafana/jsonnet-libs/ksonnet-util/grafana.libsonnet b/example/tk/vendor/github.com/grafana/jsonnet-libs/ksonnet-util/grafana.libsonnet index 8277402227e..9f6c760357f 100644 --- a/example/tk/vendor/github.com/grafana/jsonnet-libs/ksonnet-util/grafana.libsonnet +++ b/example/tk/vendor/github.com/grafana/jsonnet-libs/ksonnet-util/grafana.libsonnet @@ -60,10 +60,6 @@ }, }, - extensions+: { - v1beta1+: appsExtentions, - }, - apps+: { v1beta1+: appsExtentions, v1+: appsExtentions, diff --git a/example/tk/vendor/github.com/grafana/jsonnet-libs/ksonnet-util/legacy-custom.libsonnet b/example/tk/vendor/github.com/grafana/jsonnet-libs/ksonnet-util/legacy-custom.libsonnet index 2d42997ab8f..ba39d7a5cfd 100644 --- a/example/tk/vendor/github.com/grafana/jsonnet-libs/ksonnet-util/legacy-custom.libsonnet +++ b/example/tk/vendor/github.com/grafana/jsonnet-libs/ksonnet-util/legacy-custom.libsonnet @@ -144,10 +144,6 @@ }, }, - extensions+: { - v1beta1+: appsExtentions, - }, - apps+: { v1beta1+: appsExtentions, v1+: appsExtentions, diff --git a/example/tk/vendor/github.com/grafana/jsonnet-libs/ksonnet-util/legacy-noname.libsonnet b/example/tk/vendor/github.com/grafana/jsonnet-libs/ksonnet-util/legacy-noname.libsonnet index 9529244f299..30b54c0b34b 100644 --- 
a/example/tk/vendor/github.com/grafana/jsonnet-libs/ksonnet-util/legacy-noname.libsonnet +++ b/example/tk/vendor/github.com/grafana/jsonnet-libs/ksonnet-util/legacy-noname.libsonnet @@ -5,11 +5,6 @@ function(noNewEmptyNameMixin) { core+: { v1+: { persistentVolumeClaim+: noNewEmptyNameMixin, } }, - extensions+: { - v1beta1+: { - ingress+: noNewEmptyNameMixin, - }, - }, networking+: { v1beta1+: { ingress+: noNewEmptyNameMixin, diff --git a/example/tk/vendor/github.com/grafana/jsonnet-libs/ksonnet-util/legacy-subtypes.libsonnet b/example/tk/vendor/github.com/grafana/jsonnet-libs/ksonnet-util/legacy-subtypes.libsonnet index 286928648ea..59cd1b849c9 100644 --- a/example/tk/vendor/github.com/grafana/jsonnet-libs/ksonnet-util/legacy-subtypes.libsonnet +++ b/example/tk/vendor/github.com/grafana/jsonnet-libs/ksonnet-util/legacy-subtypes.libsonnet @@ -79,17 +79,6 @@ v1+: appsPatch, v1beta1+: appsPatch, }, - extensions+: { - v1beta1+: appsPatch { - ingress+: { - spec+: { - rulesType: $.extensions.v1beta1.ingressRule { - httpType+: { pathsType: $.extensions.v1beta1.httpIngressPath }, - }, - }, - }, - }, - }, batch+: { local patch = { @@ -111,13 +100,13 @@ local rbacPatch = { local role = { - rulesType: $.rbac.v1beta1.policyRule, + rulesType: $.rbac.v1.policyRule, }, role+: role, clusterRole+: role, local binding = { - subjectsType: $.rbac.v1beta1.subject, + subjectsType: $.rbac.v1.subject, }, roleBinding+: binding, clusterRoleBinding+: binding, @@ -130,6 +119,7 @@ }, rbac+: { v1+: rbacPatch, + // TODO: the v1beta1 RBAC API has been removed in Kubernetes 1.22 and should get removed once 1.22 is the oldest supported version v1beta1+: rbacPatch, }, diff --git a/example/tk/vendor/github.com/grafana/jsonnet-libs/ksonnet-util/legacy-types.libsonnet b/example/tk/vendor/github.com/grafana/jsonnet-libs/ksonnet-util/legacy-types.libsonnet index b339021a935..6118ac544f5 100644 --- a/example/tk/vendor/github.com/grafana/jsonnet-libs/ksonnet-util/legacy-types.libsonnet +++ b/example/tk/vendor/github.com/grafana/jsonnet-libs/ksonnet-util/legacy-types.libsonnet @@ -15,12 +15,13 @@ }, rbac+: { v1+: { - policyRule:: $.rbac.v1beta1.clusterRole.rulesType, - subject:: $.rbac.v1beta1.clusterRoleBinding.subjectsType, + policyRule:: $.rbac.v1.clusterRole.rulesType, + subject:: $.rbac.v1.clusterRoleBinding.subjectsType, }, + // TODO: the v1beta1 RBAC API has been removed in Kubernetes 1.22 and should get removed once 1.22 is the oldest supported version v1beta1+: { - policyRule:: $.rbac.v1beta1.clusterRole.rulesType, - subject:: $.rbac.v1beta1.clusterRoleBinding.subjectsType, + policyRule:: $.rbac.v1.clusterRole.rulesType, + subject:: $.rbac.v1.clusterRoleBinding.subjectsType, }, }, } diff --git a/example/tk/vendor/github.com/grafana/jsonnet-libs/ksonnet-util/util.libsonnet b/example/tk/vendor/github.com/grafana/jsonnet-libs/ksonnet-util/util.libsonnet index 5c785669b82..f8b798a9a5b 100644 --- a/example/tk/vendor/github.com/grafana/jsonnet-libs/ksonnet-util/util.libsonnet +++ b/example/tk/vendor/github.com/grafana/jsonnet-libs/ksonnet-util/util.libsonnet @@ -162,7 +162,7 @@ local util(k) = { ]) + (if annotations != {} then deployment.mixin.spec.template.metadata.withAnnotationsMixin(annotations) else {}), - hostVolumeMount(name, hostPath, path, readOnly=false, volumeMountMixin={}):: + hostVolumeMount(name, hostPath, path, readOnly=false, volumeMountMixin={}, volumeMixin={}):: local container = k.core.v1.container, deployment = k.apps.v1.deployment, volumeMount = k.core.v1.volumeMount, @@ -174,7 +174,8 @@ local util(k) = { 
deployment.mapContainers(addMount) + deployment.mixin.spec.template.spec.withVolumesMixin([ - volume.fromHostPath(name, hostPath), + volume.fromHostPath(name, hostPath) + + volumeMixin, ]), pvcVolumeMount(pvcName, path, readOnly=false, volumeMountMixin={}):: @@ -225,7 +226,9 @@ local util(k) = { manifestYaml(value):: ( local f = std.native('manifestYamlFromJson'); - f(std.toString(value)) + if f != null + then f(std.toString(value)) + else std.manifestYamlDoc(value) ), resourcesRequests(cpu, memory):: @@ -238,6 +241,16 @@ local util(k) = { else {}) ), + resourcesRequestsMixin(cpu, memory):: + k.core.v1.container.mixin.resources.withRequestsMixin( + (if cpu != null + then { cpu: cpu } + else {}) + + (if memory != null + then { memory: memory } + else {}) + ), + resourcesLimits(cpu, memory):: k.core.v1.container.mixin.resources.withLimits( (if cpu != null @@ -248,6 +261,16 @@ local util(k) = { else {}) ), + resourcesLimitsMixin(cpu, memory):: + k.core.v1.container.mixin.resources.withLimitsMixin( + (if cpu != null + then { cpu: cpu } + else {}) + + (if memory != null + then { memory: memory } + else {}) + ), + antiAffinity: { local deployment = k.apps.v1.deployment, diff --git a/example/tk/vendor/github.com/grafana/jsonnet-libs/kube-state-metrics/README.md b/example/tk/vendor/github.com/grafana/jsonnet-libs/kube-state-metrics/README.md index 575039f3f88..326ca5c7ebc 100644 --- a/example/tk/vendor/github.com/grafana/jsonnet-libs/kube-state-metrics/README.md +++ b/example/tk/vendor/github.com/grafana/jsonnet-libs/kube-state-metrics/README.md @@ -21,7 +21,7 @@ local ksm = import 'github.com/grafana/jsonnet-libs/kube-state-metrics/main.libs ksm: ksm.new(namespace), prometheus_config+: { - scape_configs+: [ + scrape_configs+: [ ksm.scrape_config(namespace), ], }, diff --git a/example/tk/vendor/github.com/grafana/jsonnet-libs/kube-state-metrics/main.libsonnet b/example/tk/vendor/github.com/grafana/jsonnet-libs/kube-state-metrics/main.libsonnet index 66c096879dd..8819bccae96 100644 --- a/example/tk/vendor/github.com/grafana/jsonnet-libs/kube-state-metrics/main.libsonnet +++ b/example/tk/vendor/github.com/grafana/jsonnet-libs/kube-state-metrics/main.libsonnet @@ -3,7 +3,7 @@ local kausal = import 'github.com/grafana/jsonnet-libs/ksonnet-util/kausal.libso { new( namespace, - image='k8s.gcr.io/kube-state-metrics/kube-state-metrics:v2.1.0', + image='registry.k8s.io/kube-state-metrics/kube-state-metrics:v2.1.0', ):: { local k = kausal { _config+: { @@ -21,7 +21,7 @@ local kausal = import 'github.com/grafana/jsonnet-libs/ksonnet-util/kausal.libso '--telemetry-port=8081', ]) + container.withPorts([ - containerPort.new('http-metrics', 8080), + containerPort.new('ksm', 8080), containerPort.new('self-metrics', 8081), ]) + k.util.resourcesRequests('50m', '50Mi') @@ -32,12 +32,9 @@ local kausal = import 'github.com/grafana/jsonnet-libs/ksonnet-util/kausal.libso deployment.new('kube-state-metrics', 1, [self.container]) + deployment.mixin.spec.template.spec.withServiceAccountName(self.rbac.service_account.metadata.name) + deployment.mixin.spec.template.spec.securityContext.withRunAsUser(65534) - + deployment.mixin.spec.template.spec.securityContext.withRunAsGroup(65534) - // Prevent default pod discovery from scraping, use ./scrape_config.libsonnet instead - // to preserve namespace etc labels. 
- + deployment.mixin.spec.template.metadata.withAnnotationsMixin({ 'prometheus.io.scrape': 'false' }), + + deployment.mixin.spec.template.spec.securityContext.withRunAsGroup(65534), - local policyRule = k.rbac.v1beta1.policyRule, + local policyRule = k.rbac.v1.policyRule, rbac: k.util.rbac('kube-state-metrics', [ policyRule.new() diff --git a/example/tk/vendor/github.com/grafana/jsonnet-libs/kube-state-metrics/scrape_config.libsonnet b/example/tk/vendor/github.com/grafana/jsonnet-libs/kube-state-metrics/scrape_config.libsonnet index c76046c182a..f6b3c45e1b4 100644 --- a/example/tk/vendor/github.com/grafana/jsonnet-libs/kube-state-metrics/scrape_config.libsonnet +++ b/example/tk/vendor/github.com/grafana/jsonnet-libs/kube-state-metrics/scrape_config.libsonnet @@ -22,6 +22,16 @@ function(namespace) { action: 'keep', }, + // Drop anything whose port is not 'ksm', these are the metrics computed by + // kube-state-metrics itself and not the 'self metrics' which should be + // scraped by normal prometheus service discovery ('self-metrics' port + // name). + { + source_labels: ['__meta_kubernetes_pod_container_port_name'], + regex: 'ksm', + action: 'keep', + }, + // Rename instances to the concatenation of pod:container:port. // In the specific case of KSM, we could leave out the container // name and still have a unique instance label, but we leave it diff --git a/example/tk/vendor/github.com/grafana/jsonnet-libs/memcached/memcached.libsonnet b/example/tk/vendor/github.com/grafana/jsonnet-libs/memcached/memcached.libsonnet index 3d2dda59bb9..7a3131a76d2 100644 --- a/example/tk/vendor/github.com/grafana/jsonnet-libs/memcached/memcached.libsonnet +++ b/example/tk/vendor/github.com/grafana/jsonnet-libs/memcached/memcached.libsonnet @@ -23,6 +23,7 @@ k { max_item_size:: '1m', memory_limit_mb:: 1024, overprovision_factor:: 1.2, + cpu_requests:: '500m', cpu_limits:: '3', connection_limit:: 1024, memory_request_overhead_mb:: 100, @@ -30,6 +31,9 @@ k { std.ceil((self.memory_limit_mb * self.overprovision_factor) + self.memory_request_overhead_mb) * 1024 * 1024, memory_limits_bytes:: std.max(self.memory_limit_mb * 1.5 * 1024 * 1024, self.memory_request_bytes), + use_topology_spread:: false, + topology_spread_max_skew:: 1, + extended_options:: [], local container = $.core.v1.container, local containerPort = $.core.v1.containerPort, @@ -37,13 +41,16 @@ k { memcached_container:: container.new('memcached', $._images.memcached) + container.withPorts([containerPort.new('client', 11211)]) + - container.withArgs([ - '-m %(memory_limit_mb)s' % self, - '-I %(max_item_size)s' % self, - '-c %(connection_limit)s' % self, - '-v', - ]) + - $.util.resourcesRequests('500m', $.util.bytesToK8sQuantity(self.memory_request_bytes)) + + container.withArgs( + [ + '-m %(memory_limit_mb)s' % self, + '-I %(max_item_size)s' % self, + '-c %(connection_limit)s' % self, + '-v', + ] + + if std.length(self.extended_options) != 0 then ['--extended=' + std.join(',', self.extended_options)] else [] + ) + + $.util.resourcesRequests(self.cpu_requests, $.util.bytesToK8sQuantity(self.memory_request_bytes)) + $.util.resourcesLimits(self.cpu_limits, $.util.bytesToK8sQuantity(self.memory_limits_bytes)), memcached_exporter:: @@ -55,6 +62,7 @@ k { ]), local statefulSet = $.apps.v1.statefulSet, + local topologySpreadConstraints = k.core.v1.topologySpreadConstraint, statefulSet: statefulSet.new(self.name, $._config.memcached_replicas, [ @@ -62,7 +70,17 @@ k { self.memcached_exporter, ], []) + statefulSet.spec.withServiceName(self.name) + - 
$.util.antiAffinity, + if self.use_topology_spread then + local pod_name = self.name; + statefulSet.spec.template.spec.withTopologySpreadConstraints( + // Evenly spread pods among available nodes. + topologySpreadConstraints.labelSelector.withMatchLabels({ name: pod_name }) + + topologySpreadConstraints.withTopologyKey('kubernetes.io/hostname') + + topologySpreadConstraints.withWhenUnsatisfiable('ScheduleAnyway') + + topologySpreadConstraints.withMaxSkew(self.topology_spread_max_skew), + ) + else + $.util.antiAffinity, local service = $.core.v1.service, diff --git a/example/tk/vendor/github.com/grafana/jsonnet-libs/prometheus/config.libsonnet b/example/tk/vendor/github.com/grafana/jsonnet-libs/prometheus/config.libsonnet index f7411faf730..df6c8a82989 100644 --- a/example/tk/vendor/github.com/grafana/jsonnet-libs/prometheus/config.libsonnet +++ b/example/tk/vendor/github.com/grafana/jsonnet-libs/prometheus/config.libsonnet @@ -3,7 +3,7 @@ name:: 'prometheus', // Cluster and environment specific overrides. - cluster_dns_tld: 'local', + cluster_dns_tld: 'local.', cluster_dns_suffix: 'cluster.' + self.cluster_dns_tld, namespace: error 'must specify namespace', diff --git a/example/tk/vendor/github.com/grafana/jsonnet-libs/prometheus/ha-mixin.libsonnet b/example/tk/vendor/github.com/grafana/jsonnet-libs/prometheus/ha-mixin.libsonnet index 92b4e36d844..38e11371d43 100644 --- a/example/tk/vendor/github.com/grafana/jsonnet-libs/prometheus/ha-mixin.libsonnet +++ b/example/tk/vendor/github.com/grafana/jsonnet-libs/prometheus/ha-mixin.libsonnet @@ -89,6 +89,13 @@ function(replicas=2) { + container.mixin.readinessProbe.httpGet.withPort(self._config.prometheus_port) + container.mixin.readinessProbe.withInitialDelaySeconds(15) + container.mixin.readinessProbe.withTimeoutSeconds(1) + // Give 50 * 30 seconds (= 25 minutes) to start up, then start checking readiness + + container.mixin.startupProbe.httpGet.withPath('%(prometheus_path)s-/ready' % self._config) + + container.mixin.startupProbe.httpGet.withPort(self._config.prometheus_port) + + container.mixin.startupProbe.withInitialDelaySeconds(15) + + container.mixin.startupProbe.withTimeoutSeconds(1) + + container.mixin.startupProbe.withFailureThreshold(50) + + container.mixin.startupProbe.withPeriodSeconds(30) , prometheus_watch_container+:: diff --git a/example/tk/vendor/github.com/grafana/jsonnet-libs/prometheus/images.libsonnet b/example/tk/vendor/github.com/grafana/jsonnet-libs/prometheus/images.libsonnet index 0faaec64b85..74a7dff26c8 100644 --- a/example/tk/vendor/github.com/grafana/jsonnet-libs/prometheus/images.libsonnet +++ b/example/tk/vendor/github.com/grafana/jsonnet-libs/prometheus/images.libsonnet @@ -1,6 +1,6 @@ { _images+:: { prometheus: 'prom/prometheus:v2.31.1', - watch: 'weaveworks/watch:master-5fc29a9', + watch: 'weaveworks/watch:master-0c44bf6', }, } diff --git a/example/tk/vendor/github.com/grafana/jsonnet-libs/prometheus/prometheus.libsonnet b/example/tk/vendor/github.com/grafana/jsonnet-libs/prometheus/prometheus.libsonnet index 846f31c7fc3..949bc2d044f 100644 --- a/example/tk/vendor/github.com/grafana/jsonnet-libs/prometheus/prometheus.libsonnet +++ b/example/tk/vendor/github.com/grafana/jsonnet-libs/prometheus/prometheus.libsonnet @@ -18,7 +18,7 @@ local kausal = import 'ksonnet-util/kausal.libsonnet'; servicePort: kausal.core.v1.service.mixin.spec.portsType, } }, rbac+: { v1+: { - policyRule: kausal.rbac.v1beta1.clusterRole.rulesType, + policyRule: kausal.rbac.v1.clusterRole.rulesType, } }, } else {} diff --git 
a/example/tk/vendor/github.com/grafana/jsonnet-libs/tanka-util/README.md b/example/tk/vendor/github.com/grafana/jsonnet-libs/tanka-util/README.md index b460fd84c11..4781e6dea18 100644 --- a/example/tk/vendor/github.com/grafana/jsonnet-libs/tanka-util/README.md +++ b/example/tk/vendor/github.com/grafana/jsonnet-libs/tanka-util/README.md @@ -2,7 +2,7 @@ permalink: / --- -# package tanka_util +# tanka_util ```jsonnet local tanka_util = import "github.com/grafana/jsonnet-libs/tanka-util/main.libsonnet" @@ -76,6 +76,7 @@ Kustomize functionality. * [`obj environment`](#obj-environment) * [`fn new(name, namespace, apiserver)`](#fn-environmentnew) * [`fn withApiServer(apiserver)`](#fn-environmentwithapiserver) + * [`fn withApplyStrategy(applyStrategy)`](#fn-environmentwithapplystrategy) * [`fn withData(data)`](#fn-environmentwithdata) * [`fn withDataMixin(data)`](#fn-environmentwithdatamixin) * [`fn withInjectLabels(bool)`](#fn-environmentwithinjectlabels) @@ -122,6 +123,16 @@ withApiServer(apiserver) Must be the full URL, e.g. https://cluster.fqdn:6443 +### fn environment.withApplyStrategy + +```ts +withApplyStrategy(applyStrategy) +``` + +`withApplyStrategy` sets the Kubernetes apply strategy used for this environment. +Must be `client` or `server` + + ### fn environment.withData ```ts diff --git a/example/tk/vendor/github.com/grafana/jsonnet-libs/tanka-util/environment.libsonnet b/example/tk/vendor/github.com/grafana/jsonnet-libs/tanka-util/environment.libsonnet index f273ad5ff1d..a402a46946d 100644 --- a/example/tk/vendor/github.com/grafana/jsonnet-libs/tanka-util/environment.libsonnet +++ b/example/tk/vendor/github.com/grafana/jsonnet-libs/tanka-util/environment.libsonnet @@ -70,6 +70,22 @@ local d = import 'github.com/jsonnet-libs/docsonnet/doc-util/main.libsonnet'; }, }, + '#withApplyStrategy':: + d.fn( + ||| + `withApplyStrategy` sets the Kubernetes apply strategy used for this environment. + Must be `client` or `server` + |||, + [d.arg('applyStrategy', d.T.string)] + ), + withApplyStrategy(applyStrategy):: + local strategy = if std.member(['client', 'server'], applyStrategy) then applyStrategy else error 'applyStrategy must be `client` or `server'; + { + spec+: { + applyStrategy: strategy, + }, + }, + '#withNamespace':: d.fn( "`withNamespace` sets the default namespace for objects that don't explicitely specify one.", diff --git a/example/tk/vendor/github.com/jsonnet-libs/docsonnet/doc-util/README.md b/example/tk/vendor/github.com/jsonnet-libs/docsonnet/doc-util/README.md index 017cec18d53..c6777425cf0 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/docsonnet/doc-util/README.md +++ b/example/tk/vendor/github.com/jsonnet-libs/docsonnet/doc-util/README.md @@ -1,26 +1,32 @@ ---- -permalink: / ---- +# doc-util -# package d +`doc-util` provides a Jsonnet interface for `docsonnet`, + a Jsonnet API doc generator that uses structured data instead of comments. + +## Install -```jsonnet -local d = import "github.com/jsonnet-libs/docsonnet/doc-util" +``` +jb install github.com/jsonnet-libs/docsonnet/doc-util@master ``` -`doc-util` provides a Jsonnet interface for `docsonnet`, - a Jsonnet API doc generator that uses structured data instead of comments. 
+## Usage + +```jsonnet +local d = import "github.com/jsonnet-libs/docsonnet/doc-util/main.libsonnet" +``` ## Index -* [`fn arg(name, type, default)`](#fn-arg) +* [`fn arg(name, type, default, enums)`](#fn-arg) * [`fn fn(help, args)`](#fn-fn) * [`fn obj(help, fields)`](#fn-obj) -* [`fn pkg(name, url, help)`](#fn-pkg) +* [`fn pkg(name, url, help, filename="", version="master")`](#fn-pkg) +* [`fn render(obj)`](#fn-render) * [`fn val(type, help, default)`](#fn-val) * [`obj argument`](#obj-argument) - * [`fn new(name, type, default)`](#fn-argumentnew) + * [`fn fromSchema(name, schema)`](#fn-argumentfromschema) + * [`fn new(name, type, default, enums)`](#fn-argumentnew) * [`obj func`](#obj-func) * [`fn new(help, args)`](#fn-funcnew) * [`fn withArgs(args)`](#fn-funcwithargs) @@ -28,133 +34,293 @@ local d = import "github.com/jsonnet-libs/docsonnet/doc-util" * [`obj object`](#obj-object) * [`fn new(help, fields)`](#fn-objectnew) * [`fn withFields(fields)`](#fn-objectwithfields) -* [`obj package`](#obj-package) - * [`fn new(name, url, help)`](#fn-packagenew) * [`obj value`](#obj-value) * [`fn new(type, help, default)`](#fn-valuenew) +* [`obj T`](#obj-t) +* [`obj package`](#obj-package) + * [`fn new(name, url, help, filename="", version="master")`](#fn-packagenew) + * [`fn newSub(name, help)`](#fn-packagenewsub) ## Fields ### fn arg -```ts -arg(name, type, default) +```jsonnet +arg(name, type, default, enums) ``` -`arg` is a shorthand for `argument.new` +PARAMETERS: + +* **name** (`string`) +* **type** (`string`) +* **default** (`any`) +* **enums** (`array`) +`arg` is a shorthand for `argument.new` ### fn fn -```ts +```jsonnet fn(help, args) ``` -`fn` is a shorthand for `func.new` +PARAMETERS: + +* **help** (`string`) +* **args** (`array`) +`fn` is a shorthand for `func.new` ### fn obj -```ts +```jsonnet obj(help, fields) ``` -`obj` is a shorthand for `object.new` +PARAMETERS: +* **help** (`string`) +* **fields** (`object`) + +`obj` is a shorthand for `object.new` ### fn pkg -```ts -pkg(name, url, help) +```jsonnet +pkg(name, url, help, filename="", version="master") ``` +PARAMETERS: + +* **name** (`string`) +* **url** (`string`) +* **help** (`string`) +* **filename** (`string`) + - default value: `""` +* **version** (`string`) + - default value: `"master"` + `new` is a shorthand for `package.new` +### fn render + +```jsonnet +render(obj) +``` + +PARAMETERS: + +* **obj** (`object`) + +`render` converts the docstrings to human readable Markdown files. + +Usage: + +```jsonnet +// docs.jsonnet +d.render(import 'main.libsonnet') +``` + +Call with: `jsonnet -S -c -m docs/ docs.jsonnet` ### fn val -```ts +```jsonnet val(type, help, default) ``` -`val` is a shorthand for `value.new` +PARAMETERS: -## obj argument +* **type** (`string`) +* **help** (`string`) +* **default** (`any`) + +`val` is a shorthand for `value.new` +### obj argument Utilities for creating function arguments -### fn argument.new +#### fn argument.fromSchema -```ts -new(name, type, default) +```jsonnet +argument.fromSchema(name, schema) ``` -new creates a new function argument, taking the name, the type and optionally a default value +PARAMETERS: + +* **name** (`string`) +* **schema** (`object`) + +`fromSchema` creates a new function argument, taking a JSON `schema` to describe the type information for this argument. 
+ +Examples: + +```jsonnet +[ + d.argument.fromSchema('foo', { type: 'string' }), + d.argument.fromSchema('bar', { type: 'string', default='loo' }), + d.argument.fromSchema('baz', { type: 'number', enum=[1,2,3] }), +] +``` -## obj func +#### fn argument.new + +```jsonnet +argument.new(name, type, default, enums) +``` + +PARAMETERS: + +* **name** (`string`) +* **type** (`string`) +* **default** (`any`) +* **enums** (`array`) + +`new` creates a new function argument, taking the `name`, the `type`. Optionally it +can take a `default` value and `enum`-erate potential values. + +Examples: + +```jsonnet +[ + d.argument.new('foo', d.T.string), + d.argument.new('bar', d.T.string, default='loo'), + d.argument.new('baz', d.T.number, enums=[1,2,3]), +] +``` + +### obj func Utilities for documenting Jsonnet methods (functions of objects) -### fn func.new +#### fn func.new -```ts -new(help, args) +```jsonnet +func.new(help, args) ``` -new creates a new function, optionally with description and arguments +PARAMETERS: + +* **help** (`string`) +* **args** (`array`) -### fn func.withArgs +new creates a new function, optionally with description and arguments +#### fn func.withArgs -```ts -withArgs(args) +```jsonnet +func.withArgs(args) ``` -The `withArgs` modifier overrides the arguments of that function +PARAMETERS: + +* **args** (`array`) -### fn func.withHelp +The `withArgs` modifier overrides the arguments of that function +#### fn func.withHelp -```ts -withHelp(help) +```jsonnet +func.withHelp(help) ``` -The `withHelp` modifier overrides the help text of that function +PARAMETERS: -## obj object +* **help** (`string`) + +The `withHelp` modifier overrides the help text of that function +### obj object Utilities for documenting Jsonnet objects (`{ }`). -### fn object.new +#### fn object.new -```ts -new(help, fields) +```jsonnet +object.new(help, fields) ``` -new creates a new object, optionally with description and fields +PARAMETERS: + +* **help** (`string`) +* **fields** (`object`) -### fn object.withFields +new creates a new object, optionally with description and fields +#### fn object.withFields -```ts -withFields(fields) +```jsonnet +object.withFields(fields) ``` +PARAMETERS: + +* **fields** (`object`) + The `withFields` modifier overrides the fields property of an already created object +### obj value + +Utilities for documenting plain Jsonnet values (primitives) + +#### fn value.new + +```jsonnet +value.new(type, help, default) +``` -## obj package +PARAMETERS: +* **type** (`string`) +* **help** (`string`) +* **default** (`any`) +new creates a new object of given type, optionally with description and default value +### obj T -### fn package.new +* `T.any` (`string`): `"any"` - argument of type "any" +* `T.array` (`string`): `"array"` - argument of type "array" +* `T.boolean` (`string`): `"bool"` - argument of type "boolean" +* `T.func` (`string`): `"function"` - argument of type "func" +* `T.null` (`string`): `"null"` - argument of type "null" +* `T.number` (`string`): `"number"` - argument of type "number" +* `T.object` (`string`): `"object"` - argument of type "object" +* `T.string` (`string`): `"string"` - argument of type "string" -```ts -new(name, url, help) +### obj package + + +#### fn package.new + +```jsonnet +package.new(name, url, help, filename="", version="master") ``` -new creates a new package with given `name`, `import` URL and `help` text +PARAMETERS: -## obj value +* **name** (`string`) +* **url** (`string`) +* **help** (`string`) +* **filename** (`string`) + - default value: 
`""` +* **version** (`string`) + - default value: `"master"` -Utilities for documenting plain Jsonnet values (primitives) +`new` creates a new package + +Arguments: -### fn value.new +* given `name` +* source `url` for jsonnet-bundler and the import +* `help` text +* `filename` for the import, defaults to blank for backward compatibility +* `version` for jsonnet-bundler install, defaults to `master` just like jsonnet-bundler -```ts -new(type, help, default) +#### fn package.newSub + +```jsonnet +package.newSub(name, help) ``` -new creates a new object of given type, optionally with description and default value \ No newline at end of file +PARAMETERS: + +* **name** (`string`) +* **help** (`string`) + +`newSub` creates a package without the preconfigured install/usage templates. + +Arguments: + +* given `name` +* `help` text diff --git a/example/tk/vendor/github.com/jsonnet-libs/docsonnet/doc-util/main.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/docsonnet/doc-util/main.libsonnet index 535badd317c..f3ec2984959 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/docsonnet/doc-util/main.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/docsonnet/doc-util/main.libsonnet @@ -1,21 +1,106 @@ { local d = self, - '#': d.pkg( - name='d', - url='github.com/jsonnet-libs/docsonnet/doc-util', - help=||| - `doc-util` provides a Jsonnet interface for `docsonnet`, - a Jsonnet API doc generator that uses structured data instead of comments. - ||| - ), + '#': + d.pkg( + name='doc-util', + url='github.com/jsonnet-libs/docsonnet/doc-util', + help=||| + `doc-util` provides a Jsonnet interface for `docsonnet`, + a Jsonnet API doc generator that uses structured data instead of comments. + |||, + filename=std.thisFile, + ) + + d.package.withUsageTemplate( + 'local d = import "%(import)s"' + ), package:: { - '#new':: d.fn('new creates a new package with given `name`, `import` URL and `help` text', [d.arg('name', d.T.string), d.arg('url', d.T.string), d.arg('help', d.T.string)]), - new(name, url, help):: { - name: name, - 'import': url, - help: help, + '#new':: d.fn(||| + `new` creates a new package + + Arguments: + + * given `name` + * source `url` for jsonnet-bundler and the import + * `help` text + * `filename` for the import, defaults to blank for backward compatibility + * `version` for jsonnet-bundler install, defaults to `master` just like jsonnet-bundler + |||, [ + d.arg('name', d.T.string), + d.arg('url', d.T.string), + d.arg('help', d.T.string), + d.arg('filename', d.T.string, ''), + d.arg('version', d.T.string, 'master'), + ]), + new(name, url, help, filename='', version='master'):: + { + name: name, + help: + help + + std.get(self, 'installTemplate', '') % self + + std.get(self, 'usageTemplate', '') % self, + 'import': + if filename != '' + then url + '/' + filename + else url, + url: url, + filename: filename, + version: version, + + } + + self.withInstallTemplate( + 'jb install %(url)s@%(version)s' + ) + + self.withUsageTemplate( + 'local %(name)s = import "%(import)s"' + ), + + '#newSub':: d.fn(||| + `newSub` creates a package without the preconfigured install/usage templates. 
+ + Arguments: + + * given `name` + * `help` text + |||, [ + d.arg('name', d.T.string), + d.arg('help', d.T.string), + ]), + newSub(name, help):: + { + name: name, + help: help, + }, + + withInstallTemplate(template):: { + installTemplate: + if template != null + then + ||| + + ## Install + + ``` + %s + ``` + ||| % template + else '', + }, + + withUsageTemplate(template):: { + usageTemplate: + if template != null + then + ||| + + ## Usage + + ```jsonnet + %s + ``` + ||| % template + else '', }, }, @@ -63,47 +148,116 @@ '#argument': d.obj('Utilities for creating function arguments'), argument:: { - '#new': d.fn('new creates a new function argument, taking the name, the type and optionally a default value', [d.arg('name', d.T.string), d.arg('type', d.T.string), d.arg('default', d.T.any)]), - new(name, type, default=null): { + '#new': d.fn(||| + `new` creates a new function argument, taking the `name`, the `type`. Optionally it + can take a `default` value and `enum`-erate potential values. + + Examples: + + ```jsonnet + [ + d.argument.new('foo', d.T.string), + d.argument.new('bar', d.T.string, default='loo'), + d.argument.new('baz', d.T.number, enums=[1,2,3]), + ] + ``` + |||, [ + d.arg('name', d.T.string), + d.arg('type', d.T.string), + d.arg('default', d.T.any), + d.arg('enums', d.T.array), + ]), + new(name, type, default=null, enums=null): { name: name, type: type, default: default, + enums: enums, + }, + '#fromSchema': d.fn(||| + `fromSchema` creates a new function argument, taking a JSON `schema` to describe the type information for this argument. + + Examples: + + ```jsonnet + [ + d.argument.fromSchema('foo', { type: 'string' }), + d.argument.fromSchema('bar', { type: 'string', default='loo' }), + d.argument.fromSchema('baz', { type: 'number', enum=[1,2,3] }), + ] + ``` + |||, [ + d.arg('name', d.T.string), + d.arg('schema', d.T.object), + ]), + fromSchema(name, schema): { + name: name, + schema: schema, }, }, '#arg': self.argument['#new'] + self.func.withHelp('`arg` is a shorthand for `argument.new`'), arg:: self.argument.new, - "#value": d.obj("Utilities for documenting plain Jsonnet values (primitives)"), + '#value': d.obj('Utilities for documenting plain Jsonnet values (primitives)'), value:: { - "#new": d.fn("new creates a new object of given type, optionally with description and default value", [d.arg("type", d.T.string), d.arg("help", d.T.string), d.arg("default", d.T.any)]), - new(type, help='', default=null): { 'value': { + '#new': d.fn('new creates a new object of given type, optionally with description and default value', [d.arg('type', d.T.string), d.arg('help', d.T.string), d.arg('default', d.T.any)]), + new(type, help='', default=null): { value: { help: help, type: type, default: default, - } } + } }, }, '#val': self.value['#new'] + self.func.withHelp('`val` is a shorthand for `value.new`'), - val: self.value.new, + val:: self.value.new, // T contains constants for the Jsonnet types T:: { + '#string': d.val(d.T.string, 'argument of type "string"'), string: 'string', + '#number': d.val(d.T.string, 'argument of type "number"'), number: 'number', int: self.number, integer: self.number, + '#boolean': d.val(d.T.string, 'argument of type "boolean"'), boolean: 'bool', bool: self.boolean, + '#object': d.val(d.T.string, 'argument of type "object"'), object: 'object', + + '#array': d.val(d.T.string, 'argument of type "array"'), array: 'array', + + '#any': d.val(d.T.string, 'argument of type "any"'), any: 'any', - 'null': "null", - nil: self["null"], + '#null': d.val(d.T.string, 
'argument of type "null"'), + 'null': 'null', + nil: self['null'], + '#func': d.val(d.T.string, 'argument of type "func"'), func: 'function', 'function': self.func, }, + + '#render': d.fn( + ||| + `render` converts the docstrings to human readable Markdown files. + + Usage: + + ```jsonnet + // docs.jsonnet + d.render(import 'main.libsonnet') + ``` + + Call with: `jsonnet -S -c -m docs/ docs.jsonnet` + |||, + args=[ + d.arg('obj', d.T.object), + ] + ), + render:: (import './render.libsonnet').render, + } diff --git a/example/tk/vendor/github.com/jsonnet-libs/docsonnet/doc-util/render.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/docsonnet/doc-util/render.libsonnet new file mode 100644 index 00000000000..758b0332945 --- /dev/null +++ b/example/tk/vendor/github.com/jsonnet-libs/docsonnet/doc-util/render.libsonnet @@ -0,0 +1,479 @@ +{ + local root = self, + + render(obj): + assert std.isObject(obj) && '#' in obj : 'error: object is not a docsonnet package'; + local package = self.package(obj); + package.toFiles(), + + findPackages(obj, path=[]): { + local find(obj, path, parentWasPackage=true) = + std.foldl( + function(acc, k) + acc + + ( + // If matches a package but warn if also has an object docstring + if '#' in obj[k] && '#' + k in obj + && !std.objectHasAll(obj[k]['#'], 'ignore') + then std.trace( + 'warning: %s both defined as object and package' % k, + [root.package(obj[k], path + [k], parentWasPackage)] + ) + // If matches a package, return it + else if '#' in obj[k] + && !std.objectHasAll(obj[k]['#'], 'ignore') + then [root.package(obj[k], path + [k], parentWasPackage)] + // If not, keep looking + else find(obj[k], path + [k], parentWasPackage=false) + ), + std.filter( + function(k) + !std.startsWith(k, '#') + && std.isObject(obj[k]), + std.objectFieldsAll(obj) + ), + [] + ), + + packages: find(obj, path), + + hasPackages(): std.length(self.packages) > 0, + + toIndex(relativeTo=[]): + if self.hasPackages() + then + std.join('\n', [ + '* ' + p.link(relativeTo) + for p in self.packages + ]) + + '\n' + else '', + + toFiles(): + std.foldl( + function(acc, p) + acc + + { [p.path]: p.toString() } + + p.packages.toFiles(), + self.packages, + {} + ), + }, + + package(obj, path=[], parentWasPackage=true): { + local this = self, + local doc = obj['#'], + + packages: root.findPackages(obj, path), + fields: root.fields(obj), + + local pathsuffix = + (if self.packages.hasPackages() + then '/index.md' + else '.md'), + + // filepath on disk + path: + std.join('/', path) + + pathsuffix, + + link(relativeTo): + local relativepath = root.util.getRelativePath(path, relativeTo); + '[%s](%s)' % [ + std.join('.', relativepath), + std.join('/', relativepath) + + pathsuffix, + ], + + toFiles(): + { 'README.md': this.toString() } + + self.packages.toFiles(), + + toString(): + std.join( + '\n', + [ + '# ' + doc.name + '\n', + std.get(doc, 'help', ''), + '', + ] + + (if self.packages.hasPackages() + then [ + '## Subpackages\n\n' + + self.packages.toIndex(path), + ] + else []) + + (if self.fields.hasFields() + then [ + '## Index\n\n' + + self.fields.toIndex() + + '\n## Fields\n' + + self.fields.toString(), + ] + else []) + ), + }, + + fields(obj, path=[]): { + values: root.findValues(obj, path), + functions: root.findFunctions(obj, path), + objects: root.findObjects(obj, path), + + hasFields(): + std.any([ + self.values.hasFields(), + self.functions.hasFields(), + self.objects.hasFields(), + ]), + + toIndex(): + std.join('', [ + self.functions.toIndex(), + self.objects.toIndex(), + ]), + + toString(): + 
std.join('', [ + self.values.toString(), + self.functions.toString(), + self.objects.toString(), + ]), + }, + + findObjects(obj, path=[]): { + local keys = + std.filter( + root.util.filter('object', obj), + std.objectFieldsAll(obj) + ), + + local undocumentedKeys = + std.filter( + function(k) + std.all([ + !std.startsWith(k, '#'), + std.isObject(obj[k]), + !std.objectHasAll(obj[k], 'ignore'), + !('#' + k in obj), // not documented in parent + !('#' in obj[k]), // not a sub package + ]), + std.objectFieldsAll(obj) + ), + + objects: + std.foldl( + function(acc, k) + acc + [ + root.obj( + root.util.realkey(k), + obj[k], + obj[root.util.realkey(k)], + path, + ), + ], + keys, + [] + ) + + std.foldl( + function(acc, k) + local o = root.obj( + k, + { object: { help: '' } }, + obj[k], + path, + ); + acc + + (if o.fields.hasFields() + then [o] + else []), + undocumentedKeys, + [] + ), + + hasFields(): std.length(self.objects) > 0, + + toIndex(): + if self.hasFields() + then + std.join('', [ + std.join( + '', + [' ' for d in std.range(0, (std.length(path) * 2) - 1)] + + ['* ', f.link] + + ['\n'] + + (if f.fields.hasFields() + then [f.fields.toIndex()] + else []) + ) + for f in self.objects + ]) + else '', + + toString(): + if self.hasFields() + then + std.join('', [ + o.toString() + for o in self.objects + ]) + else '', + }, + + obj(name, doc, obj, path): { + fields: root.fields(obj, path + [name]), + + path: std.join('.', path + [name]), + fragment: root.util.fragment(std.join('', path + [name])), + link: '[`obj %s`](#obj-%s)' % [name, self.fragment], + + toString(): + std.join( + '\n', + [root.util.title('obj ' + self.path, std.length(path) + 2)] + + (if std.get(doc.object, 'help', '') != '' + then [doc.object.help] + else []) + + [self.fields.toString()] + ), + }, + + findFunctions(obj, path=[]): { + local keys = + std.filter( + root.util.filter('function', obj), + std.objectFieldsAll(obj) + ), + + functions: + std.foldl( + function(acc, k) + acc + [ + root.func( + root.util.realkey(k), + obj[k], + path, + ), + ], + keys, + [] + ), + + hasFields(): std.length(self.functions) > 0, + + toIndex(): + if self.hasFields() + then + std.join('', [ + std.join( + '', + [' ' for d in std.range(0, (std.length(path) * 2) - 1)] + + ['* ', f.link] + + ['\n'] + ) + for f in self.functions + ]) + else '', + + toString(): + if self.hasFields() + then + std.join('', [ + f.toString() + for f in self.functions + ]) + else '', + }, + + func(name, doc, path): { + path: std.join('.', path + [name]), + fragment: root.util.fragment(std.join('', path + [name])), + link: '[`fn %s(%s)`](#fn-%s)' % [name, self.args, self.fragment], + + local getType(arg) = + local type = + if 'schema' in arg + then std.get(arg.schema, 'type', '') + else std.get(arg, 'type', ''); + if std.isArray(type) + then std.join(',', ['`%s`' % t for t in std.set(type)]) + else '`%s`' % type, + + // Use BelRune as default can be 'null' as a value. Only supported for arg.schema, arg.default didn't support this, not sure how to support without breaking asssumptions downstream. 
+ local BelRune = std.char(7), + local getDefault(arg) = + if 'schema' in arg + then std.get(arg.schema, 'default', BelRune) + else + local d = std.get(arg, 'default', BelRune); + if d == null + then BelRune + else d, + + local getEnum(arg) = + if 'schema' in arg + then std.get(arg.schema, 'enum', []) + else + local d = std.get(arg, 'enums', []); + if d == null + then [] + else d, + + local manifest(value) = + std.manifestJsonEx(value, '', '', ': '), + + args: + std.join(', ', [ + local default = getDefault(arg); + if default != BelRune + then std.join('=', [ + arg.name, + manifest(default), + ]) + else arg.name + for arg in doc['function'].args + ]), + + + args_list: + if std.length(doc['function'].args) > 0 + then + '\nPARAMETERS:\n\n' + + std.join('\n', [ + '* **%s** (%s)' % [arg.name, getType(arg)] + + ( + local default = getDefault(arg); + if default != BelRune + then '\n - default value: `%s`' % manifest(default) + else '' + ) + + ( + local enum = getEnum(arg); + if enum != [] + then + '\n - valid values: %s' % + std.join(', ', [ + '`%s`' % manifest(item) + for item in enum + ]) + else '' + ) + for arg in doc['function'].args + ]) + else '', + + toString(): + std.join('\n', [ + root.util.title('fn ' + self.path, std.length(path) + 2), + ||| + ```jsonnet + %s(%s) + ``` + %s + ||| % [self.path, self.args, self.args_list], + std.get(doc['function'], 'help', ''), + ]), + }, + + findValues(obj, path=[]): { + local keys = + std.filter( + root.util.filter('value', obj), + std.objectFieldsAll(obj) + ), + + values: + std.foldl( + function(acc, k) + acc + [ + root.val( + root.util.realkey(k), + obj[k], + obj[root.util.realkey(k)], + path, + ), + ], + keys, + [] + ), + + hasFields(): std.length(self.values) > 0, + + toString(): + if self.hasFields() + then + std.join('\n', [ + '* ' + f.toString() + for f in self.values + ]) + '\n' + else '', + }, + + val(name, doc, obj, path): { + toString(): + std.join(' ', [ + '`%s`' % std.join('.', path + [name]), + '(`%s`):' % doc.value.type, + '`"%s"`' % obj, + '-', + std.get(doc.value, 'help', ''), + ]), + }, + + util: { + realkey(key): + assert std.startsWith(key, '#') : 'Key %s not a docstring key' % key; + key[1:], + title(title, depth=0): + std.join( + '', + ['\n'] + + ['#' for i in std.range(0, depth)] + + [' ', title, '\n'] + ), + fragment(title): + std.asciiLower( + std.strReplace( + std.strReplace(title, '.', '') + , ' ', '-' + ) + ), + filter(type, obj): + function(k) + std.all([ + std.startsWith(k, '#'), + std.isObject(obj[k]), + !std.objectHasAll(obj[k], 'ignore'), + type in obj[k], + root.util.realkey(k) in obj, + ]), + + getRelativePath(path, relativeTo): + local shortest = std.min(std.length(relativeTo), std.length(path)); + + local commonIndex = + std.foldl( + function(acc, i) ( + if acc.stop + then acc + else + acc + { + // stop count if path diverges + local stop = relativeTo[i] != path[i], + stop: stop, + count+: if stop then 0 else 1, + } + ), + std.range(0, shortest - 1), + { stop: false, count: 0 } + ).count; + + local _relativeTo = relativeTo[commonIndex:]; + local _path = path[commonIndex:]; + + // prefix for relative difference + local prefix = ['..' 
for i in std.range(0, std.length(_relativeTo) - 1)]; + + // return path with prefix + prefix + _path, + }, +} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_custom/volumeMounts.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_custom/volumeMounts.libsonnet deleted file mode 100644 index 2f3e74aaaee..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_custom/volumeMounts.libsonnet +++ /dev/null @@ -1,169 +0,0 @@ -local d = import 'doc-util/main.libsonnet'; - -{ - local container = $.core.v1.container, - local volumeMount = $.core.v1.volumeMount, - local volume = $.core.v1.volume, - - local patch = { - local volumeMountDescription = - ||| - This helper function can be augmented with a `volumeMountsMixin. For example, - passing "k.core.v1.volumeMount.withSubPath(subpath)" will result in a subpath - mixin. - |||, - - - '#configVolumeMount': d.fn( - '`configVolumeMount` mounts a ConfigMap by `name` into all container on `path`.' - + volumeMountDescription, - [ - d.arg('name', d.T.string), - d.arg('path', d.T.string), - d.arg('volumeMountMixin', d.T.object), - ] - ), - configVolumeMount(name, path, volumeMountMixin={}):: - local addMount(c) = c + container.withVolumeMountsMixin( - volumeMount.new(name, path) + - volumeMountMixin, - ); - - super.mapContainers(addMount) + - super.spec.template.spec.withVolumesMixin([ - volume.fromConfigMap(name, name), - ]), - - - '#configMapVolumeMount': d.fn( - ||| - `configMapVolumeMount` mounts a `configMap` into all container on `path`. It will - also add an annotation hash to ensure the pods are re-deployed when the config map - changes. - ||| - + volumeMountDescription, - [ - d.arg('configMap', d.T.object), - d.arg('path', d.T.string), - d.arg('volumeMountMixin', d.T.object), - ] - ), - configMapVolumeMount(configMap, path, volumeMountMixin={}):: - local name = configMap.metadata.name, - hash = std.md5(std.toString(configMap)), - addMount(c) = c + container.withVolumeMountsMixin( - volumeMount.new(name, path) + - volumeMountMixin, - ); - - super.mapContainers(addMount) + - super.spec.template.spec.withVolumesMixin([ - volume.fromConfigMap(name, name), - ]) + - super.spec.template.metadata.withAnnotationsMixin({ - ['%s-hash' % name]: hash, - }), - - - '#hostVolumeMount': d.fn( - '`hostVolumeMount` mounts a `hostPath` into all container on `path`.' - + volumeMountDescription, - [ - d.arg('name', d.T.string), - d.arg('hostPath', d.T.string), - d.arg('path', d.T.string), - d.arg('readOnly', d.T.bool), - d.arg('volumeMountMixin', d.T.object), - ] - ), - hostVolumeMount(name, hostPath, path, readOnly=false, volumeMountMixin={}):: - local addMount(c) = c + container.withVolumeMountsMixin( - volumeMount.new(name, path, readOnly=readOnly) + - volumeMountMixin, - ); - - super.mapContainers(addMount) + - super.spec.template.spec.withVolumesMixin([ - volume.fromHostPath(name, hostPath), - ]), - - - '#pvcVolumeMount': d.fn( - '`hostVolumeMount` mounts a PersistentVolumeClaim by `name` into all container on `path`.' 
- + volumeMountDescription, - [ - d.arg('name', d.T.string), - d.arg('path', d.T.string), - d.arg('readOnly', d.T.bool), - d.arg('volumeMountMixin', d.T.object), - ] - ), - pvcVolumeMount(name, path, readOnly=false, volumeMountMixin={}):: - local addMount(c) = c + container.withVolumeMountsMixin( - volumeMount.new(name, path, readOnly=readOnly) + - volumeMountMixin, - ); - - super.mapContainers(addMount) + - super.spec.template.spec.withVolumesMixin([ - volume.fromPersistentVolumeClaim(name, name), - ]), - - - '#secretVolumeMount': d.fn( - '`secretVolumeMount` mounts a Secret by `name` into all container on `path`.' - + volumeMountDescription, - [ - d.arg('name', d.T.string), - d.arg('path', d.T.string), - d.arg('defaultMode', d.T.string), - d.arg('volumeMountMixin', d.T.object), - ] - ), - secretVolumeMount(name, path, defaultMode=256, volumeMountMixin={}):: - local addMount(c) = c + container.withVolumeMountsMixin( - volumeMount.new(name, path) + - volumeMountMixin, - ); - - super.mapContainers(addMount) + - super.spec.template.spec.withVolumesMixin([ - volume.fromSecret(name, secretName=name) + - volume.secret.withDefaultMode(defaultMode), - ]), - - - '#emptyVolumeMount': d.fn( - '`emptyVolumeMount` mounts empty volume by `name` into all container on `path`.' - + volumeMountDescription, - [ - d.arg('name', d.T.string), - d.arg('path', d.T.string), - d.arg('volumeMountMixin', d.T.object), - d.arg('volumeMixin', d.T.object), - ] - ), - emptyVolumeMount(name, path, volumeMountMixin={}, volumeMixin={}):: - local addMount(c) = c + container.withVolumeMountsMixin( - volumeMount.new(name, path) + - volumeMountMixin, - ); - - super.mapContainers(addMount) + - super.spec.template.spec.withVolumesMixin([ - volume.fromEmptyDir(name) + volumeMixin, - ]), - }, - - batch+: { - v1+: { - job+: patch, - }, - }, - apps+: { v1+: { - daemonSet+: patch, - deployment+: patch, - replicaSet+: patch, - statefulSet+: patch, - } }, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/admissionregistration/v1beta1/main.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/admissionregistration/v1beta1/main.libsonnet deleted file mode 100644 index 8802f84a51d..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/admissionregistration/v1beta1/main.libsonnet +++ /dev/null @@ -1,11 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='v1beta1', url='', help=''), - mutatingWebhook: (import 'mutatingWebhook.libsonnet'), - mutatingWebhookConfiguration: (import 'mutatingWebhookConfiguration.libsonnet'), - ruleWithOperations: (import 'ruleWithOperations.libsonnet'), - serviceReference: (import 'serviceReference.libsonnet'), - validatingWebhook: (import 'validatingWebhook.libsonnet'), - validatingWebhookConfiguration: (import 'validatingWebhookConfiguration.libsonnet'), - webhookClientConfig: (import 'webhookClientConfig.libsonnet'), -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/admissionregistration/v1beta1/mutatingWebhook.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/admissionregistration/v1beta1/mutatingWebhook.libsonnet deleted file mode 100644 index 911d7310348..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/admissionregistration/v1beta1/mutatingWebhook.libsonnet +++ /dev/null @@ -1,66 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='mutatingWebhook', url='', 
help='"MutatingWebhook describes an admission webhook and the resources and operations it applies to."'), - '#clientConfig':: d.obj(help='"WebhookClientConfig contains the information to make a TLS connection with the webhook"'), - clientConfig: { - '#service':: d.obj(help='"ServiceReference holds a reference to Service.legacy.k8s.io"'), - service: { - '#withName':: d.fn(help='"`name` is the name of the service. Required"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { clientConfig+: { service+: { name: name } } }, - '#withNamespace':: d.fn(help='"`namespace` is the namespace of the service. Required"', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { clientConfig+: { service+: { namespace: namespace } } }, - '#withPath':: d.fn(help='"`path` is an optional URL path which will be sent in any request to this service."', args=[d.arg(name='path', type=d.T.string)]), - withPath(path): { clientConfig+: { service+: { path: path } } }, - '#withPort':: d.fn(help='"If specified, the port on the service that hosting webhook. Default to 443 for backward compatibility. `port` should be a valid port number (1-65535, inclusive)."', args=[d.arg(name='port', type=d.T.integer)]), - withPort(port): { clientConfig+: { service+: { port: port } } }, - }, - '#withCaBundle':: d.fn(help="\"`caBundle` is a PEM encoded CA bundle which will be used to validate the webhook's server certificate. If unspecified, system trust roots on the apiserver are used.\"", args=[d.arg(name='caBundle', type=d.T.string)]), - withCaBundle(caBundle): { clientConfig+: { caBundle: caBundle } }, - '#withUrl':: d.fn(help='"`url` gives the location of the webhook, in standard URL form (`scheme://host:port/path`). Exactly one of `url` or `service` must be specified.\\n\\nThe `host` should not refer to a service running in the cluster; use the `service` field instead. The host might be resolved via external DNS in some apiservers (e.g., `kube-apiserver` cannot resolve in-cluster DNS as that would be a layering violation). `host` may also be an IP address.\\n\\nPlease note that using `localhost` or `127.0.0.1` as a `host` is risky unless you take great care to run this webhook on all hosts which run an apiserver which might need to make calls to this webhook. Such installs are likely to be non-portable, i.e., not easy to turn up in a new cluster.\\n\\nThe scheme must be \\"https\\"; the URL must begin with \\"https://\\".\\n\\nA path is optional, and if present may be any string permissible in a URL. You may use the path to pass an arbitrary string to the webhook, for example, a cluster identifier.\\n\\nAttempting to use a user or basic auth e.g. \\"user:password@\\" is not allowed. Fragments (\\"#...\\") and query parameters (\\"?...\\") are not allowed, either."', args=[d.arg(name='url', type=d.T.string)]), - withUrl(url): { clientConfig+: { url: url } }, - }, - '#namespaceSelector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), - namespaceSelector: { - '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. 
The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), - withMatchExpressions(matchExpressions): { namespaceSelector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } }, - '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), - withMatchExpressionsMixin(matchExpressions): { namespaceSelector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } }, - '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), - withMatchLabels(matchLabels): { namespaceSelector+: { matchLabels: matchLabels } }, - '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), - withMatchLabelsMixin(matchLabels): { namespaceSelector+: { matchLabels+: matchLabels } }, - }, - '#objectSelector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), - objectSelector: { - '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), - withMatchExpressions(matchExpressions): { objectSelector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } }, - '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), - withMatchExpressionsMixin(matchExpressions): { objectSelector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } }, - '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), - withMatchLabels(matchLabels): { objectSelector+: { matchLabels: matchLabels } }, - '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". 
The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), - withMatchLabelsMixin(matchLabels): { objectSelector+: { matchLabels+: matchLabels } }, - }, - '#withAdmissionReviewVersions':: d.fn(help="\"AdmissionReviewVersions is an ordered list of preferred `AdmissionReview` versions the Webhook expects. API server will try to use first version in the list which it supports. If none of the versions specified in this list supported by API server, validation will fail for this object. If a persisted webhook configuration specifies allowed versions and does not include any versions known to the API Server, calls to the webhook will fail and be subject to the failure policy. Default to `['v1beta1']`.\"", args=[d.arg(name='admissionReviewVersions', type=d.T.array)]), - withAdmissionReviewVersions(admissionReviewVersions): { admissionReviewVersions: if std.isArray(v=admissionReviewVersions) then admissionReviewVersions else [admissionReviewVersions] }, - '#withAdmissionReviewVersionsMixin':: d.fn(help="\"AdmissionReviewVersions is an ordered list of preferred `AdmissionReview` versions the Webhook expects. API server will try to use first version in the list which it supports. If none of the versions specified in this list supported by API server, validation will fail for this object. If a persisted webhook configuration specifies allowed versions and does not include any versions known to the API Server, calls to the webhook will fail and be subject to the failure policy. Default to `['v1beta1']`.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='admissionReviewVersions', type=d.T.array)]), - withAdmissionReviewVersionsMixin(admissionReviewVersions): { admissionReviewVersions+: if std.isArray(v=admissionReviewVersions) then admissionReviewVersions else [admissionReviewVersions] }, - '#withFailurePolicy':: d.fn(help='"FailurePolicy defines how unrecognized errors from the admission endpoint are handled - allowed values are Ignore or Fail. Defaults to Ignore."', args=[d.arg(name='failurePolicy', type=d.T.string)]), - withFailurePolicy(failurePolicy): { failurePolicy: failurePolicy }, - '#withMatchPolicy':: d.fn(help='"matchPolicy defines how the \\"rules\\" list is used to match incoming requests. Allowed values are \\"Exact\\" or \\"Equivalent\\".\\n\\n- Exact: match a request only if it exactly matches a specified rule. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, but \\"rules\\" only included `apiGroups:[\\"apps\\"], apiVersions:[\\"v1\\"], resources: [\\"deployments\\"]`, a request to apps/v1beta1 or extensions/v1beta1 would not be sent to the webhook.\\n\\n- Equivalent: match a request if modifies a resource listed in rules, even via another API group or version. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, and \\"rules\\" only included `apiGroups:[\\"apps\\"], apiVersions:[\\"v1\\"], resources: [\\"deployments\\"]`, a request to apps/v1beta1 or extensions/v1beta1 would be converted to apps/v1 and sent to the webhook.\\n\\nDefaults to \\"Exact\\', args=[d.arg(name='matchPolicy', type=d.T.string)]), - withMatchPolicy(matchPolicy): { matchPolicy: matchPolicy }, - '#withName':: d.fn(help='"The name of the admission webhook. 
Name should be fully qualified, e.g., imagepolicy.kubernetes.io, where \\"imagepolicy\\" is the name of the webhook, and kubernetes.io is the name of the organization. Required."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { name: name }, - '#withReinvocationPolicy':: d.fn(help='"reinvocationPolicy indicates whether this webhook should be called multiple times as part of a single admission evaluation. Allowed values are \\"Never\\" and \\"IfNeeded\\".\\n\\nNever: the webhook will not be called more than once in a single admission evaluation.\\n\\nIfNeeded: the webhook will be called at least one additional time as part of the admission evaluation if the object being admitted is modified by other admission plugins after the initial webhook call. Webhooks that specify this option *must* be idempotent, able to process objects they previously admitted. Note: * the number of additional invocations is not guaranteed to be exactly one. * if additional invocations result in further modifications to the object, webhooks are not guaranteed to be invoked again. * webhooks that use this option may be reordered to minimize the number of additional invocations. * to validate an object after all mutations are guaranteed complete, use a validating admission webhook instead.\\n\\nDefaults to \\"Never\\"."', args=[d.arg(name='reinvocationPolicy', type=d.T.string)]), - withReinvocationPolicy(reinvocationPolicy): { reinvocationPolicy: reinvocationPolicy }, - '#withRules':: d.fn(help='"Rules describes what operations on what resources/subresources the webhook cares about. The webhook cares about an operation if it matches _any_ Rule. However, in order to prevent ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks from putting the cluster in a state which cannot be recovered from without completely disabling the plugin, ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks are never called on admission requests for ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects."', args=[d.arg(name='rules', type=d.T.array)]), - withRules(rules): { rules: if std.isArray(v=rules) then rules else [rules] }, - '#withRulesMixin':: d.fn(help='"Rules describes what operations on what resources/subresources the webhook cares about. The webhook cares about an operation if it matches _any_ Rule. However, in order to prevent ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks from putting the cluster in a state which cannot be recovered from without completely disabling the plugin, ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks are never called on admission requests for ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='rules', type=d.T.array)]), - withRulesMixin(rules): { rules+: if std.isArray(v=rules) then rules else [rules] }, - '#withSideEffects':: d.fn(help='"SideEffects states whether this webhook has side effects. Acceptable values are: Unknown, None, Some, NoneOnDryRun Webhooks with side effects MUST implement a reconciliation system, since a request may be rejected by a future step in the admission chain and the side effects therefore need to be undone. Requests with the dryRun attribute will be auto-rejected if they match a webhook with sideEffects == Unknown or Some. 
Defaults to Unknown."', args=[d.arg(name='sideEffects', type=d.T.string)]), - withSideEffects(sideEffects): { sideEffects: sideEffects }, - '#withTimeoutSeconds':: d.fn(help='"TimeoutSeconds specifies the timeout for this webhook. After the timeout passes, the webhook call will be ignored or the API call will fail based on the failure policy. The timeout value must be between 1 and 30 seconds. Default to 30 seconds."', args=[d.arg(name='timeoutSeconds', type=d.T.integer)]), - withTimeoutSeconds(timeoutSeconds): { timeoutSeconds: timeoutSeconds }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/admissionregistration/v1beta1/mutatingWebhookConfiguration.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/admissionregistration/v1beta1/mutatingWebhookConfiguration.libsonnet deleted file mode 100644 index ed5a67a18bf..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/admissionregistration/v1beta1/mutatingWebhookConfiguration.libsonnet +++ /dev/null @@ -1,60 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='mutatingWebhookConfiguration', url='', help='"MutatingWebhookConfiguration describes the configuration of and admission webhook that accept or reject and may change the object. Deprecated in v1.16, planned for removal in v1.19. Use admissionregistration.k8s.io/v1 MutatingWebhookConfiguration instead."'), - '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), - metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), - withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), - withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, - '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. 
Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), - withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, - '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), - withDeletionGracePeriodSeconds(deletionGracePeriodSeconds): { metadata+: { deletionGracePeriodSeconds: deletionGracePeriodSeconds } }, - '#withDeletionTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='deletionTimestamp', type=d.T.string)]), - withDeletionTimestamp(deletionTimestamp): { metadata+: { deletionTimestamp: deletionTimestamp } }, - '#withFinalizers':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. 
If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), - withGenerateName(generateName): { metadata+: { generateName: generateName } }, - '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), - withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), - withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), - withLabelsMixin(labels): { metadata+: { labels+: labels } }, - '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. 
Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { metadata+: { namespace: namespace } }, - '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, - '#withOwnerReferencesMixin':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, - '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), - withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), - withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. 
It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), - withUid(uid): { metadata+: { uid: uid } }, - }, - '#new':: d.fn(help='new returns an instance of MutatingWebhookConfiguration', args=[d.arg(name='name', type=d.T.string)]), - new(name): { - apiVersion: 'admissionregistration.k8s.io/v1beta1', - kind: 'MutatingWebhookConfiguration', - } + self.metadata.withName(name=name), - '#withWebhooks':: d.fn(help='"Webhooks is a list of webhooks and the affected resources and operations."', args=[d.arg(name='webhooks', type=d.T.array)]), - withWebhooks(webhooks): { webhooks: if std.isArray(v=webhooks) then webhooks else [webhooks] }, - '#withWebhooksMixin':: d.fn(help='"Webhooks is a list of webhooks and the affected resources and operations."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='webhooks', type=d.T.array)]), - withWebhooksMixin(webhooks): { webhooks+: if std.isArray(v=webhooks) then webhooks else [webhooks] }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/admissionregistration/v1beta1/validatingWebhook.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/admissionregistration/v1beta1/validatingWebhook.libsonnet deleted file mode 100644 index fb2074edb84..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/admissionregistration/v1beta1/validatingWebhook.libsonnet +++ /dev/null @@ -1,64 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='validatingWebhook', url='', help='"ValidatingWebhook describes an admission webhook and the resources and operations it applies to."'), - '#clientConfig':: d.obj(help='"WebhookClientConfig contains the information to make a TLS connection with the webhook"'), - clientConfig: { - '#service':: d.obj(help='"ServiceReference holds a reference to Service.legacy.k8s.io"'), - service: { - '#withName':: d.fn(help='"`name` is the name of the service. Required"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { clientConfig+: { service+: { name: name } } }, - '#withNamespace':: d.fn(help='"`namespace` is the namespace of the service. Required"', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { clientConfig+: { service+: { namespace: namespace } } }, - '#withPath':: d.fn(help='"`path` is an optional URL path which will be sent in any request to this service."', args=[d.arg(name='path', type=d.T.string)]), - withPath(path): { clientConfig+: { service+: { path: path } } }, - '#withPort':: d.fn(help='"If specified, the port on the service that hosting webhook. Default to 443 for backward compatibility. `port` should be a valid port number (1-65535, inclusive)."', args=[d.arg(name='port', type=d.T.integer)]), - withPort(port): { clientConfig+: { service+: { port: port } } }, - }, - '#withCaBundle':: d.fn(help="\"`caBundle` is a PEM encoded CA bundle which will be used to validate the webhook's server certificate. If unspecified, system trust roots on the apiserver are used.\"", args=[d.arg(name='caBundle', type=d.T.string)]), - withCaBundle(caBundle): { clientConfig+: { caBundle: caBundle } }, - '#withUrl':: d.fn(help='"`url` gives the location of the webhook, in standard URL form (`scheme://host:port/path`). 
Exactly one of `url` or `service` must be specified.\\n\\nThe `host` should not refer to a service running in the cluster; use the `service` field instead. The host might be resolved via external DNS in some apiservers (e.g., `kube-apiserver` cannot resolve in-cluster DNS as that would be a layering violation). `host` may also be an IP address.\\n\\nPlease note that using `localhost` or `127.0.0.1` as a `host` is risky unless you take great care to run this webhook on all hosts which run an apiserver which might need to make calls to this webhook. Such installs are likely to be non-portable, i.e., not easy to turn up in a new cluster.\\n\\nThe scheme must be \\"https\\"; the URL must begin with \\"https://\\".\\n\\nA path is optional, and if present may be any string permissible in a URL. You may use the path to pass an arbitrary string to the webhook, for example, a cluster identifier.\\n\\nAttempting to use a user or basic auth e.g. \\"user:password@\\" is not allowed. Fragments (\\"#...\\") and query parameters (\\"?...\\") are not allowed, either."', args=[d.arg(name='url', type=d.T.string)]), - withUrl(url): { clientConfig+: { url: url } }, - }, - '#namespaceSelector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), - namespaceSelector: { - '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), - withMatchExpressions(matchExpressions): { namespaceSelector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } }, - '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), - withMatchExpressionsMixin(matchExpressions): { namespaceSelector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } }, - '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), - withMatchLabels(matchLabels): { namespaceSelector+: { matchLabels: matchLabels } }, - '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), - withMatchLabelsMixin(matchLabels): { namespaceSelector+: { matchLabels+: matchLabels } }, - }, - '#objectSelector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. 
A null label selector matches no objects."'), - objectSelector: { - '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), - withMatchExpressions(matchExpressions): { objectSelector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } }, - '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), - withMatchExpressionsMixin(matchExpressions): { objectSelector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } }, - '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), - withMatchLabels(matchLabels): { objectSelector+: { matchLabels: matchLabels } }, - '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), - withMatchLabelsMixin(matchLabels): { objectSelector+: { matchLabels+: matchLabels } }, - }, - '#withAdmissionReviewVersions':: d.fn(help="\"AdmissionReviewVersions is an ordered list of preferred `AdmissionReview` versions the Webhook expects. API server will try to use first version in the list which it supports. If none of the versions specified in this list supported by API server, validation will fail for this object. If a persisted webhook configuration specifies allowed versions and does not include any versions known to the API Server, calls to the webhook will fail and be subject to the failure policy. Default to `['v1beta1']`.\"", args=[d.arg(name='admissionReviewVersions', type=d.T.array)]), - withAdmissionReviewVersions(admissionReviewVersions): { admissionReviewVersions: if std.isArray(v=admissionReviewVersions) then admissionReviewVersions else [admissionReviewVersions] }, - '#withAdmissionReviewVersionsMixin':: d.fn(help="\"AdmissionReviewVersions is an ordered list of preferred `AdmissionReview` versions the Webhook expects. API server will try to use first version in the list which it supports. If none of the versions specified in this list supported by API server, validation will fail for this object. If a persisted webhook configuration specifies allowed versions and does not include any versions known to the API Server, calls to the webhook will fail and be subject to the failure policy. 
Default to `['v1beta1']`.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='admissionReviewVersions', type=d.T.array)]), - withAdmissionReviewVersionsMixin(admissionReviewVersions): { admissionReviewVersions+: if std.isArray(v=admissionReviewVersions) then admissionReviewVersions else [admissionReviewVersions] }, - '#withFailurePolicy':: d.fn(help='"FailurePolicy defines how unrecognized errors from the admission endpoint are handled - allowed values are Ignore or Fail. Defaults to Ignore."', args=[d.arg(name='failurePolicy', type=d.T.string)]), - withFailurePolicy(failurePolicy): { failurePolicy: failurePolicy }, - '#withMatchPolicy':: d.fn(help='"matchPolicy defines how the \\"rules\\" list is used to match incoming requests. Allowed values are \\"Exact\\" or \\"Equivalent\\".\\n\\n- Exact: match a request only if it exactly matches a specified rule. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, but \\"rules\\" only included `apiGroups:[\\"apps\\"], apiVersions:[\\"v1\\"], resources: [\\"deployments\\"]`, a request to apps/v1beta1 or extensions/v1beta1 would not be sent to the webhook.\\n\\n- Equivalent: match a request if modifies a resource listed in rules, even via another API group or version. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, and \\"rules\\" only included `apiGroups:[\\"apps\\"], apiVersions:[\\"v1\\"], resources: [\\"deployments\\"]`, a request to apps/v1beta1 or extensions/v1beta1 would be converted to apps/v1 and sent to the webhook.\\n\\nDefaults to \\"Exact\\', args=[d.arg(name='matchPolicy', type=d.T.string)]), - withMatchPolicy(matchPolicy): { matchPolicy: matchPolicy }, - '#withName':: d.fn(help='"The name of the admission webhook. Name should be fully qualified, e.g., imagepolicy.kubernetes.io, where \\"imagepolicy\\" is the name of the webhook, and kubernetes.io is the name of the organization. Required."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { name: name }, - '#withRules':: d.fn(help='"Rules describes what operations on what resources/subresources the webhook cares about. The webhook cares about an operation if it matches _any_ Rule. However, in order to prevent ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks from putting the cluster in a state which cannot be recovered from without completely disabling the plugin, ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks are never called on admission requests for ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects."', args=[d.arg(name='rules', type=d.T.array)]), - withRules(rules): { rules: if std.isArray(v=rules) then rules else [rules] }, - '#withRulesMixin':: d.fn(help='"Rules describes what operations on what resources/subresources the webhook cares about. The webhook cares about an operation if it matches _any_ Rule. 
However, in order to prevent ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks from putting the cluster in a state which cannot be recovered from without completely disabling the plugin, ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks are never called on admission requests for ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='rules', type=d.T.array)]), - withRulesMixin(rules): { rules+: if std.isArray(v=rules) then rules else [rules] }, - '#withSideEffects':: d.fn(help='"SideEffects states whether this webhook has side effects. Acceptable values are: Unknown, None, Some, NoneOnDryRun Webhooks with side effects MUST implement a reconciliation system, since a request may be rejected by a future step in the admission chain and the side effects therefore need to be undone. Requests with the dryRun attribute will be auto-rejected if they match a webhook with sideEffects == Unknown or Some. Defaults to Unknown."', args=[d.arg(name='sideEffects', type=d.T.string)]), - withSideEffects(sideEffects): { sideEffects: sideEffects }, - '#withTimeoutSeconds':: d.fn(help='"TimeoutSeconds specifies the timeout for this webhook. After the timeout passes, the webhook call will be ignored or the API call will fail based on the failure policy. The timeout value must be between 1 and 30 seconds. Default to 30 seconds."', args=[d.arg(name='timeoutSeconds', type=d.T.integer)]), - withTimeoutSeconds(timeoutSeconds): { timeoutSeconds: timeoutSeconds }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/admissionregistration/v1beta1/validatingWebhookConfiguration.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/admissionregistration/v1beta1/validatingWebhookConfiguration.libsonnet deleted file mode 100644 index 2325ced2e14..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/admissionregistration/v1beta1/validatingWebhookConfiguration.libsonnet +++ /dev/null @@ -1,60 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='validatingWebhookConfiguration', url='', help='"ValidatingWebhookConfiguration describes the configuration of and admission webhook that accept or reject and object without changing it. Deprecated in v1.16, planned for removal in v1.19. Use admissionregistration.k8s.io/v1 ValidatingWebhookConfiguration instead."'), - '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), - metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), - withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. 
More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), - withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, - '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), - withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, - '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), - withDeletionGracePeriodSeconds(deletionGracePeriodSeconds): { metadata+: { deletionGracePeriodSeconds: deletionGracePeriodSeconds } }, - '#withDeletionTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='deletionTimestamp', type=d.T.string)]), - withDeletionTimestamp(deletionTimestamp): { metadata+: { deletionTimestamp: deletionTimestamp } }, - '#withFinalizers':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. 
If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), - withGenerateName(generateName): { metadata+: { generateName: generateName } }, - '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), - withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), - withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), - withLabelsMixin(labels): { metadata+: { labels+: labels } }, - '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". 
The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { metadata+: { namespace: namespace } }, - '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, - '#withOwnerReferencesMixin':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, - '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. 
May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), - withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), - withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), - withUid(uid): { metadata+: { uid: uid } }, - }, - '#new':: d.fn(help='new returns an instance of ValidatingWebhookConfiguration', args=[d.arg(name='name', type=d.T.string)]), - new(name): { - apiVersion: 'admissionregistration.k8s.io/v1beta1', - kind: 'ValidatingWebhookConfiguration', - } + self.metadata.withName(name=name), - '#withWebhooks':: d.fn(help='"Webhooks is a list of webhooks and the affected resources and operations."', args=[d.arg(name='webhooks', type=d.T.array)]), - withWebhooks(webhooks): { webhooks: if std.isArray(v=webhooks) then webhooks else [webhooks] }, - '#withWebhooksMixin':: d.fn(help='"Webhooks is a list of webhooks and the affected resources and operations."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='webhooks', type=d.T.array)]), - withWebhooksMixin(webhooks): { webhooks+: if std.isArray(v=webhooks) then webhooks else [webhooks] }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apiregistration/v1/apiServiceSpec.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apiregistration/v1/apiServiceSpec.libsonnet deleted file mode 100644 index 88d858e5edd..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apiregistration/v1/apiServiceSpec.libsonnet +++ /dev/null @@ -1,27 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='apiServiceSpec', url='', help='"APIServiceSpec contains information for locating and communicating with a server. 
Only https is supported, though you are able to disable certificate verification."'), - '#service':: d.obj(help='"ServiceReference holds a reference to Service.legacy.k8s.io"'), - service: { - '#withName':: d.fn(help='"Name is the name of the service"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { service+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace is the namespace of the service"', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { service+: { namespace: namespace } }, - '#withPort':: d.fn(help='"If specified, the port on the service that hosting webhook. Default to 443 for backward compatibility. `port` should be a valid port number (1-65535, inclusive)."', args=[d.arg(name='port', type=d.T.integer)]), - withPort(port): { service+: { port: port } }, - }, - '#withCaBundle':: d.fn(help="\"CABundle is a PEM encoded CA bundle which will be used to validate an API server's serving certificate. If unspecified, system trust roots on the apiserver are used.\"", args=[d.arg(name='caBundle', type=d.T.string)]), - withCaBundle(caBundle): { caBundle: caBundle }, - '#withGroup':: d.fn(help='"Group is the API group name this server hosts"', args=[d.arg(name='group', type=d.T.string)]), - withGroup(group): { group: group }, - '#withGroupPriorityMinimum':: d.fn(help="\"GroupPriorityMininum is the priority this group should have at least. Higher priority means that the group is preferred by clients over lower priority ones. Note that other versions of this group might specify even higher GroupPriorityMininum values such that the whole group gets a higher priority. The primary sort is based on GroupPriorityMinimum, ordered highest number to lowest (20 before 10). The secondary sort is based on the alphabetical comparison of the name of the object. (v1.bar before v1.foo) We'd recommend something like: *.k8s.io (except extensions) at 18000 and PaaSes (OpenShift, Deis) are recommended to be in the 2000s\"", args=[d.arg(name='groupPriorityMinimum', type=d.T.integer)]), - withGroupPriorityMinimum(groupPriorityMinimum): { groupPriorityMinimum: groupPriorityMinimum }, - '#withInsecureSkipTLSVerify':: d.fn(help='"InsecureSkipTLSVerify disables TLS certificate verification when communicating with this server. This is strongly discouraged. You should use the CABundle instead."', args=[d.arg(name='insecureSkipTLSVerify', type=d.T.boolean)]), - withInsecureSkipTLSVerify(insecureSkipTLSVerify): { insecureSkipTLSVerify: insecureSkipTLSVerify }, - '#withVersion':: d.fn(help='"Version is the API version this server hosts. For example, \\"v1\\', args=[d.arg(name='version', type=d.T.string)]), - withVersion(version): { version: version }, - '#withVersionPriority':: d.fn(help="\"VersionPriority controls the ordering of this API version inside of its group. Must be greater than zero. The primary sort is based on VersionPriority, ordered highest to lowest (20 before 10). Since it's inside of a group, the number can be small, probably in the 10s. In case of equal version priorities, the version string will be used to compute the order inside a group. If the version string is \\\"kube-like\\\", it will sort above non \\\"kube-like\\\" version strings, which are ordered lexicographically. \\\"Kube-like\\\" versions start with a \\\"v\\\", then are followed by a number (the major version), then optionally the string \\\"alpha\\\" or \\\"beta\\\" and another number (the minor version). 
These are sorted first by GA \u003e beta \u003e alpha (where GA is a version with no suffix such as beta or alpha), and then by comparing major version, then minor version. An example sorted list of versions: v10, v2, v1, v11beta2, v10beta3, v3beta1, v12alpha1, v11alpha2, foo1, foo10.\"", args=[d.arg(name='versionPriority', type=d.T.integer)]), - withVersionPriority(versionPriority): { versionPriority: versionPriority }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apiregistration/v1beta1/apiService.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apiregistration/v1beta1/apiService.libsonnet deleted file mode 100644 index f743ceb7dd2..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apiregistration/v1beta1/apiService.libsonnet +++ /dev/null @@ -1,80 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='apiService', url='', help='"APIService represents a server for a particular GroupVersion. Name must be \\"version.group\\"."'), - '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), - metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), - withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), - withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, - '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), - withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, - '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. 
Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), - withDeletionGracePeriodSeconds(deletionGracePeriodSeconds): { metadata+: { deletionGracePeriodSeconds: deletionGracePeriodSeconds } }, - '#withDeletionTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='deletionTimestamp', type=d.T.string)]), - withDeletionTimestamp(deletionTimestamp): { metadata+: { deletionTimestamp: deletionTimestamp } }, - '#withFinalizers':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. 
The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), - withGenerateName(generateName): { metadata+: { generateName: generateName } }, - '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), - withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), - withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), - withLabelsMixin(labels): { metadata+: { labels+: labels } }, - '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. 
More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { metadata+: { namespace: namespace } }, - '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, - '#withOwnerReferencesMixin':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, - '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), - withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), - withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. 
More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), - withUid(uid): { metadata+: { uid: uid } }, - }, - '#new':: d.fn(help='new returns an instance of APIService', args=[d.arg(name='name', type=d.T.string)]), - new(name): { - apiVersion: 'apiregistration.k8s.io/v1beta1', - kind: 'APIService', - } + self.metadata.withName(name=name), - '#spec':: d.obj(help='"APIServiceSpec contains information for locating and communicating with a server. Only https is supported, though you are able to disable certificate verification."'), - spec: { - '#service':: d.obj(help='"ServiceReference holds a reference to Service.legacy.k8s.io"'), - service: { - '#withName':: d.fn(help='"Name is the name of the service"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { spec+: { service+: { name: name } } }, - '#withNamespace':: d.fn(help='"Namespace is the namespace of the service"', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { spec+: { service+: { namespace: namespace } } }, - '#withPort':: d.fn(help='"If specified, the port on the service that hosting webhook. Default to 443 for backward compatibility. `port` should be a valid port number (1-65535, inclusive)."', args=[d.arg(name='port', type=d.T.integer)]), - withPort(port): { spec+: { service+: { port: port } } }, - }, - '#withCaBundle':: d.fn(help="\"CABundle is a PEM encoded CA bundle which will be used to validate an API server's serving certificate. If unspecified, system trust roots on the apiserver are used.\"", args=[d.arg(name='caBundle', type=d.T.string)]), - withCaBundle(caBundle): { spec+: { caBundle: caBundle } }, - '#withGroup':: d.fn(help='"Group is the API group name this server hosts"', args=[d.arg(name='group', type=d.T.string)]), - withGroup(group): { spec+: { group: group } }, - '#withGroupPriorityMinimum':: d.fn(help="\"GroupPriorityMininum is the priority this group should have at least. Higher priority means that the group is preferred by clients over lower priority ones. Note that other versions of this group might specify even higher GroupPriorityMininum values such that the whole group gets a higher priority. The primary sort is based on GroupPriorityMinimum, ordered highest number to lowest (20 before 10). The secondary sort is based on the alphabetical comparison of the name of the object. (v1.bar before v1.foo) We'd recommend something like: *.k8s.io (except extensions) at 18000 and PaaSes (OpenShift, Deis) are recommended to be in the 2000s\"", args=[d.arg(name='groupPriorityMinimum', type=d.T.integer)]), - withGroupPriorityMinimum(groupPriorityMinimum): { spec+: { groupPriorityMinimum: groupPriorityMinimum } }, - '#withInsecureSkipTLSVerify':: d.fn(help='"InsecureSkipTLSVerify disables TLS certificate verification when communicating with this server. This is strongly discouraged. You should use the CABundle instead."', args=[d.arg(name='insecureSkipTLSVerify', type=d.T.boolean)]), - withInsecureSkipTLSVerify(insecureSkipTLSVerify): { spec+: { insecureSkipTLSVerify: insecureSkipTLSVerify } }, - '#withVersion':: d.fn(help='"Version is the API version this server hosts. For example, \\"v1\\', args=[d.arg(name='version', type=d.T.string)]), - withVersion(version): { spec+: { version: version } }, - '#withVersionPriority':: d.fn(help="\"VersionPriority controls the ordering of this API version inside of its group. Must be greater than zero. The primary sort is based on VersionPriority, ordered highest to lowest (20 before 10). 
Since it's inside of a group, the number can be small, probably in the 10s. In case of equal version priorities, the version string will be used to compute the order inside a group. If the version string is \\\"kube-like\\\", it will sort above non \\\"kube-like\\\" version strings, which are ordered lexicographically. \\\"Kube-like\\\" versions start with a \\\"v\\\", then are followed by a number (the major version), then optionally the string \\\"alpha\\\" or \\\"beta\\\" and another number (the minor version). These are sorted first by GA \u003e beta \u003e alpha (where GA is a version with no suffix such as beta or alpha), and then by comparing major version, then minor version. An example sorted list of versions: v10, v2, v1, v11beta2, v10beta3, v3beta1, v12alpha1, v11alpha2, foo1, foo10.\"", args=[d.arg(name='versionPriority', type=d.T.integer)]), - withVersionPriority(versionPriority): { spec+: { versionPriority: versionPriority } }, - }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apiregistration/v1beta1/apiServiceSpec.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apiregistration/v1beta1/apiServiceSpec.libsonnet deleted file mode 100644 index 88d858e5edd..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apiregistration/v1beta1/apiServiceSpec.libsonnet +++ /dev/null @@ -1,27 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='apiServiceSpec', url='', help='"APIServiceSpec contains information for locating and communicating with a server. Only https is supported, though you are able to disable certificate verification."'), - '#service':: d.obj(help='"ServiceReference holds a reference to Service.legacy.k8s.io"'), - service: { - '#withName':: d.fn(help='"Name is the name of the service"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { service+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace is the namespace of the service"', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { service+: { namespace: namespace } }, - '#withPort':: d.fn(help='"If specified, the port on the service that hosting webhook. Default to 443 for backward compatibility. `port` should be a valid port number (1-65535, inclusive)."', args=[d.arg(name='port', type=d.T.integer)]), - withPort(port): { service+: { port: port } }, - }, - '#withCaBundle':: d.fn(help="\"CABundle is a PEM encoded CA bundle which will be used to validate an API server's serving certificate. If unspecified, system trust roots on the apiserver are used.\"", args=[d.arg(name='caBundle', type=d.T.string)]), - withCaBundle(caBundle): { caBundle: caBundle }, - '#withGroup':: d.fn(help='"Group is the API group name this server hosts"', args=[d.arg(name='group', type=d.T.string)]), - withGroup(group): { group: group }, - '#withGroupPriorityMinimum':: d.fn(help="\"GroupPriorityMininum is the priority this group should have at least. Higher priority means that the group is preferred by clients over lower priority ones. Note that other versions of this group might specify even higher GroupPriorityMininum values such that the whole group gets a higher priority. The primary sort is based on GroupPriorityMinimum, ordered highest number to lowest (20 before 10). The secondary sort is based on the alphabetical comparison of the name of the object. 
(v1.bar before v1.foo) We'd recommend something like: *.k8s.io (except extensions) at 18000 and PaaSes (OpenShift, Deis) are recommended to be in the 2000s\"", args=[d.arg(name='groupPriorityMinimum', type=d.T.integer)]), - withGroupPriorityMinimum(groupPriorityMinimum): { groupPriorityMinimum: groupPriorityMinimum }, - '#withInsecureSkipTLSVerify':: d.fn(help='"InsecureSkipTLSVerify disables TLS certificate verification when communicating with this server. This is strongly discouraged. You should use the CABundle instead."', args=[d.arg(name='insecureSkipTLSVerify', type=d.T.boolean)]), - withInsecureSkipTLSVerify(insecureSkipTLSVerify): { insecureSkipTLSVerify: insecureSkipTLSVerify }, - '#withVersion':: d.fn(help='"Version is the API version this server hosts. For example, \\"v1\\', args=[d.arg(name='version', type=d.T.string)]), - withVersion(version): { version: version }, - '#withVersionPriority':: d.fn(help="\"VersionPriority controls the ordering of this API version inside of its group. Must be greater than zero. The primary sort is based on VersionPriority, ordered highest to lowest (20 before 10). Since it's inside of a group, the number can be small, probably in the 10s. In case of equal version priorities, the version string will be used to compute the order inside a group. If the version string is \\\"kube-like\\\", it will sort above non \\\"kube-like\\\" version strings, which are ordered lexicographically. \\\"Kube-like\\\" versions start with a \\\"v\\\", then are followed by a number (the major version), then optionally the string \\\"alpha\\\" or \\\"beta\\\" and another number (the minor version). These are sorted first by GA \u003e beta \u003e alpha (where GA is a version with no suffix such as beta or alpha), and then by comparing major version, then minor version. 
An example sorted list of versions: v10, v2, v1, v11beta2, v10beta3, v3beta1, v12alpha1, v11alpha2, foo1, foo10.\"", args=[d.arg(name='versionPriority', type=d.T.integer)]), - withVersionPriority(versionPriority): { versionPriority: versionPriority }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apiregistration/v1beta1/main.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apiregistration/v1beta1/main.libsonnet deleted file mode 100644 index 2cdc33f58df..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apiregistration/v1beta1/main.libsonnet +++ /dev/null @@ -1,9 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='v1beta1', url='', help=''), - apiService: (import 'apiService.libsonnet'), - apiServiceCondition: (import 'apiServiceCondition.libsonnet'), - apiServiceSpec: (import 'apiServiceSpec.libsonnet'), - apiServiceStatus: (import 'apiServiceStatus.libsonnet'), - serviceReference: (import 'serviceReference.libsonnet'), -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apps/v1/rollingUpdateStatefulSetStrategy.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apps/v1/rollingUpdateStatefulSetStrategy.libsonnet deleted file mode 100644 index 1e01fe67d32..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apps/v1/rollingUpdateStatefulSetStrategy.libsonnet +++ /dev/null @@ -1,8 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='rollingUpdateStatefulSetStrategy', url='', help='"RollingUpdateStatefulSetStrategy is used to communicate parameter for RollingUpdateStatefulSetStrategyType."'), - '#withPartition':: d.fn(help='"Partition indicates the ordinal at which the StatefulSet should be partitioned. Default value is 0."', args=[d.arg(name='partition', type=d.T.integer)]), - withPartition(partition): { partition: partition }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authentication/v1beta1/main.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authentication/v1beta1/main.libsonnet deleted file mode 100644 index 5c9a07f9a3c..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authentication/v1beta1/main.libsonnet +++ /dev/null @@ -1,8 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='v1beta1', url='', help=''), - tokenReview: (import 'tokenReview.libsonnet'), - tokenReviewSpec: (import 'tokenReviewSpec.libsonnet'), - tokenReviewStatus: (import 'tokenReviewStatus.libsonnet'), - userInfo: (import 'userInfo.libsonnet'), -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authentication/v1beta1/tokenReview.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authentication/v1beta1/tokenReview.libsonnet deleted file mode 100644 index 5052a3363e8..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authentication/v1beta1/tokenReview.libsonnet +++ /dev/null @@ -1,65 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='tokenReview', url='', help='"TokenReview attempts to authenticate a token to a known user. 
Note: TokenReview requests may be cached by the webhook token authenticator plugin in the kube-apiserver."'), - '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), - metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), - withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), - withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, - '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), - withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, - '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), - withDeletionGracePeriodSeconds(deletionGracePeriodSeconds): { metadata+: { deletionGracePeriodSeconds: deletionGracePeriodSeconds } }, - '#withDeletionTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='deletionTimestamp', type=d.T.string)]), - withDeletionTimestamp(deletionTimestamp): { metadata+: { deletionTimestamp: deletionTimestamp } }, - '#withFinalizers':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. 
If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), - withGenerateName(generateName): { metadata+: { generateName: generateName } }, - '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), - withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. 
More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), - withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), - withLabelsMixin(labels): { metadata+: { labels+: labels } }, - '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { metadata+: { namespace: namespace } }, - '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. 
There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, - '#withOwnerReferencesMixin':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, - '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), - withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), - withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), - withUid(uid): { metadata+: { uid: uid } }, - }, - '#new':: d.fn(help='new returns an instance of TokenReview', args=[d.arg(name='name', type=d.T.string)]), - new(name): { - apiVersion: 'authentication.k8s.io/v1beta1', - kind: 'TokenReview', - } + self.metadata.withName(name=name), - '#spec':: d.obj(help='"TokenReviewSpec is a description of the token authentication request."'), - spec: { - '#withAudiences':: d.fn(help='"Audiences is a list of the identifiers that the resource server presented with the token identifies as. Audience-aware token authenticators will verify that the token was intended for at least one of the audiences in this list. If no audiences are provided, the audience will default to the audience of the Kubernetes apiserver."', args=[d.arg(name='audiences', type=d.T.array)]), - withAudiences(audiences): { spec+: { audiences: if std.isArray(v=audiences) then audiences else [audiences] } }, - '#withAudiencesMixin':: d.fn(help='"Audiences is a list of the identifiers that the resource server presented with the token identifies as. 
Audience-aware token authenticators will verify that the token was intended for at least one of the audiences in this list. If no audiences are provided, the audience will default to the audience of the Kubernetes apiserver."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='audiences', type=d.T.array)]), - withAudiencesMixin(audiences): { spec+: { audiences+: if std.isArray(v=audiences) then audiences else [audiences] } }, - '#withToken':: d.fn(help='"Token is the opaque bearer token."', args=[d.arg(name='token', type=d.T.string)]), - withToken(token): { spec+: { token: token } }, - }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1beta1/localSubjectAccessReview.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1beta1/localSubjectAccessReview.libsonnet deleted file mode 100644 index 222cdd4496d..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1beta1/localSubjectAccessReview.libsonnet +++ /dev/null @@ -1,95 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='localSubjectAccessReview', url='', help='"LocalSubjectAccessReview checks whether or not a user or group can perform an action in a given namespace. Having a namespace scoped resource makes it much easier to grant namespace scoped policy that includes permissions checking."'), - '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), - metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), - withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), - withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, - '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. 
Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), - withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, - '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), - withDeletionGracePeriodSeconds(deletionGracePeriodSeconds): { metadata+: { deletionGracePeriodSeconds: deletionGracePeriodSeconds } }, - '#withDeletionTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='deletionTimestamp', type=d.T.string)]), - withDeletionTimestamp(deletionTimestamp): { metadata+: { deletionTimestamp: deletionTimestamp } }, - '#withFinalizers':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. 
If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), - withGenerateName(generateName): { metadata+: { generateName: generateName } }, - '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), - withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), - withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), - withLabelsMixin(labels): { metadata+: { labels+: labels } }, - '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. 
Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { metadata+: { namespace: namespace } }, - '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, - '#withOwnerReferencesMixin':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, - '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), - withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), - withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. 
It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), - withUid(uid): { metadata+: { uid: uid } }, - }, - '#new':: d.fn(help='new returns an instance of LocalSubjectAccessReview', args=[d.arg(name='name', type=d.T.string)]), - new(name): { - apiVersion: 'authorization.k8s.io/v1beta1', - kind: 'LocalSubjectAccessReview', - } + self.metadata.withName(name=name), - '#spec':: d.obj(help='"SubjectAccessReviewSpec is a description of the access request. Exactly one of ResourceAuthorizationAttributes and NonResourceAuthorizationAttributes must be set"'), - spec: { - '#nonResourceAttributes':: d.obj(help='"NonResourceAttributes includes the authorization attributes available for non-resource requests to the Authorizer interface"'), - nonResourceAttributes: { - '#withPath':: d.fn(help='"Path is the URL path of the request"', args=[d.arg(name='path', type=d.T.string)]), - withPath(path): { spec+: { nonResourceAttributes+: { path: path } } }, - '#withVerb':: d.fn(help='"Verb is the standard HTTP verb"', args=[d.arg(name='verb', type=d.T.string)]), - withVerb(verb): { spec+: { nonResourceAttributes+: { verb: verb } } }, - }, - '#resourceAttributes':: d.obj(help='"ResourceAttributes includes the authorization attributes available for resource requests to the Authorizer interface"'), - resourceAttributes: { - '#withGroup':: d.fn(help='"Group is the API Group of the Resource. \\"*\\" means all."', args=[d.arg(name='group', type=d.T.string)]), - withGroup(group): { spec+: { resourceAttributes+: { group: group } } }, - '#withName':: d.fn(help='"Name is the name of the resource being requested for a \\"get\\" or deleted for a \\"delete\\". \\"\\" (empty) means all."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { spec+: { resourceAttributes+: { name: name } } }, - '#withNamespace':: d.fn(help='"Namespace is the namespace of the action being requested. Currently, there is no distinction between no namespace and all namespaces \\"\\" (empty) is defaulted for LocalSubjectAccessReviews \\"\\" (empty) is empty for cluster-scoped resources \\"\\" (empty) means \\"all\\" for namespace scoped resources from a SubjectAccessReview or SelfSubjectAccessReview"', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { spec+: { resourceAttributes+: { namespace: namespace } } }, - '#withResource':: d.fn(help='"Resource is one of the existing resource types. \\"*\\" means all."', args=[d.arg(name='resource', type=d.T.string)]), - withResource(resource): { spec+: { resourceAttributes+: { resource: resource } } }, - '#withSubresource':: d.fn(help='"Subresource is one of the existing resource types. \\"\\" means none."', args=[d.arg(name='subresource', type=d.T.string)]), - withSubresource(subresource): { spec+: { resourceAttributes+: { subresource: subresource } } }, - '#withVerb':: d.fn(help='"Verb is a kubernetes resource API verb, like: get, list, watch, create, update, delete, proxy. \\"*\\" means all."', args=[d.arg(name='verb', type=d.T.string)]), - withVerb(verb): { spec+: { resourceAttributes+: { verb: verb } } }, - '#withVersion':: d.fn(help='"Version is the API Version of the Resource. 
\\"*\\" means all."', args=[d.arg(name='version', type=d.T.string)]), - withVersion(version): { spec+: { resourceAttributes+: { version: version } } }, - }, - '#withExtra':: d.fn(help='"Extra corresponds to the user.Info.GetExtra() method from the authenticator. Since that is input to the authorizer it needs a reflection here."', args=[d.arg(name='extra', type=d.T.object)]), - withExtra(extra): { spec+: { extra: extra } }, - '#withExtraMixin':: d.fn(help='"Extra corresponds to the user.Info.GetExtra() method from the authenticator. Since that is input to the authorizer it needs a reflection here."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='extra', type=d.T.object)]), - withExtraMixin(extra): { spec+: { extra+: extra } }, - '#withGroup':: d.fn(help="\"Groups is the groups you're testing for.\"", args=[d.arg(name='group', type=d.T.array)]), - withGroup(group): { spec+: { group: if std.isArray(v=group) then group else [group] } }, - '#withGroupMixin':: d.fn(help="\"Groups is the groups you're testing for.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='group', type=d.T.array)]), - withGroupMixin(group): { spec+: { group+: if std.isArray(v=group) then group else [group] } }, - '#withUid':: d.fn(help='"UID information about the requesting user."', args=[d.arg(name='uid', type=d.T.string)]), - withUid(uid): { spec+: { uid: uid } }, - '#withUser':: d.fn(help="\"User is the user you're testing for. If you specify \\\"User\\\" but not \\\"Group\\\", then is it interpreted as \\\"What if User were not a member of any groups\"", args=[d.arg(name='user', type=d.T.string)]), - withUser(user): { spec+: { user: user } }, - }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1beta1/main.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1beta1/main.libsonnet deleted file mode 100644 index 58919359023..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1beta1/main.libsonnet +++ /dev/null @@ -1,17 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='v1beta1', url='', help=''), - localSubjectAccessReview: (import 'localSubjectAccessReview.libsonnet'), - nonResourceAttributes: (import 'nonResourceAttributes.libsonnet'), - nonResourceRule: (import 'nonResourceRule.libsonnet'), - resourceAttributes: (import 'resourceAttributes.libsonnet'), - resourceRule: (import 'resourceRule.libsonnet'), - selfSubjectAccessReview: (import 'selfSubjectAccessReview.libsonnet'), - selfSubjectAccessReviewSpec: (import 'selfSubjectAccessReviewSpec.libsonnet'), - selfSubjectRulesReview: (import 'selfSubjectRulesReview.libsonnet'), - selfSubjectRulesReviewSpec: (import 'selfSubjectRulesReviewSpec.libsonnet'), - subjectAccessReview: (import 'subjectAccessReview.libsonnet'), - subjectAccessReviewSpec: (import 'subjectAccessReviewSpec.libsonnet'), - subjectAccessReviewStatus: (import 'subjectAccessReviewStatus.libsonnet'), - subjectRulesReviewStatus: (import 'subjectRulesReviewStatus.libsonnet'), -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1beta1/selfSubjectAccessReview.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1beta1/selfSubjectAccessReview.libsonnet deleted file mode 100644 index ec9d4d8468e..00000000000 --- 
a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1beta1/selfSubjectAccessReview.libsonnet +++ /dev/null @@ -1,83 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='selfSubjectAccessReview', url='', help='"SelfSubjectAccessReview checks whether or the current user can perform an action. Not filling in a spec.namespace means \\"in all namespaces\\". Self is a special case, because users should always be able to check whether they can perform an action"'), - '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), - metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), - withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), - withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, - '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), - withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, - '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), - withDeletionGracePeriodSeconds(deletionGracePeriodSeconds): { metadata+: { deletionGracePeriodSeconds: deletionGracePeriodSeconds } }, - '#withDeletionTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='deletionTimestamp', type=d.T.string)]), - withDeletionTimestamp(deletionTimestamp): { metadata+: { deletionTimestamp: deletionTimestamp } }, - '#withFinalizers':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. 
Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), - withGenerateName(generateName): { metadata+: { generateName: generateName } }, - '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), - withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. 
May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), - withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), - withLabelsMixin(labels): { metadata+: { labels+: labels } }, - '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { metadata+: { namespace: namespace } }, - '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. 
There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, - '#withOwnerReferencesMixin':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, - '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), - withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), - withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), - withUid(uid): { metadata+: { uid: uid } }, - }, - '#new':: d.fn(help='new returns an instance of SelfSubjectAccessReview', args=[d.arg(name='name', type=d.T.string)]), - new(name): { - apiVersion: 'authorization.k8s.io/v1beta1', - kind: 'SelfSubjectAccessReview', - } + self.metadata.withName(name=name), - '#spec':: d.obj(help='"SelfSubjectAccessReviewSpec is a description of the access request. 
Exactly one of ResourceAuthorizationAttributes and NonResourceAuthorizationAttributes must be set"'), - spec: { - '#nonResourceAttributes':: d.obj(help='"NonResourceAttributes includes the authorization attributes available for non-resource requests to the Authorizer interface"'), - nonResourceAttributes: { - '#withPath':: d.fn(help='"Path is the URL path of the request"', args=[d.arg(name='path', type=d.T.string)]), - withPath(path): { spec+: { nonResourceAttributes+: { path: path } } }, - '#withVerb':: d.fn(help='"Verb is the standard HTTP verb"', args=[d.arg(name='verb', type=d.T.string)]), - withVerb(verb): { spec+: { nonResourceAttributes+: { verb: verb } } }, - }, - '#resourceAttributes':: d.obj(help='"ResourceAttributes includes the authorization attributes available for resource requests to the Authorizer interface"'), - resourceAttributes: { - '#withGroup':: d.fn(help='"Group is the API Group of the Resource. \\"*\\" means all."', args=[d.arg(name='group', type=d.T.string)]), - withGroup(group): { spec+: { resourceAttributes+: { group: group } } }, - '#withName':: d.fn(help='"Name is the name of the resource being requested for a \\"get\\" or deleted for a \\"delete\\". \\"\\" (empty) means all."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { spec+: { resourceAttributes+: { name: name } } }, - '#withNamespace':: d.fn(help='"Namespace is the namespace of the action being requested. Currently, there is no distinction between no namespace and all namespaces \\"\\" (empty) is defaulted for LocalSubjectAccessReviews \\"\\" (empty) is empty for cluster-scoped resources \\"\\" (empty) means \\"all\\" for namespace scoped resources from a SubjectAccessReview or SelfSubjectAccessReview"', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { spec+: { resourceAttributes+: { namespace: namespace } } }, - '#withResource':: d.fn(help='"Resource is one of the existing resource types. \\"*\\" means all."', args=[d.arg(name='resource', type=d.T.string)]), - withResource(resource): { spec+: { resourceAttributes+: { resource: resource } } }, - '#withSubresource':: d.fn(help='"Subresource is one of the existing resource types. \\"\\" means none."', args=[d.arg(name='subresource', type=d.T.string)]), - withSubresource(subresource): { spec+: { resourceAttributes+: { subresource: subresource } } }, - '#withVerb':: d.fn(help='"Verb is a kubernetes resource API verb, like: get, list, watch, create, update, delete, proxy. \\"*\\" means all."', args=[d.arg(name='verb', type=d.T.string)]), - withVerb(verb): { spec+: { resourceAttributes+: { verb: verb } } }, - '#withVersion':: d.fn(help='"Version is the API Version of the Resource. 
\\"*\\" means all."', args=[d.arg(name='version', type=d.T.string)]), - withVersion(version): { spec+: { resourceAttributes+: { version: version } } }, - }, - }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1beta1/subjectAccessReview.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1beta1/subjectAccessReview.libsonnet deleted file mode 100644 index accfb6f6c54..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1beta1/subjectAccessReview.libsonnet +++ /dev/null @@ -1,95 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='subjectAccessReview', url='', help='"SubjectAccessReview checks whether or not a user or group can perform an action."'), - '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), - metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), - withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), - withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, - '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), - withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, - '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), - withDeletionGracePeriodSeconds(deletionGracePeriodSeconds): { metadata+: { deletionGracePeriodSeconds: deletionGracePeriodSeconds } }, - '#withDeletionTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. 
Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='deletionTimestamp', type=d.T.string)]), - withDeletionTimestamp(deletionTimestamp): { metadata+: { deletionTimestamp: deletionTimestamp } }, - '#withFinalizers':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), - withGenerateName(generateName): { metadata+: { generateName: generateName } }, - '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), - withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), - withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), - withLabelsMixin(labels): { metadata+: { labels+: labels } }, - '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. 
More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { metadata+: { namespace: namespace } }, - '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, - '#withOwnerReferencesMixin':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, - '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), - withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), - withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), - withUid(uid): { metadata+: { uid: uid } }, - }, - '#new':: d.fn(help='new returns an instance of SubjectAccessReview', args=[d.arg(name='name', type=d.T.string)]), - new(name): { - apiVersion: 'authorization.k8s.io/v1beta1', - kind: 'SubjectAccessReview', - } + self.metadata.withName(name=name), - '#spec':: d.obj(help='"SubjectAccessReviewSpec is a description of the access request. 
Exactly one of ResourceAuthorizationAttributes and NonResourceAuthorizationAttributes must be set"'), - spec: { - '#nonResourceAttributes':: d.obj(help='"NonResourceAttributes includes the authorization attributes available for non-resource requests to the Authorizer interface"'), - nonResourceAttributes: { - '#withPath':: d.fn(help='"Path is the URL path of the request"', args=[d.arg(name='path', type=d.T.string)]), - withPath(path): { spec+: { nonResourceAttributes+: { path: path } } }, - '#withVerb':: d.fn(help='"Verb is the standard HTTP verb"', args=[d.arg(name='verb', type=d.T.string)]), - withVerb(verb): { spec+: { nonResourceAttributes+: { verb: verb } } }, - }, - '#resourceAttributes':: d.obj(help='"ResourceAttributes includes the authorization attributes available for resource requests to the Authorizer interface"'), - resourceAttributes: { - '#withGroup':: d.fn(help='"Group is the API Group of the Resource. \\"*\\" means all."', args=[d.arg(name='group', type=d.T.string)]), - withGroup(group): { spec+: { resourceAttributes+: { group: group } } }, - '#withName':: d.fn(help='"Name is the name of the resource being requested for a \\"get\\" or deleted for a \\"delete\\". \\"\\" (empty) means all."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { spec+: { resourceAttributes+: { name: name } } }, - '#withNamespace':: d.fn(help='"Namespace is the namespace of the action being requested. Currently, there is no distinction between no namespace and all namespaces \\"\\" (empty) is defaulted for LocalSubjectAccessReviews \\"\\" (empty) is empty for cluster-scoped resources \\"\\" (empty) means \\"all\\" for namespace scoped resources from a SubjectAccessReview or SelfSubjectAccessReview"', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { spec+: { resourceAttributes+: { namespace: namespace } } }, - '#withResource':: d.fn(help='"Resource is one of the existing resource types. \\"*\\" means all."', args=[d.arg(name='resource', type=d.T.string)]), - withResource(resource): { spec+: { resourceAttributes+: { resource: resource } } }, - '#withSubresource':: d.fn(help='"Subresource is one of the existing resource types. \\"\\" means none."', args=[d.arg(name='subresource', type=d.T.string)]), - withSubresource(subresource): { spec+: { resourceAttributes+: { subresource: subresource } } }, - '#withVerb':: d.fn(help='"Verb is a kubernetes resource API verb, like: get, list, watch, create, update, delete, proxy. \\"*\\" means all."', args=[d.arg(name='verb', type=d.T.string)]), - withVerb(verb): { spec+: { resourceAttributes+: { verb: verb } } }, - '#withVersion':: d.fn(help='"Version is the API Version of the Resource. \\"*\\" means all."', args=[d.arg(name='version', type=d.T.string)]), - withVersion(version): { spec+: { resourceAttributes+: { version: version } } }, - }, - '#withExtra':: d.fn(help='"Extra corresponds to the user.Info.GetExtra() method from the authenticator. Since that is input to the authorizer it needs a reflection here."', args=[d.arg(name='extra', type=d.T.object)]), - withExtra(extra): { spec+: { extra: extra } }, - '#withExtraMixin':: d.fn(help='"Extra corresponds to the user.Info.GetExtra() method from the authenticator. 
Since that is input to the authorizer it needs a reflection here."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='extra', type=d.T.object)]), - withExtraMixin(extra): { spec+: { extra+: extra } }, - '#withGroup':: d.fn(help="\"Groups is the groups you're testing for.\"", args=[d.arg(name='group', type=d.T.array)]), - withGroup(group): { spec+: { group: if std.isArray(v=group) then group else [group] } }, - '#withGroupMixin':: d.fn(help="\"Groups is the groups you're testing for.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='group', type=d.T.array)]), - withGroupMixin(group): { spec+: { group+: if std.isArray(v=group) then group else [group] } }, - '#withUid':: d.fn(help='"UID information about the requesting user."', args=[d.arg(name='uid', type=d.T.string)]), - withUid(uid): { spec+: { uid: uid } }, - '#withUser':: d.fn(help="\"User is the user you're testing for. If you specify \\\"User\\\" but not \\\"Group\\\", then is it interpreted as \\\"What if User were not a member of any groups\"", args=[d.arg(name='user', type=d.T.string)]), - withUser(user): { spec+: { user: user } }, - }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1beta1/subjectAccessReviewSpec.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1beta1/subjectAccessReviewSpec.libsonnet deleted file mode 100644 index 3a787fb74ab..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1beta1/subjectAccessReviewSpec.libsonnet +++ /dev/null @@ -1,42 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='subjectAccessReviewSpec', url='', help='"SubjectAccessReviewSpec is a description of the access request. Exactly one of ResourceAuthorizationAttributes and NonResourceAuthorizationAttributes must be set"'), - '#nonResourceAttributes':: d.obj(help='"NonResourceAttributes includes the authorization attributes available for non-resource requests to the Authorizer interface"'), - nonResourceAttributes: { - '#withPath':: d.fn(help='"Path is the URL path of the request"', args=[d.arg(name='path', type=d.T.string)]), - withPath(path): { nonResourceAttributes+: { path: path } }, - '#withVerb':: d.fn(help='"Verb is the standard HTTP verb"', args=[d.arg(name='verb', type=d.T.string)]), - withVerb(verb): { nonResourceAttributes+: { verb: verb } }, - }, - '#resourceAttributes':: d.obj(help='"ResourceAttributes includes the authorization attributes available for resource requests to the Authorizer interface"'), - resourceAttributes: { - '#withGroup':: d.fn(help='"Group is the API Group of the Resource. \\"*\\" means all."', args=[d.arg(name='group', type=d.T.string)]), - withGroup(group): { resourceAttributes+: { group: group } }, - '#withName':: d.fn(help='"Name is the name of the resource being requested for a \\"get\\" or deleted for a \\"delete\\". \\"\\" (empty) means all."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { resourceAttributes+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace is the namespace of the action being requested. 
Currently, there is no distinction between no namespace and all namespaces \\"\\" (empty) is defaulted for LocalSubjectAccessReviews \\"\\" (empty) is empty for cluster-scoped resources \\"\\" (empty) means \\"all\\" for namespace scoped resources from a SubjectAccessReview or SelfSubjectAccessReview"', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { resourceAttributes+: { namespace: namespace } }, - '#withResource':: d.fn(help='"Resource is one of the existing resource types. \\"*\\" means all."', args=[d.arg(name='resource', type=d.T.string)]), - withResource(resource): { resourceAttributes+: { resource: resource } }, - '#withSubresource':: d.fn(help='"Subresource is one of the existing resource types. \\"\\" means none."', args=[d.arg(name='subresource', type=d.T.string)]), - withSubresource(subresource): { resourceAttributes+: { subresource: subresource } }, - '#withVerb':: d.fn(help='"Verb is a kubernetes resource API verb, like: get, list, watch, create, update, delete, proxy. \\"*\\" means all."', args=[d.arg(name='verb', type=d.T.string)]), - withVerb(verb): { resourceAttributes+: { verb: verb } }, - '#withVersion':: d.fn(help='"Version is the API Version of the Resource. \\"*\\" means all."', args=[d.arg(name='version', type=d.T.string)]), - withVersion(version): { resourceAttributes+: { version: version } }, - }, - '#withExtra':: d.fn(help='"Extra corresponds to the user.Info.GetExtra() method from the authenticator. Since that is input to the authorizer it needs a reflection here."', args=[d.arg(name='extra', type=d.T.object)]), - withExtra(extra): { extra: extra }, - '#withExtraMixin':: d.fn(help='"Extra corresponds to the user.Info.GetExtra() method from the authenticator. Since that is input to the authorizer it needs a reflection here."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='extra', type=d.T.object)]), - withExtraMixin(extra): { extra+: extra }, - '#withGroup':: d.fn(help="\"Groups is the groups you're testing for.\"", args=[d.arg(name='group', type=d.T.array)]), - withGroup(group): { group: if std.isArray(v=group) then group else [group] }, - '#withGroupMixin':: d.fn(help="\"Groups is the groups you're testing for.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='group', type=d.T.array)]), - withGroupMixin(group): { group+: if std.isArray(v=group) then group else [group] }, - '#withUid':: d.fn(help='"UID information about the requesting user."', args=[d.arg(name='uid', type=d.T.string)]), - withUid(uid): { uid: uid }, - '#withUser':: d.fn(help="\"User is the user you're testing for. 
If you specify \\\"User\\\" but not \\\"Group\\\", then is it interpreted as \\\"What if User were not a member of any groups\"", args=[d.arg(name='user', type=d.T.string)]), - withUser(user): { user: user }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v1/crossVersionObjectReference.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v1/crossVersionObjectReference.libsonnet deleted file mode 100644 index 643713bbcdb..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v1/crossVersionObjectReference.libsonnet +++ /dev/null @@ -1,10 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='crossVersionObjectReference', url='', help='"CrossVersionObjectReference contains enough information to let you identify the referred resource."'), - '#withKind':: d.fn(help='"Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds\\', args=[d.arg(name='kind', type=d.T.string)]), - withKind(kind): { kind: kind }, - '#withName':: d.fn(help='"Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { name: name }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v1/scaleStatus.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v1/scaleStatus.libsonnet deleted file mode 100644 index 55f20dbb892..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v1/scaleStatus.libsonnet +++ /dev/null @@ -1,10 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='scaleStatus', url='', help='"ScaleStatus represents the current status of a scale subresource."'), - '#withReplicas':: d.fn(help='"actual number of observed instances of the scaled object."', args=[d.arg(name='replicas', type=d.T.integer)]), - withReplicas(replicas): { replicas: replicas }, - '#withSelector':: d.fn(help='"label query over pods that should match the replicas count. This is same as the label selector but in the string format to avoid introspection by clients. The string will be in the same format as the query-param syntax. More info about label selectors: http://kubernetes.io/docs/user-guide/labels#label-selectors"', args=[d.arg(name='selector', type=d.T.string)]), - withSelector(selector): { selector: selector }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta1/containerResourceMetricSource.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta1/containerResourceMetricSource.libsonnet deleted file mode 100644 index 93e908d8f3f..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta1/containerResourceMetricSource.libsonnet +++ /dev/null @@ -1,14 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='containerResourceMetricSource', url='', help='"ContainerResourceMetricSource indicates how to scale on a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). 
The values will be averaged together before being compared to the target. Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \\"pods\\" source. Only one \\"target\\" type should be set."'), - '#withContainer':: d.fn(help='"container is the name of the container in the pods of the scaling target"', args=[d.arg(name='container', type=d.T.string)]), - withContainer(container): { container: container }, - '#withName':: d.fn(help='"name is the name of the resource in question."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { name: name }, - '#withTargetAverageUtilization':: d.fn(help='"targetAverageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods."', args=[d.arg(name='targetAverageUtilization', type=d.T.integer)]), - withTargetAverageUtilization(targetAverageUtilization): { targetAverageUtilization: targetAverageUtilization }, - '#withTargetAverageValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. 
That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='targetAverageValue', type=d.T.string)]), - withTargetAverageValue(targetAverageValue): { targetAverageValue: targetAverageValue }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta1/containerResourceMetricStatus.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta1/containerResourceMetricStatus.libsonnet deleted file mode 100644 index 79cdfc24bf3..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta1/containerResourceMetricStatus.libsonnet +++ /dev/null @@ -1,14 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='containerResourceMetricStatus', url='', help='"ContainerResourceMetricStatus indicates the current value of a resource metric known to Kubernetes, as specified in requests and limits, describing a single container in each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \\"pods\\" source."'), - '#withContainer':: d.fn(help='"container is the name of the container in the pods of the scaling target"', args=[d.arg(name='container', type=d.T.string)]), - withContainer(container): { container: container }, - '#withCurrentAverageUtilization':: d.fn(help='"currentAverageUtilization is the current value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. It will only be present if `targetAverageValue` was set in the corresponding metric specification."', args=[d.arg(name='currentAverageUtilization', type=d.T.integer)]), - withCurrentAverageUtilization(currentAverageUtilization): { currentAverageUtilization: currentAverageUtilization }, - '#withCurrentAverageValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. 
| .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. 
(So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='currentAverageValue', type=d.T.string)]), - withCurrentAverageValue(currentAverageValue): { currentAverageValue: currentAverageValue }, - '#withName':: d.fn(help='"name is the name of the resource in question."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { name: name }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta1/crossVersionObjectReference.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta1/crossVersionObjectReference.libsonnet deleted file mode 100644 index 643713bbcdb..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta1/crossVersionObjectReference.libsonnet +++ /dev/null @@ -1,10 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='crossVersionObjectReference', url='', help='"CrossVersionObjectReference contains enough information to let you identify the referred resource."'), - '#withKind':: d.fn(help='"Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds\\', args=[d.arg(name='kind', type=d.T.string)]), - withKind(kind): { kind: kind }, - '#withName':: d.fn(help='"Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { name: name }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta1/externalMetricSource.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta1/externalMetricSource.libsonnet deleted file mode 100644 index 9c2f69c2052..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta1/externalMetricSource.libsonnet +++ /dev/null @@ -1,23 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='externalMetricSource', url='', help='"ExternalMetricSource indicates how to scale on a metric not associated with any Kubernetes object (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster). Exactly one \\"target\\" type should be set."'), - '#metricSelector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), - metricSelector: { - '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), - withMatchExpressions(matchExpressions): { metricSelector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } }, - '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. 
The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), - withMatchExpressionsMixin(matchExpressions): { metricSelector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } }, - '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), - withMatchLabels(matchLabels): { metricSelector+: { matchLabels: matchLabels } }, - '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), - withMatchLabelsMixin(matchLabels): { metricSelector+: { matchLabels+: matchLabels } }, - }, - '#withMetricName':: d.fn(help='"metricName is the name of the metric in question."', args=[d.arg(name='metricName', type=d.T.string)]), - withMetricName(metricName): { metricName: metricName }, - '#withTargetAverageValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. 
The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='targetAverageValue', type=d.T.string)]), - withTargetAverageValue(targetAverageValue): { targetAverageValue: targetAverageValue }, - '#withTargetValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. 
(So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='targetValue', type=d.T.string)]), - withTargetValue(targetValue): { targetValue: targetValue }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta1/externalMetricStatus.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta1/externalMetricStatus.libsonnet deleted file mode 100644 index 27075d307e8..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta1/externalMetricStatus.libsonnet +++ /dev/null @@ -1,23 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='externalMetricStatus', url='', help='"ExternalMetricStatus indicates the current value of a global metric not associated with any Kubernetes object."'), - '#metricSelector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), - metricSelector: { - '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), - withMatchExpressions(matchExpressions): { metricSelector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } }, - '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), - withMatchExpressionsMixin(matchExpressions): { metricSelector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } }, - '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), - withMatchLabels(matchLabels): { metricSelector+: { matchLabels: matchLabels } }, - '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), - withMatchLabelsMixin(matchLabels): { metricSelector+: { matchLabels+: matchLabels } }, - }, - '#withCurrentAverageValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. 
It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='currentAverageValue', type=d.T.string)]), - withCurrentAverageValue(currentAverageValue): { currentAverageValue: currentAverageValue }, - '#withCurrentValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. 
| .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. 
(So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='currentValue', type=d.T.string)]), - withCurrentValue(currentValue): { currentValue: currentValue }, - '#withMetricName':: d.fn(help='"metricName is the name of a metric used for autoscaling in metric system."', args=[d.arg(name='metricName', type=d.T.string)]), - withMetricName(metricName): { metricName: metricName }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta1/horizontalPodAutoscaler.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta1/horizontalPodAutoscaler.libsonnet deleted file mode 100644 index e24587f6fac..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta1/horizontalPodAutoscaler.libsonnet +++ /dev/null @@ -1,76 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='horizontalPodAutoscaler', url='', help='"HorizontalPodAutoscaler is the configuration for a horizontal pod autoscaler, which automatically manages the replica count of any resource implementing the scale subresource based on the metrics specified."'), - '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), - metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), - withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), - withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, - '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), - withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, - '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. 
May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), - withDeletionGracePeriodSeconds(deletionGracePeriodSeconds): { metadata+: { deletionGracePeriodSeconds: deletionGracePeriodSeconds } }, - '#withDeletionTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='deletionTimestamp', type=d.T.string)]), - withDeletionTimestamp(deletionTimestamp): { metadata+: { deletionTimestamp: deletionTimestamp } }, - '#withFinalizers':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. 
The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), - withGenerateName(generateName): { metadata+: { generateName: generateName } }, - '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), - withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), - withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), - withLabelsMixin(labels): { metadata+: { labels+: labels } }, - '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. 
More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { metadata+: { namespace: namespace } }, - '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, - '#withOwnerReferencesMixin':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, - '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), - withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), - withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. 
More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), - withUid(uid): { metadata+: { uid: uid } }, - }, - '#new':: d.fn(help='new returns an instance of HorizontalPodAutoscaler', args=[d.arg(name='name', type=d.T.string)]), - new(name): { - apiVersion: 'autoscaling/v2beta1', - kind: 'HorizontalPodAutoscaler', - } + self.metadata.withName(name=name), - '#spec':: d.obj(help='"HorizontalPodAutoscalerSpec describes the desired functionality of the HorizontalPodAutoscaler."'), - spec: { - '#scaleTargetRef':: d.obj(help='"CrossVersionObjectReference contains enough information to let you identify the referred resource."'), - scaleTargetRef: { - '#withApiVersion':: d.fn(help='"API version of the referent"', args=[d.arg(name='apiVersion', type=d.T.string)]), - withApiVersion(apiVersion): { spec+: { scaleTargetRef+: { apiVersion: apiVersion } } }, - '#withKind':: d.fn(help='"Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds\\', args=[d.arg(name='kind', type=d.T.string)]), - withKind(kind): { spec+: { scaleTargetRef+: { kind: kind } } }, - '#withName':: d.fn(help='"Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { spec+: { scaleTargetRef+: { name: name } } }, - }, - '#withMaxReplicas':: d.fn(help='"maxReplicas is the upper limit for the number of replicas to which the autoscaler can scale up. It cannot be less that minReplicas."', args=[d.arg(name='maxReplicas', type=d.T.integer)]), - withMaxReplicas(maxReplicas): { spec+: { maxReplicas: maxReplicas } }, - '#withMetrics':: d.fn(help='"metrics contains the specifications for which to use to calculate the desired replica count (the maximum replica count across all metrics will be used). The desired replica count is calculated multiplying the ratio between the target value and the current value by the current number of pods. Ergo, metrics used must decrease as the pod count is increased, and vice-versa. See the individual metric source types for more information about how each type of metric must respond."', args=[d.arg(name='metrics', type=d.T.array)]), - withMetrics(metrics): { spec+: { metrics: if std.isArray(v=metrics) then metrics else [metrics] } }, - '#withMetricsMixin':: d.fn(help='"metrics contains the specifications for which to use to calculate the desired replica count (the maximum replica count across all metrics will be used). The desired replica count is calculated multiplying the ratio between the target value and the current value by the current number of pods. Ergo, metrics used must decrease as the pod count is increased, and vice-versa. See the individual metric source types for more information about how each type of metric must respond."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='metrics', type=d.T.array)]), - withMetricsMixin(metrics): { spec+: { metrics+: if std.isArray(v=metrics) then metrics else [metrics] } }, - '#withMinReplicas':: d.fn(help='"minReplicas is the lower limit for the number of replicas to which the autoscaler can scale down. It defaults to 1 pod. minReplicas is allowed to be 0 if the alpha feature gate HPAScaleToZero is enabled and at least one Object or External metric is configured. 
Scaling is active as long as at least one metric value is available."', args=[d.arg(name='minReplicas', type=d.T.integer)]), - withMinReplicas(minReplicas): { spec+: { minReplicas: minReplicas } }, - }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta1/horizontalPodAutoscalerSpec.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta1/horizontalPodAutoscalerSpec.libsonnet deleted file mode 100644 index b786b2fb152..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta1/horizontalPodAutoscalerSpec.libsonnet +++ /dev/null @@ -1,23 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='horizontalPodAutoscalerSpec', url='', help='"HorizontalPodAutoscalerSpec describes the desired functionality of the HorizontalPodAutoscaler."'), - '#scaleTargetRef':: d.obj(help='"CrossVersionObjectReference contains enough information to let you identify the referred resource."'), - scaleTargetRef: { - '#withApiVersion':: d.fn(help='"API version of the referent"', args=[d.arg(name='apiVersion', type=d.T.string)]), - withApiVersion(apiVersion): { scaleTargetRef+: { apiVersion: apiVersion } }, - '#withKind':: d.fn(help='"Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds\\', args=[d.arg(name='kind', type=d.T.string)]), - withKind(kind): { scaleTargetRef+: { kind: kind } }, - '#withName':: d.fn(help='"Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { scaleTargetRef+: { name: name } }, - }, - '#withMaxReplicas':: d.fn(help='"maxReplicas is the upper limit for the number of replicas to which the autoscaler can scale up. It cannot be less that minReplicas."', args=[d.arg(name='maxReplicas', type=d.T.integer)]), - withMaxReplicas(maxReplicas): { maxReplicas: maxReplicas }, - '#withMetrics':: d.fn(help='"metrics contains the specifications for which to use to calculate the desired replica count (the maximum replica count across all metrics will be used). The desired replica count is calculated multiplying the ratio between the target value and the current value by the current number of pods. Ergo, metrics used must decrease as the pod count is increased, and vice-versa. See the individual metric source types for more information about how each type of metric must respond."', args=[d.arg(name='metrics', type=d.T.array)]), - withMetrics(metrics): { metrics: if std.isArray(v=metrics) then metrics else [metrics] }, - '#withMetricsMixin':: d.fn(help='"metrics contains the specifications for which to use to calculate the desired replica count (the maximum replica count across all metrics will be used). The desired replica count is calculated multiplying the ratio between the target value and the current value by the current number of pods. Ergo, metrics used must decrease as the pod count is increased, and vice-versa. 
See the individual metric source types for more information about how each type of metric must respond."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='metrics', type=d.T.array)]), - withMetricsMixin(metrics): { metrics+: if std.isArray(v=metrics) then metrics else [metrics] }, - '#withMinReplicas':: d.fn(help='"minReplicas is the lower limit for the number of replicas to which the autoscaler can scale down. It defaults to 1 pod. minReplicas is allowed to be 0 if the alpha feature gate HPAScaleToZero is enabled and at least one Object or External metric is configured. Scaling is active as long as at least one metric value is available."', args=[d.arg(name='minReplicas', type=d.T.integer)]), - withMinReplicas(minReplicas): { minReplicas: minReplicas }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta1/main.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta1/main.libsonnet deleted file mode 100644 index b178832c07c..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta1/main.libsonnet +++ /dev/null @@ -1,21 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='v2beta1', url='', help=''), - containerResourceMetricSource: (import 'containerResourceMetricSource.libsonnet'), - containerResourceMetricStatus: (import 'containerResourceMetricStatus.libsonnet'), - crossVersionObjectReference: (import 'crossVersionObjectReference.libsonnet'), - externalMetricSource: (import 'externalMetricSource.libsonnet'), - externalMetricStatus: (import 'externalMetricStatus.libsonnet'), - horizontalPodAutoscaler: (import 'horizontalPodAutoscaler.libsonnet'), - horizontalPodAutoscalerCondition: (import 'horizontalPodAutoscalerCondition.libsonnet'), - horizontalPodAutoscalerSpec: (import 'horizontalPodAutoscalerSpec.libsonnet'), - horizontalPodAutoscalerStatus: (import 'horizontalPodAutoscalerStatus.libsonnet'), - metricSpec: (import 'metricSpec.libsonnet'), - metricStatus: (import 'metricStatus.libsonnet'), - objectMetricSource: (import 'objectMetricSource.libsonnet'), - objectMetricStatus: (import 'objectMetricStatus.libsonnet'), - podsMetricSource: (import 'podsMetricSource.libsonnet'), - podsMetricStatus: (import 'podsMetricStatus.libsonnet'), - resourceMetricSource: (import 'resourceMetricSource.libsonnet'), - resourceMetricStatus: (import 'resourceMetricStatus.libsonnet'), -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta1/metricSpec.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta1/metricSpec.libsonnet deleted file mode 100644 index 31c07f89e10..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta1/metricSpec.libsonnet +++ /dev/null @@ -1,95 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='metricSpec', url='', help='"MetricSpec specifies how to scale based on a single metric (only `type` and one other matching field should be set at once)."'), - '#containerResource':: d.obj(help='"ContainerResourceMetricSource indicates how to scale on a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). The values will be averaged together before being compared to the target. 
Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \\"pods\\" source. Only one \\"target\\" type should be set."'), - containerResource: { - '#withContainer':: d.fn(help='"container is the name of the container in the pods of the scaling target"', args=[d.arg(name='container', type=d.T.string)]), - withContainer(container): { containerResource+: { container: container } }, - '#withName':: d.fn(help='"name is the name of the resource in question."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { containerResource+: { name: name } }, - '#withTargetAverageUtilization':: d.fn(help='"targetAverageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods."', args=[d.arg(name='targetAverageUtilization', type=d.T.integer)]), - withTargetAverageUtilization(targetAverageUtilization): { containerResource+: { targetAverageUtilization: targetAverageUtilization } }, - '#withTargetAverageValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. 
That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='targetAverageValue', type=d.T.string)]), - withTargetAverageValue(targetAverageValue): { containerResource+: { targetAverageValue: targetAverageValue } }, - }, - '#external':: d.obj(help='"ExternalMetricSource indicates how to scale on a metric not associated with any Kubernetes object (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster). Exactly one \\"target\\" type should be set."'), - external: { - '#metricSelector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), - metricSelector: { - '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), - withMatchExpressions(matchExpressions): { external+: { metricSelector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } }, - '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), - withMatchExpressionsMixin(matchExpressions): { external+: { metricSelector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } }, - '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), - withMatchLabels(matchLabels): { external+: { metricSelector+: { matchLabels: matchLabels } } }, - '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), - withMatchLabelsMixin(matchLabels): { external+: { metricSelector+: { matchLabels+: matchLabels } } }, - }, - '#withMetricName':: d.fn(help='"metricName is the name of the metric in question."', args=[d.arg(name='metricName', type=d.T.string)]), - withMetricName(metricName): { external+: { metricName: metricName } }, - '#withTargetAverageValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. 
It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='targetAverageValue', type=d.T.string)]), - withTargetAverageValue(targetAverageValue): { external+: { targetAverageValue: targetAverageValue } }, - '#withTargetValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. 
| .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='targetValue', type=d.T.string)]), - withTargetValue(targetValue): { external+: { targetValue: targetValue } }, - }, - '#object':: d.obj(help='"ObjectMetricSource indicates how to scale on a metric describing a kubernetes object (for example, hits-per-second on an Ingress object)."'), - object: { - '#selector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), - selector: { - '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), - withMatchExpressions(matchExpressions): { object+: { selector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } }, - '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. 
The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), - withMatchExpressionsMixin(matchExpressions): { object+: { selector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } }, - '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), - withMatchLabels(matchLabels): { object+: { selector+: { matchLabels: matchLabels } } }, - '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), - withMatchLabelsMixin(matchLabels): { object+: { selector+: { matchLabels+: matchLabels } } }, - }, - '#target':: d.obj(help='"CrossVersionObjectReference contains enough information to let you identify the referred resource."'), - target: { - '#withApiVersion':: d.fn(help='"API version of the referent"', args=[d.arg(name='apiVersion', type=d.T.string)]), - withApiVersion(apiVersion): { object+: { target+: { apiVersion: apiVersion } } }, - '#withKind':: d.fn(help='"Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds\\', args=[d.arg(name='kind', type=d.T.string)]), - withKind(kind): { object+: { target+: { kind: kind } } }, - '#withName':: d.fn(help='"Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { object+: { target+: { name: name } } }, - }, - '#withAverageValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. 
| .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='averageValue', type=d.T.string)]), - withAverageValue(averageValue): { object+: { averageValue: averageValue } }, - '#withMetricName':: d.fn(help='"metricName is the name of the metric in question."', args=[d.arg(name='metricName', type=d.T.string)]), - withMetricName(metricName): { object+: { metricName: metricName } }, - '#withTargetValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. 
| .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='targetValue', type=d.T.string)]), - withTargetValue(targetValue): { object+: { targetValue: targetValue } }, - }, - '#pods':: d.obj(help='"PodsMetricSource indicates how to scale on a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value."'), - pods: { - '#selector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), - selector: { - '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), - withMatchExpressions(matchExpressions): { pods+: { selector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } }, - '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. 
The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), - withMatchExpressionsMixin(matchExpressions): { pods+: { selector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } }, - '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), - withMatchLabels(matchLabels): { pods+: { selector+: { matchLabels: matchLabels } } }, - '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), - withMatchLabelsMixin(matchLabels): { pods+: { selector+: { matchLabels+: matchLabels } } }, - }, - '#withMetricName':: d.fn(help='"metricName is the name of the metric in question"', args=[d.arg(name='metricName', type=d.T.string)]), - withMetricName(metricName): { pods+: { metricName: metricName } }, - '#withTargetAverageValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. 
The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='targetAverageValue', type=d.T.string)]), - withTargetAverageValue(targetAverageValue): { pods+: { targetAverageValue: targetAverageValue } }, - }, - '#resource':: d.obj(help='"ResourceMetricSource indicates how to scale on a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). The values will be averaged together before being compared to the target. Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \\"pods\\" source. Only one \\"target\\" type should be set."'), - resource: { - '#withName':: d.fn(help='"name is the name of the resource in question."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { resource+: { name: name } }, - '#withTargetAverageUtilization':: d.fn(help='"targetAverageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods."', args=[d.arg(name='targetAverageUtilization', type=d.T.integer)]), - withTargetAverageUtilization(targetAverageUtilization): { resource+: { targetAverageUtilization: targetAverageUtilization } }, - '#withTargetAverageValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. 
(E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='targetAverageValue', type=d.T.string)]), - withTargetAverageValue(targetAverageValue): { resource+: { targetAverageValue: targetAverageValue } }, - }, - '#withType':: d.fn(help='"type is the type of metric source. It should be one of \\"ContainerResource\\", \\"External\\", \\"Object\\", \\"Pods\\" or \\"Resource\\", each mapping to a matching field in the object. Note: \\"ContainerResource\\" type is available on when the feature-gate HPAContainerMetrics is enabled"', args=[d.arg(name='type', type=d.T.string)]), - withType(type): { type: type }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta1/metricStatus.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta1/metricStatus.libsonnet deleted file mode 100644 index 2ecd28f3e3d..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta1/metricStatus.libsonnet +++ /dev/null @@ -1,95 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='metricStatus', url='', help='"MetricStatus describes the last-read state of a single metric."'), - '#containerResource':: d.obj(help='"ContainerResourceMetricStatus indicates the current value of a resource metric known to Kubernetes, as specified in requests and limits, describing a single container in each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \\"pods\\" source."'), - containerResource: { - '#withContainer':: d.fn(help='"container is the name of the container in the pods of the scaling target"', args=[d.arg(name='container', type=d.T.string)]), - withContainer(container): { containerResource+: { container: container } }, - '#withCurrentAverageUtilization':: d.fn(help='"currentAverageUtilization is the current value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. 
It will only be present if `targetAverageValue` was set in the corresponding metric specification."', args=[d.arg(name='currentAverageUtilization', type=d.T.integer)]), - withCurrentAverageUtilization(currentAverageUtilization): { containerResource+: { currentAverageUtilization: currentAverageUtilization } }, - '#withCurrentAverageValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. 
(So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='currentAverageValue', type=d.T.string)]), - withCurrentAverageValue(currentAverageValue): { containerResource+: { currentAverageValue: currentAverageValue } }, - '#withName':: d.fn(help='"name is the name of the resource in question."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { containerResource+: { name: name } }, - }, - '#external':: d.obj(help='"ExternalMetricStatus indicates the current value of a global metric not associated with any Kubernetes object."'), - external: { - '#metricSelector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), - metricSelector: { - '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), - withMatchExpressions(matchExpressions): { external+: { metricSelector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } }, - '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), - withMatchExpressionsMixin(matchExpressions): { external+: { metricSelector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } }, - '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), - withMatchLabels(matchLabels): { external+: { metricSelector+: { matchLabels: matchLabels } } }, - '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), - withMatchLabelsMixin(matchLabels): { external+: { metricSelector+: { matchLabels+: matchLabels } } }, - }, - '#withCurrentAverageValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. 
| .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='currentAverageValue', type=d.T.string)]), - withCurrentAverageValue(currentAverageValue): { external+: { currentAverageValue: currentAverageValue } }, - '#withCurrentValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. 
| .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='currentValue', type=d.T.string)]), - withCurrentValue(currentValue): { external+: { currentValue: currentValue } }, - '#withMetricName':: d.fn(help='"metricName is the name of a metric used for autoscaling in metric system."', args=[d.arg(name='metricName', type=d.T.string)]), - withMetricName(metricName): { external+: { metricName: metricName } }, - }, - '#object':: d.obj(help='"ObjectMetricStatus indicates the current value of a metric describing a kubernetes object (for example, hits-per-second on an Ingress object)."'), - object: { - '#selector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), - selector: { - '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), - withMatchExpressions(matchExpressions): { object+: { selector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } }, - '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. 
The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), - withMatchExpressionsMixin(matchExpressions): { object+: { selector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } }, - '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), - withMatchLabels(matchLabels): { object+: { selector+: { matchLabels: matchLabels } } }, - '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), - withMatchLabelsMixin(matchLabels): { object+: { selector+: { matchLabels+: matchLabels } } }, - }, - '#target':: d.obj(help='"CrossVersionObjectReference contains enough information to let you identify the referred resource."'), - target: { - '#withApiVersion':: d.fn(help='"API version of the referent"', args=[d.arg(name='apiVersion', type=d.T.string)]), - withApiVersion(apiVersion): { object+: { target+: { apiVersion: apiVersion } } }, - '#withKind':: d.fn(help='"Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds\\', args=[d.arg(name='kind', type=d.T.string)]), - withKind(kind): { object+: { target+: { kind: kind } } }, - '#withName':: d.fn(help='"Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { object+: { target+: { name: name } } }, - }, - '#withAverageValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. 
| .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='averageValue', type=d.T.string)]), - withAverageValue(averageValue): { object+: { averageValue: averageValue } }, - '#withCurrentValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. 
| .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='currentValue', type=d.T.string)]), - withCurrentValue(currentValue): { object+: { currentValue: currentValue } }, - '#withMetricName':: d.fn(help='"metricName is the name of the metric in question."', args=[d.arg(name='metricName', type=d.T.string)]), - withMetricName(metricName): { object+: { metricName: metricName } }, - }, - '#pods':: d.obj(help='"PodsMetricStatus indicates the current value of a metric describing each pod in the current scale target (for example, transactions-processed-per-second)."'), - pods: { - '#selector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), - selector: { - '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), - withMatchExpressions(matchExpressions): { pods+: { selector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } }, - '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. 
The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), - withMatchExpressionsMixin(matchExpressions): { pods+: { selector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } }, - '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), - withMatchLabels(matchLabels): { pods+: { selector+: { matchLabels: matchLabels } } }, - '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), - withMatchLabelsMixin(matchLabels): { pods+: { selector+: { matchLabels+: matchLabels } } }, - }, - '#withCurrentAverageValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. 
The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='currentAverageValue', type=d.T.string)]), - withCurrentAverageValue(currentAverageValue): { pods+: { currentAverageValue: currentAverageValue } }, - '#withMetricName':: d.fn(help='"metricName is the name of the metric in question"', args=[d.arg(name='metricName', type=d.T.string)]), - withMetricName(metricName): { pods+: { metricName: metricName } }, - }, - '#resource':: d.obj(help='"ResourceMetricStatus indicates the current value of a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \\"pods\\" source."'), - resource: { - '#withCurrentAverageUtilization':: d.fn(help='"currentAverageUtilization is the current value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. It will only be present if `targetAverageValue` was set in the corresponding metric specification."', args=[d.arg(name='currentAverageUtilization', type=d.T.integer)]), - withCurrentAverageUtilization(currentAverageUtilization): { resource+: { currentAverageUtilization: currentAverageUtilization } }, - '#withCurrentAverageValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. 
Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='currentAverageValue', type=d.T.string)]), - withCurrentAverageValue(currentAverageValue): { resource+: { currentAverageValue: currentAverageValue } }, - '#withName':: d.fn(help='"name is the name of the resource in question."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { resource+: { name: name } }, - }, - '#withType':: d.fn(help='"type is the type of metric source. It will be one of \\"ContainerResource\\", \\"External\\", \\"Object\\", \\"Pods\\" or \\"Resource\\", each corresponds to a matching field in the object. Note: \\"ContainerResource\\" type is available on when the feature-gate HPAContainerMetrics is enabled"', args=[d.arg(name='type', type=d.T.string)]), - withType(type): { type: type }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta1/objectMetricSource.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta1/objectMetricSource.libsonnet deleted file mode 100644 index a08c3167687..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta1/objectMetricSource.libsonnet +++ /dev/null @@ -1,32 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='objectMetricSource', url='', help='"ObjectMetricSource indicates how to scale on a metric describing a kubernetes object (for example, hits-per-second on an Ingress object)."'), - '#selector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), - selector: { - '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), - withMatchExpressions(matchExpressions): { selector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } }, - '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. 
The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), - withMatchExpressionsMixin(matchExpressions): { selector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } }, - '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), - withMatchLabels(matchLabels): { selector+: { matchLabels: matchLabels } }, - '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), - withMatchLabelsMixin(matchLabels): { selector+: { matchLabels+: matchLabels } }, - }, - '#target':: d.obj(help='"CrossVersionObjectReference contains enough information to let you identify the referred resource."'), - target: { - '#withApiVersion':: d.fn(help='"API version of the referent"', args=[d.arg(name='apiVersion', type=d.T.string)]), - withApiVersion(apiVersion): { target+: { apiVersion: apiVersion } }, - '#withKind':: d.fn(help='"Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds\\', args=[d.arg(name='kind', type=d.T.string)]), - withKind(kind): { target+: { kind: kind } }, - '#withName':: d.fn(help='"Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { target+: { name: name } }, - }, - '#withAverageValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. 
Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='averageValue', type=d.T.string)]), - withAverageValue(averageValue): { averageValue: averageValue }, - '#withMetricName':: d.fn(help='"metricName is the name of the metric in question."', args=[d.arg(name='metricName', type=d.T.string)]), - withMetricName(metricName): { metricName: metricName }, - '#withTargetValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". 
This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='targetValue', type=d.T.string)]), - withTargetValue(targetValue): { targetValue: targetValue }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta1/objectMetricStatus.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta1/objectMetricStatus.libsonnet deleted file mode 100644 index 4e8b05d6ab6..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta1/objectMetricStatus.libsonnet +++ /dev/null @@ -1,32 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='objectMetricStatus', url='', help='"ObjectMetricStatus indicates the current value of a metric describing a kubernetes object (for example, hits-per-second on an Ingress object)."'), - '#selector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), - selector: { - '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), - withMatchExpressions(matchExpressions): { selector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } }, - '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), - withMatchExpressionsMixin(matchExpressions): { selector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } }, - '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), - withMatchLabels(matchLabels): { selector+: { matchLabels: matchLabels } }, - '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". 
The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), - withMatchLabelsMixin(matchLabels): { selector+: { matchLabels+: matchLabels } }, - }, - '#target':: d.obj(help='"CrossVersionObjectReference contains enough information to let you identify the referred resource."'), - target: { - '#withApiVersion':: d.fn(help='"API version of the referent"', args=[d.arg(name='apiVersion', type=d.T.string)]), - withApiVersion(apiVersion): { target+: { apiVersion: apiVersion } }, - '#withKind':: d.fn(help='"Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds\\', args=[d.arg(name='kind', type=d.T.string)]), - withKind(kind): { target+: { kind: kind } }, - '#withName':: d.fn(help='"Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { target+: { name: name } }, - }, - '#withAverageValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. 
(So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='averageValue', type=d.T.string)]), - withAverageValue(averageValue): { averageValue: averageValue }, - '#withCurrentValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. 
(So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='currentValue', type=d.T.string)]), - withCurrentValue(currentValue): { currentValue: currentValue }, - '#withMetricName':: d.fn(help='"metricName is the name of the metric in question."', args=[d.arg(name='metricName', type=d.T.string)]), - withMetricName(metricName): { metricName: metricName }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta1/podsMetricSource.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta1/podsMetricSource.libsonnet deleted file mode 100644 index 5174952a2bb..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta1/podsMetricSource.libsonnet +++ /dev/null @@ -1,21 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='podsMetricSource', url='', help='"PodsMetricSource indicates how to scale on a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value."'), - '#selector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), - selector: { - '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), - withMatchExpressions(matchExpressions): { selector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } }, - '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), - withMatchExpressionsMixin(matchExpressions): { selector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } }, - '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), - withMatchLabels(matchLabels): { selector+: { matchLabels: matchLabels } }, - '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". 
The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), - withMatchLabelsMixin(matchLabels): { selector+: { matchLabels+: matchLabels } }, - }, - '#withMetricName':: d.fn(help='"metricName is the name of the metric in question"', args=[d.arg(name='metricName', type=d.T.string)]), - withMetricName(metricName): { metricName: metricName }, - '#withTargetAverageValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. 
(So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='targetAverageValue', type=d.T.string)]), - withTargetAverageValue(targetAverageValue): { targetAverageValue: targetAverageValue }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta1/podsMetricStatus.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta1/podsMetricStatus.libsonnet deleted file mode 100644 index 635cc310780..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta1/podsMetricStatus.libsonnet +++ /dev/null @@ -1,21 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='podsMetricStatus', url='', help='"PodsMetricStatus indicates the current value of a metric describing each pod in the current scale target (for example, transactions-processed-per-second)."'), - '#selector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), - selector: { - '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), - withMatchExpressions(matchExpressions): { selector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } }, - '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), - withMatchExpressionsMixin(matchExpressions): { selector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } }, - '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), - withMatchLabels(matchLabels): { selector+: { matchLabels: matchLabels } }, - '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), - withMatchLabelsMixin(matchLabels): { selector+: { matchLabels+: matchLabels } }, - }, - '#withCurrentAverageValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. 
It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. 
(So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='currentAverageValue', type=d.T.string)]), - withCurrentAverageValue(currentAverageValue): { currentAverageValue: currentAverageValue }, - '#withMetricName':: d.fn(help='"metricName is the name of the metric in question"', args=[d.arg(name='metricName', type=d.T.string)]), - withMetricName(metricName): { metricName: metricName }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta1/resourceMetricSource.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta1/resourceMetricSource.libsonnet deleted file mode 100644 index 17626b68b52..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta1/resourceMetricSource.libsonnet +++ /dev/null @@ -1,12 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='resourceMetricSource', url='', help='"ResourceMetricSource indicates how to scale on a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). The values will be averaged together before being compared to the target. Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \\"pods\\" source. Only one \\"target\\" type should be set."'), - '#withName':: d.fn(help='"name is the name of the resource in question."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { name: name }, - '#withTargetAverageUtilization':: d.fn(help='"targetAverageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods."', args=[d.arg(name='targetAverageUtilization', type=d.T.integer)]), - withTargetAverageUtilization(targetAverageUtilization): { targetAverageUtilization: targetAverageUtilization }, - '#withTargetAverageValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. 
| .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='targetAverageValue', type=d.T.string)]), - withTargetAverageValue(targetAverageValue): { targetAverageValue: targetAverageValue }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta1/resourceMetricStatus.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta1/resourceMetricStatus.libsonnet deleted file mode 100644 index 8a870c1eb84..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta1/resourceMetricStatus.libsonnet +++ /dev/null @@ -1,12 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='resourceMetricStatus', url='', help='"ResourceMetricStatus indicates the current value of a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \\"pods\\" source."'), - '#withCurrentAverageUtilization':: d.fn(help='"currentAverageUtilization is the current value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. 
It will only be present if `targetAverageValue` was set in the corresponding metric specification."', args=[d.arg(name='currentAverageUtilization', type=d.T.integer)]), - withCurrentAverageUtilization(currentAverageUtilization): { currentAverageUtilization: currentAverageUtilization }, - '#withCurrentAverageValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. 
(So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='currentAverageValue', type=d.T.string)]), - withCurrentAverageValue(currentAverageValue): { currentAverageValue: currentAverageValue }, - '#withName':: d.fn(help='"name is the name of the resource in question."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { name: name }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/containerResourceMetricSource.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/containerResourceMetricSource.libsonnet deleted file mode 100644 index 531a0b6c6a7..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/containerResourceMetricSource.libsonnet +++ /dev/null @@ -1,21 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='containerResourceMetricSource', url='', help='"ContainerResourceMetricSource indicates how to scale on a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). The values will be averaged together before being compared to the target. Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \\"pods\\" source. Only one \\"target\\" type should be set."'), - '#target':: d.obj(help='"MetricTarget defines the target value, average value, or average utilization of a specific metric"'), - target: { - '#withAverageUtilization':: d.fn(help='"averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for Resource metric source type"', args=[d.arg(name='averageUtilization', type=d.T.integer)]), - withAverageUtilization(averageUtilization): { target+: { averageUtilization: averageUtilization } }, - '#withAverageValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. 
| .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='averageValue', type=d.T.string)]), - withAverageValue(averageValue): { target+: { averageValue: averageValue } }, - '#withType':: d.fn(help='"type represents whether the metric type is Utilization, Value, or AverageValue"', args=[d.arg(name='type', type=d.T.string)]), - withType(type): { target+: { type: type } }, - '#withValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. 
| .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='value', type=d.T.string)]), - withValue(value): { target+: { value: value } }, - }, - '#withContainer':: d.fn(help='"container is the name of the container in the pods of the scaling target"', args=[d.arg(name='container', type=d.T.string)]), - withContainer(container): { container: container }, - '#withName':: d.fn(help='"name is the name of the resource in question."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { name: name }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/containerResourceMetricStatus.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/containerResourceMetricStatus.libsonnet deleted file mode 100644 index b0c77e47e67..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/containerResourceMetricStatus.libsonnet +++ /dev/null @@ -1,19 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='containerResourceMetricStatus', url='', help='"ContainerResourceMetricStatus indicates the current value of a resource metric known to Kubernetes, as specified in requests and limits, describing a single container in each pod in the current scale target (e.g. CPU or memory). 
Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \\"pods\\" source."'), - '#current':: d.obj(help='"MetricValueStatus holds the current value for a metric"'), - current: { - '#withAverageUtilization':: d.fn(help='"currentAverageUtilization is the current value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods."', args=[d.arg(name='averageUtilization', type=d.T.integer)]), - withAverageUtilization(averageUtilization): { current+: { averageUtilization: averageUtilization } }, - '#withAverageValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. 
(So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='averageValue', type=d.T.string)]), - withAverageValue(averageValue): { current+: { averageValue: averageValue } }, - '#withValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. 
(So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='value', type=d.T.string)]), - withValue(value): { current+: { value: value } }, - }, - '#withContainer':: d.fn(help='"Container is the name of the container in the pods of the scaling target"', args=[d.arg(name='container', type=d.T.string)]), - withContainer(container): { container: container }, - '#withName':: d.fn(help='"Name is the name of the resource in question."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { name: name }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/crossVersionObjectReference.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/crossVersionObjectReference.libsonnet deleted file mode 100644 index 643713bbcdb..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/crossVersionObjectReference.libsonnet +++ /dev/null @@ -1,10 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='crossVersionObjectReference', url='', help='"CrossVersionObjectReference contains enough information to let you identify the referred resource."'), - '#withKind':: d.fn(help='"Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds\\', args=[d.arg(name='kind', type=d.T.string)]), - withKind(kind): { kind: kind }, - '#withName':: d.fn(help='"Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { name: name }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/externalMetricSource.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/externalMetricSource.libsonnet deleted file mode 100644 index ce91a7acb47..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/externalMetricSource.libsonnet +++ /dev/null @@ -1,33 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='externalMetricSource', url='', help='"ExternalMetricSource indicates how to scale on a metric not associated with any Kubernetes object (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster)."'), - '#metric':: d.obj(help='"MetricIdentifier defines the name and optionally selector for a metric"'), - metric: { - '#selector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), - selector: { - '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. 
The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), - withMatchExpressions(matchExpressions): { metric+: { selector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } }, - '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), - withMatchExpressionsMixin(matchExpressions): { metric+: { selector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } }, - '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), - withMatchLabels(matchLabels): { metric+: { selector+: { matchLabels: matchLabels } } }, - '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), - withMatchLabelsMixin(matchLabels): { metric+: { selector+: { matchLabels+: matchLabels } } }, - }, - '#withName':: d.fn(help='"name is the name of the given metric"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { metric+: { name: name } }, - }, - '#target':: d.obj(help='"MetricTarget defines the target value, average value, or average utilization of a specific metric"'), - target: { - '#withAverageUtilization':: d.fn(help='"averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for Resource metric source type"', args=[d.arg(name='averageUtilization', type=d.T.integer)]), - withAverageUtilization(averageUtilization): { target+: { averageUtilization: averageUtilization } }, - '#withAverageValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. 
| .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='averageValue', type=d.T.string)]), - withAverageValue(averageValue): { target+: { averageValue: averageValue } }, - '#withType':: d.fn(help='"type represents whether the metric type is Utilization, Value, or AverageValue"', args=[d.arg(name='type', type=d.T.string)]), - withType(type): { target+: { type: type } }, - '#withValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. 
| .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='value', type=d.T.string)]), - withValue(value): { target+: { value: value } }, - }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/externalMetricStatus.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/externalMetricStatus.libsonnet deleted file mode 100644 index d0c9b2884a1..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/externalMetricStatus.libsonnet +++ /dev/null @@ -1,31 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='externalMetricStatus', url='', help='"ExternalMetricStatus indicates the current value of a global metric not associated with any Kubernetes object."'), - '#current':: d.obj(help='"MetricValueStatus holds the current value for a metric"'), - current: { - '#withAverageUtilization':: d.fn(help='"currentAverageUtilization is the current value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods."', args=[d.arg(name='averageUtilization', type=d.T.integer)]), - withAverageUtilization(averageUtilization): { current+: { averageUtilization: averageUtilization } }, - '#withAverageValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. 
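The externalMetricSource deletion that closes above has a direct counterpart under autoscaling/v2 in the 1.29 tree. A short sketch of building the same external metric against that path, assuming the generator emits the same nested metric/target builders; the import path, metric name, selector and target values are illustrative only:

    local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';
    local ext = k.autoscaling.v2.externalMetricSource;

    // Scale on an external queue-depth metric, averaging roughly 30 ready messages per pod.
    ext.metric.withName('queue_messages_ready')
    + ext.metric.selector.withMatchLabels({ queue: 'worker_tasks' })
    + ext.target.withType('AverageValue')
    + ext.target.withAverageValue('30')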
It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='averageValue', type=d.T.string)]), - withAverageValue(averageValue): { current+: { averageValue: averageValue } }, - '#withValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. 
| .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='value', type=d.T.string)]), - withValue(value): { current+: { value: value } }, - }, - '#metric':: d.obj(help='"MetricIdentifier defines the name and optionally selector for a metric"'), - metric: { - '#selector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), - selector: { - '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), - withMatchExpressions(matchExpressions): { metric+: { selector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } }, - '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), - withMatchExpressionsMixin(matchExpressions): { metric+: { selector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } }, - '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), - withMatchLabels(matchLabels): { metric+: { selector+: { matchLabels: matchLabels } } }, - '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), - withMatchLabelsMixin(matchLabels): { metric+: { selector+: { matchLabels+: matchLabels } } }, - }, - '#withName':: d.fn(help='"name is the name of the given metric"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { metric+: { name: name } }, - }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/metricSpec.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/metricSpec.libsonnet deleted file mode 100644 index cc0b990cd2a..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/metricSpec.libsonnet +++ /dev/null @@ -1,141 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='metricSpec', url='', help='"MetricSpec specifies how to scale based on a single metric (only `type` and one other matching field should be set at once)."'), - '#containerResource':: d.obj(help='"ContainerResourceMetricSource indicates how to scale on a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). The values will be averaged together before being compared to the target. Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \\"pods\\" source. Only one \\"target\\" type should be set."'), - containerResource: { - '#target':: d.obj(help='"MetricTarget defines the target value, average value, or average utilization of a specific metric"'), - target: { - '#withAverageUtilization':: d.fn(help='"averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for Resource metric source type"', args=[d.arg(name='averageUtilization', type=d.T.integer)]), - withAverageUtilization(averageUtilization): { containerResource+: { target+: { averageUtilization: averageUtilization } } }, - '#withAverageValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. 
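The externalMetricStatus builders removed above are likewise expected to be regenerated under autoscaling/v2; they mostly matter for fixtures that fabricate HPA status rather than for day-to-day manifests. A small sketch under that assumption, with placeholder names and values:

    local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';
    local extStatus = k.autoscaling.v2.externalMetricStatus;

    // Fabricated status fragment, e.g. for a test fixture that mimics controller output.
    extStatus.metric.withName('queue_messages_ready')
    + extStatus.current.withAverageValue('42')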
| .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='averageValue', type=d.T.string)]), - withAverageValue(averageValue): { containerResource+: { target+: { averageValue: averageValue } } }, - '#withType':: d.fn(help='"type represents whether the metric type is Utilization, Value, or AverageValue"', args=[d.arg(name='type', type=d.T.string)]), - withType(type): { containerResource+: { target+: { type: type } } }, - '#withValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. 
| .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='value', type=d.T.string)]), - withValue(value): { containerResource+: { target+: { value: value } } }, - }, - '#withContainer':: d.fn(help='"container is the name of the container in the pods of the scaling target"', args=[d.arg(name='container', type=d.T.string)]), - withContainer(container): { containerResource+: { container: container } }, - '#withName':: d.fn(help='"name is the name of the resource in question."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { containerResource+: { name: name } }, - }, - '#external':: d.obj(help='"ExternalMetricSource indicates how to scale on a metric not associated with any Kubernetes object (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster)."'), - external: { - '#metric':: d.obj(help='"MetricIdentifier defines the name and optionally selector for a metric"'), - metric: { - '#selector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), - selector: { - '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. 
The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), - withMatchExpressions(matchExpressions): { external+: { metric+: { selector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } } }, - '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), - withMatchExpressionsMixin(matchExpressions): { external+: { metric+: { selector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } } }, - '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), - withMatchLabels(matchLabels): { external+: { metric+: { selector+: { matchLabels: matchLabels } } } }, - '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), - withMatchLabelsMixin(matchLabels): { external+: { metric+: { selector+: { matchLabels+: matchLabels } } } }, - }, - '#withName':: d.fn(help='"name is the name of the given metric"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { external+: { metric+: { name: name } } }, - }, - '#target':: d.obj(help='"MetricTarget defines the target value, average value, or average utilization of a specific metric"'), - target: { - '#withAverageUtilization':: d.fn(help='"averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for Resource metric source type"', args=[d.arg(name='averageUtilization', type=d.T.integer)]), - withAverageUtilization(averageUtilization): { external+: { target+: { averageUtilization: averageUtilization } } }, - '#withAverageValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. 
| .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='averageValue', type=d.T.string)]), - withAverageValue(averageValue): { external+: { target+: { averageValue: averageValue } } }, - '#withType':: d.fn(help='"type represents whether the metric type is Utilization, Value, or AverageValue"', args=[d.arg(name='type', type=d.T.string)]), - withType(type): { external+: { target+: { type: type } } }, - '#withValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. 
| .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='value', type=d.T.string)]), - withValue(value): { external+: { target+: { value: value } } }, - }, - }, - '#object':: d.obj(help='"ObjectMetricSource indicates how to scale on a metric describing a kubernetes object (for example, hits-per-second on an Ingress object)."'), - object: { - '#describedObject':: d.obj(help='"CrossVersionObjectReference contains enough information to let you identify the referred resource."'), - describedObject: { - '#withApiVersion':: d.fn(help='"API version of the referent"', args=[d.arg(name='apiVersion', type=d.T.string)]), - withApiVersion(apiVersion): { object+: { describedObject+: { apiVersion: apiVersion } } }, - '#withKind':: d.fn(help='"Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds\\', args=[d.arg(name='kind', type=d.T.string)]), - withKind(kind): { object+: { describedObject+: { kind: kind } } }, - '#withName':: d.fn(help='"Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { object+: { describedObject+: { name: name } } }, - }, - '#metric':: d.obj(help='"MetricIdentifier defines the name and optionally selector for a metric"'), - metric: { - '#selector':: d.obj(help='"A label selector is a label query over a set of resources. 
The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), - selector: { - '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), - withMatchExpressions(matchExpressions): { object+: { metric+: { selector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } } }, - '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), - withMatchExpressionsMixin(matchExpressions): { object+: { metric+: { selector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } } }, - '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), - withMatchLabels(matchLabels): { object+: { metric+: { selector+: { matchLabels: matchLabels } } } }, - '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), - withMatchLabelsMixin(matchLabels): { object+: { metric+: { selector+: { matchLabels+: matchLabels } } } }, - }, - '#withName':: d.fn(help='"name is the name of the given metric"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { object+: { metric+: { name: name } } }, - }, - '#target':: d.obj(help='"MetricTarget defines the target value, average value, or average utilization of a specific metric"'), - target: { - '#withAverageUtilization':: d.fn(help='"averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for Resource metric source type"', args=[d.arg(name='averageUtilization', type=d.T.integer)]), - withAverageUtilization(averageUtilization): { object+: { target+: { averageUtilization: averageUtilization } } }, - '#withAverageValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. 
| .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='averageValue', type=d.T.string)]), - withAverageValue(averageValue): { object+: { target+: { averageValue: averageValue } } }, - '#withType':: d.fn(help='"type represents whether the metric type is Utilization, Value, or AverageValue"', args=[d.arg(name='type', type=d.T.string)]), - withType(type): { object+: { target+: { type: type } } }, - '#withValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. 
| .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='value', type=d.T.string)]), - withValue(value): { object+: { target+: { value: value } } }, - }, - }, - '#pods':: d.obj(help='"PodsMetricSource indicates how to scale on a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value."'), - pods: { - '#metric':: d.obj(help='"MetricIdentifier defines the name and optionally selector for a metric"'), - metric: { - '#selector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), - selector: { - '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), - withMatchExpressions(matchExpressions): { pods+: { metric+: { selector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } } }, - '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. 
The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), - withMatchExpressionsMixin(matchExpressions): { pods+: { metric+: { selector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } } }, - '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), - withMatchLabels(matchLabels): { pods+: { metric+: { selector+: { matchLabels: matchLabels } } } }, - '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), - withMatchLabelsMixin(matchLabels): { pods+: { metric+: { selector+: { matchLabels+: matchLabels } } } }, - }, - '#withName':: d.fn(help='"name is the name of the given metric"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { pods+: { metric+: { name: name } } }, - }, - '#target':: d.obj(help='"MetricTarget defines the target value, average value, or average utilization of a specific metric"'), - target: { - '#withAverageUtilization':: d.fn(help='"averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for Resource metric source type"', args=[d.arg(name='averageUtilization', type=d.T.integer)]), - withAverageUtilization(averageUtilization): { pods+: { target+: { averageUtilization: averageUtilization } } }, - '#withAverageValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. 
| .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='averageValue', type=d.T.string)]), - withAverageValue(averageValue): { pods+: { target+: { averageValue: averageValue } } }, - '#withType':: d.fn(help='"type represents whether the metric type is Utilization, Value, or AverageValue"', args=[d.arg(name='type', type=d.T.string)]), - withType(type): { pods+: { target+: { type: type } } }, - '#withValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. 
| .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='value', type=d.T.string)]), - withValue(value): { pods+: { target+: { value: value } } }, - }, - }, - '#resource':: d.obj(help='"ResourceMetricSource indicates how to scale on a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). The values will be averaged together before being compared to the target. Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \\"pods\\" source. Only one \\"target\\" type should be set."'), - resource: { - '#target':: d.obj(help='"MetricTarget defines the target value, average value, or average utilization of a specific metric"'), - target: { - '#withAverageUtilization':: d.fn(help='"averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for Resource metric source type"', args=[d.arg(name='averageUtilization', type=d.T.integer)]), - withAverageUtilization(averageUtilization): { resource+: { target+: { averageUtilization: averageUtilization } } }, - '#withAverageValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. 
It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='averageValue', type=d.T.string)]), - withAverageValue(averageValue): { resource+: { target+: { averageValue: averageValue } } }, - '#withType':: d.fn(help='"type represents whether the metric type is Utilization, Value, or AverageValue"', args=[d.arg(name='type', type=d.T.string)]), - withType(type): { resource+: { target+: { type: type } } }, - '#withValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... 
| 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='value', type=d.T.string)]), - withValue(value): { resource+: { target+: { value: value } } }, - }, - '#withName':: d.fn(help='"name is the name of the resource in question."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { resource+: { name: name } }, - }, - '#withType':: d.fn(help='"type is the type of metric source. It should be one of \\"ContainerResource\\", \\"External\\", \\"Object\\", \\"Pods\\" or \\"Resource\\", each mapping to a matching field in the object. 
Note: \\"ContainerResource\\" type is available on when the feature-gate HPAContainerMetrics is enabled"', args=[d.arg(name='type', type=d.T.string)]), - withType(type): { type: type }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/metricStatus.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/metricStatus.libsonnet deleted file mode 100644 index 0b823f71881..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/metricStatus.libsonnet +++ /dev/null @@ -1,131 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='metricStatus', url='', help='"MetricStatus describes the last-read state of a single metric."'), - '#containerResource':: d.obj(help='"ContainerResourceMetricStatus indicates the current value of a resource metric known to Kubernetes, as specified in requests and limits, describing a single container in each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \\"pods\\" source."'), - containerResource: { - '#current':: d.obj(help='"MetricValueStatus holds the current value for a metric"'), - current: { - '#withAverageUtilization':: d.fn(help='"currentAverageUtilization is the current value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods."', args=[d.arg(name='averageUtilization', type=d.T.integer)]), - withAverageUtilization(averageUtilization): { containerResource+: { current+: { averageUtilization: averageUtilization } } }, - '#withAverageValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) 
This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='averageValue', type=d.T.string)]), - withAverageValue(averageValue): { containerResource+: { current+: { averageValue: averageValue } } }, - '#withValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. 
The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='value', type=d.T.string)]), - withValue(value): { containerResource+: { current+: { value: value } } }, - }, - '#withContainer':: d.fn(help='"Container is the name of the container in the pods of the scaling target"', args=[d.arg(name='container', type=d.T.string)]), - withContainer(container): { containerResource+: { container: container } }, - '#withName':: d.fn(help='"Name is the name of the resource in question."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { containerResource+: { name: name } }, - }, - '#external':: d.obj(help='"ExternalMetricStatus indicates the current value of a global metric not associated with any Kubernetes object."'), - external: { - '#current':: d.obj(help='"MetricValueStatus holds the current value for a metric"'), - current: { - '#withAverageUtilization':: d.fn(help='"currentAverageUtilization is the current value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods."', args=[d.arg(name='averageUtilization', type=d.T.integer)]), - withAverageUtilization(averageUtilization): { external+: { current+: { averageUtilization: averageUtilization } } }, - '#withAverageValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) 
This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='averageValue', type=d.T.string)]), - withAverageValue(averageValue): { external+: { current+: { averageValue: averageValue } } }, - '#withValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. 
The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='value', type=d.T.string)]), - withValue(value): { external+: { current+: { value: value } } }, - }, - '#metric':: d.obj(help='"MetricIdentifier defines the name and optionally selector for a metric"'), - metric: { - '#selector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), - selector: { - '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), - withMatchExpressions(matchExpressions): { external+: { metric+: { selector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } } }, - '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), - withMatchExpressionsMixin(matchExpressions): { external+: { metric+: { selector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } } }, - '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), - withMatchLabels(matchLabels): { external+: { metric+: { selector+: { matchLabels: matchLabels } } } }, - '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". 
The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), - withMatchLabelsMixin(matchLabels): { external+: { metric+: { selector+: { matchLabels+: matchLabels } } } }, - }, - '#withName':: d.fn(help='"name is the name of the given metric"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { external+: { metric+: { name: name } } }, - }, - }, - '#object':: d.obj(help='"ObjectMetricStatus indicates the current value of a metric describing a kubernetes object (for example, hits-per-second on an Ingress object)."'), - object: { - '#current':: d.obj(help='"MetricValueStatus holds the current value for a metric"'), - current: { - '#withAverageUtilization':: d.fn(help='"currentAverageUtilization is the current value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods."', args=[d.arg(name='averageUtilization', type=d.T.integer)]), - withAverageUtilization(averageUtilization): { object+: { current+: { averageUtilization: averageUtilization } } }, - '#withAverageValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. 
That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='averageValue', type=d.T.string)]), - withAverageValue(averageValue): { object+: { current+: { averageValue: averageValue } } }, - '#withValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. 
(So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='value', type=d.T.string)]), - withValue(value): { object+: { current+: { value: value } } }, - }, - '#describedObject':: d.obj(help='"CrossVersionObjectReference contains enough information to let you identify the referred resource."'), - describedObject: { - '#withApiVersion':: d.fn(help='"API version of the referent"', args=[d.arg(name='apiVersion', type=d.T.string)]), - withApiVersion(apiVersion): { object+: { describedObject+: { apiVersion: apiVersion } } }, - '#withKind':: d.fn(help='"Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds\\', args=[d.arg(name='kind', type=d.T.string)]), - withKind(kind): { object+: { describedObject+: { kind: kind } } }, - '#withName':: d.fn(help='"Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { object+: { describedObject+: { name: name } } }, - }, - '#metric':: d.obj(help='"MetricIdentifier defines the name and optionally selector for a metric"'), - metric: { - '#selector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), - selector: { - '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), - withMatchExpressions(matchExpressions): { object+: { metric+: { selector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } } }, - '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), - withMatchExpressionsMixin(matchExpressions): { object+: { metric+: { selector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } } }, - '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), - withMatchLabels(matchLabels): { object+: { metric+: { selector+: { matchLabels: matchLabels } } } }, - '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". 
The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), - withMatchLabelsMixin(matchLabels): { object+: { metric+: { selector+: { matchLabels+: matchLabels } } } }, - }, - '#withName':: d.fn(help='"name is the name of the given metric"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { object+: { metric+: { name: name } } }, - }, - }, - '#pods':: d.obj(help='"PodsMetricStatus indicates the current value of a metric describing each pod in the current scale target (for example, transactions-processed-per-second)."'), - pods: { - '#current':: d.obj(help='"MetricValueStatus holds the current value for a metric"'), - current: { - '#withAverageUtilization':: d.fn(help='"currentAverageUtilization is the current value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods."', args=[d.arg(name='averageUtilization', type=d.T.integer)]), - withAverageUtilization(averageUtilization): { pods+: { current+: { averageUtilization: averageUtilization } } }, - '#withAverageValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. 
That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='averageValue', type=d.T.string)]), - withAverageValue(averageValue): { pods+: { current+: { averageValue: averageValue } } }, - '#withValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. 
(So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='value', type=d.T.string)]), - withValue(value): { pods+: { current+: { value: value } } }, - }, - '#metric':: d.obj(help='"MetricIdentifier defines the name and optionally selector for a metric"'), - metric: { - '#selector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), - selector: { - '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), - withMatchExpressions(matchExpressions): { pods+: { metric+: { selector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } } }, - '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), - withMatchExpressionsMixin(matchExpressions): { pods+: { metric+: { selector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } } }, - '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), - withMatchLabels(matchLabels): { pods+: { metric+: { selector+: { matchLabels: matchLabels } } } }, - '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), - withMatchLabelsMixin(matchLabels): { pods+: { metric+: { selector+: { matchLabels+: matchLabels } } } }, - }, - '#withName':: d.fn(help='"name is the name of the given metric"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { pods+: { metric+: { name: name } } }, - }, - }, - '#resource':: d.obj(help='"ResourceMetricStatus indicates the current value of a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). 
Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \\"pods\\" source."'), - resource: { - '#current':: d.obj(help='"MetricValueStatus holds the current value for a metric"'), - current: { - '#withAverageUtilization':: d.fn(help='"currentAverageUtilization is the current value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods."', args=[d.arg(name='averageUtilization', type=d.T.integer)]), - withAverageUtilization(averageUtilization): { resource+: { current+: { averageUtilization: averageUtilization } } }, - '#withAverageValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. 
(So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='averageValue', type=d.T.string)]), - withAverageValue(averageValue): { resource+: { current+: { averageValue: averageValue } } }, - '#withValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='value', type=d.T.string)]), - withValue(value): { resource+: { current+: { value: value } } }, - }, - '#withName':: d.fn(help='"Name is the name of the resource in question."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { resource+: { name: name } }, - }, - '#withType':: d.fn(help='"type is the type of metric source. 
It will be one of \\"ContainerResource\\", \\"External\\", \\"Object\\", \\"Pods\\" or \\"Resource\\", each corresponds to a matching field in the object. Note: \\"ContainerResource\\" type is available on when the feature-gate HPAContainerMetrics is enabled"', args=[d.arg(name='type', type=d.T.string)]), - withType(type): { type: type }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/metricTarget.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/metricTarget.libsonnet deleted file mode 100644 index 63672a4066b..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/metricTarget.libsonnet +++ /dev/null @@ -1,14 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='metricTarget', url='', help='"MetricTarget defines the target value, average value, or average utilization of a specific metric"'), - '#withAverageUtilization':: d.fn(help='"averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for Resource metric source type"', args=[d.arg(name='averageUtilization', type=d.T.integer)]), - withAverageUtilization(averageUtilization): { averageUtilization: averageUtilization }, - '#withAverageValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. 
The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='averageValue', type=d.T.string)]), - withAverageValue(averageValue): { averageValue: averageValue }, - '#withType':: d.fn(help='"type represents whether the metric type is Utilization, Value, or AverageValue"', args=[d.arg(name='type', type=d.T.string)]), - withType(type): { type: type }, - '#withValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. 
(So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='value', type=d.T.string)]), - withValue(value): { value: value }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/metricValueStatus.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/metricValueStatus.libsonnet deleted file mode 100644 index 5f220873496..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/metricValueStatus.libsonnet +++ /dev/null @@ -1,12 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='metricValueStatus', url='', help='"MetricValueStatus holds the current value for a metric"'), - '#withAverageUtilization':: d.fn(help='"currentAverageUtilization is the current value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods."', args=[d.arg(name='averageUtilization', type=d.T.integer)]), - withAverageUtilization(averageUtilization): { averageUtilization: averageUtilization }, - '#withAverageValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. 
The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='averageValue', type=d.T.string)]), - withAverageValue(averageValue): { averageValue: averageValue }, - '#withValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. 
(So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='value', type=d.T.string)]), - withValue(value): { value: value }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/objectMetricSource.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/objectMetricSource.libsonnet deleted file mode 100644 index 1639bfcb0f3..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/objectMetricSource.libsonnet +++ /dev/null @@ -1,42 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='objectMetricSource', url='', help='"ObjectMetricSource indicates how to scale on a metric describing a kubernetes object (for example, hits-per-second on an Ingress object)."'), - '#describedObject':: d.obj(help='"CrossVersionObjectReference contains enough information to let you identify the referred resource."'), - describedObject: { - '#withApiVersion':: d.fn(help='"API version of the referent"', args=[d.arg(name='apiVersion', type=d.T.string)]), - withApiVersion(apiVersion): { describedObject+: { apiVersion: apiVersion } }, - '#withKind':: d.fn(help='"Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds\\', args=[d.arg(name='kind', type=d.T.string)]), - withKind(kind): { describedObject+: { kind: kind } }, - '#withName':: d.fn(help='"Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { describedObject+: { name: name } }, - }, - '#metric':: d.obj(help='"MetricIdentifier defines the name and optionally selector for a metric"'), - metric: { - '#selector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), - selector: { - '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), - withMatchExpressions(matchExpressions): { metric+: { selector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } }, - '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), - withMatchExpressionsMixin(matchExpressions): { metric+: { selector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } }, - '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". 
The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), - withMatchLabels(matchLabels): { metric+: { selector+: { matchLabels: matchLabels } } }, - '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), - withMatchLabelsMixin(matchLabels): { metric+: { selector+: { matchLabels+: matchLabels } } }, - }, - '#withName':: d.fn(help='"name is the name of the given metric"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { metric+: { name: name } }, - }, - '#target':: d.obj(help='"MetricTarget defines the target value, average value, or average utilization of a specific metric"'), - target: { - '#withAverageUtilization':: d.fn(help='"averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for Resource metric source type"', args=[d.arg(name='averageUtilization', type=d.T.integer)]), - withAverageUtilization(averageUtilization): { target+: { averageUtilization: averageUtilization } }, - '#withAverageValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. 
The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='averageValue', type=d.T.string)]), - withAverageValue(averageValue): { target+: { averageValue: averageValue } }, - '#withType':: d.fn(help='"type represents whether the metric type is Utilization, Value, or AverageValue"', args=[d.arg(name='type', type=d.T.string)]), - withType(type): { target+: { type: type } }, - '#withValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. 
That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='value', type=d.T.string)]), - withValue(value): { target+: { value: value } }, - }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/objectMetricStatus.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/objectMetricStatus.libsonnet deleted file mode 100644 index bf16df5f94c..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/objectMetricStatus.libsonnet +++ /dev/null @@ -1,40 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='objectMetricStatus', url='', help='"ObjectMetricStatus indicates the current value of a metric describing a kubernetes object (for example, hits-per-second on an Ingress object)."'), - '#current':: d.obj(help='"MetricValueStatus holds the current value for a metric"'), - current: { - '#withAverageUtilization':: d.fn(help='"currentAverageUtilization is the current value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods."', args=[d.arg(name='averageUtilization', type=d.T.integer)]), - withAverageUtilization(averageUtilization): { current+: { averageUtilization: averageUtilization } }, - '#withAverageValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) 
This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='averageValue', type=d.T.string)]), - withAverageValue(averageValue): { current+: { averageValue: averageValue } }, - '#withValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. 
The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='value', type=d.T.string)]), - withValue(value): { current+: { value: value } }, - }, - '#describedObject':: d.obj(help='"CrossVersionObjectReference contains enough information to let you identify the referred resource."'), - describedObject: { - '#withApiVersion':: d.fn(help='"API version of the referent"', args=[d.arg(name='apiVersion', type=d.T.string)]), - withApiVersion(apiVersion): { describedObject+: { apiVersion: apiVersion } }, - '#withKind':: d.fn(help='"Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds\\', args=[d.arg(name='kind', type=d.T.string)]), - withKind(kind): { describedObject+: { kind: kind } }, - '#withName':: d.fn(help='"Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { describedObject+: { name: name } }, - }, - '#metric':: d.obj(help='"MetricIdentifier defines the name and optionally selector for a metric"'), - metric: { - '#selector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), - selector: { - '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), - withMatchExpressions(matchExpressions): { metric+: { selector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } }, - '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), - withMatchExpressionsMixin(matchExpressions): { metric+: { selector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } }, - '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), - withMatchLabels(matchLabels): { metric+: { selector+: { matchLabels: matchLabels } } }, - '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), - withMatchLabelsMixin(matchLabels): { metric+: { selector+: { matchLabels+: matchLabels } } }, - }, - '#withName':: d.fn(help='"name is the name of the given metric"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { metric+: { name: name } }, - }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/podsMetricSource.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/podsMetricSource.libsonnet deleted file mode 100644 index e21f772169d..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/podsMetricSource.libsonnet +++ /dev/null @@ -1,33 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='podsMetricSource', url='', help='"PodsMetricSource indicates how to scale on a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value."'), - '#metric':: d.obj(help='"MetricIdentifier defines the name and optionally selector for a metric"'), - metric: { - '#selector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), - selector: { - '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), - withMatchExpressions(matchExpressions): { metric+: { selector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } }, - '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), - withMatchExpressionsMixin(matchExpressions): { metric+: { selector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } }, - '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), - withMatchLabels(matchLabels): { metric+: { selector+: { matchLabels: matchLabels } } }, - '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". 
The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), - withMatchLabelsMixin(matchLabels): { metric+: { selector+: { matchLabels+: matchLabels } } }, - }, - '#withName':: d.fn(help='"name is the name of the given metric"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { metric+: { name: name } }, - }, - '#target':: d.obj(help='"MetricTarget defines the target value, average value, or average utilization of a specific metric"'), - target: { - '#withAverageUtilization':: d.fn(help='"averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for Resource metric source type"', args=[d.arg(name='averageUtilization', type=d.T.integer)]), - withAverageUtilization(averageUtilization): { target+: { averageUtilization: averageUtilization } }, - '#withAverageValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. 
(So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='averageValue', type=d.T.string)]), - withAverageValue(averageValue): { target+: { averageValue: averageValue } }, - '#withType':: d.fn(help='"type represents whether the metric type is Utilization, Value, or AverageValue"', args=[d.arg(name='type', type=d.T.string)]), - withType(type): { target+: { type: type } }, - '#withValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. 
(So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='value', type=d.T.string)]), - withValue(value): { target+: { value: value } }, - }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/podsMetricStatus.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/podsMetricStatus.libsonnet deleted file mode 100644 index 28c40525e2c..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/podsMetricStatus.libsonnet +++ /dev/null @@ -1,31 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='podsMetricStatus', url='', help='"PodsMetricStatus indicates the current value of a metric describing each pod in the current scale target (for example, transactions-processed-per-second)."'), - '#current':: d.obj(help='"MetricValueStatus holds the current value for a metric"'), - current: { - '#withAverageUtilization':: d.fn(help='"currentAverageUtilization is the current value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods."', args=[d.arg(name='averageUtilization', type=d.T.integer)]), - withAverageUtilization(averageUtilization): { current+: { averageUtilization: averageUtilization } }, - '#withAverageValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". 
This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='averageValue', type=d.T.string)]), - withAverageValue(averageValue): { current+: { averageValue: averageValue } }, - '#withValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. 
That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='value', type=d.T.string)]), - withValue(value): { current+: { value: value } }, - }, - '#metric':: d.obj(help='"MetricIdentifier defines the name and optionally selector for a metric"'), - metric: { - '#selector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), - selector: { - '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), - withMatchExpressions(matchExpressions): { metric+: { selector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } }, - '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), - withMatchExpressionsMixin(matchExpressions): { metric+: { selector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } }, - '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), - withMatchLabels(matchLabels): { metric+: { selector+: { matchLabels: matchLabels } } }, - '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". 
The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), - withMatchLabelsMixin(matchLabels): { metric+: { selector+: { matchLabels+: matchLabels } } }, - }, - '#withName':: d.fn(help='"name is the name of the given metric"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { metric+: { name: name } }, - }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/resourceMetricSource.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/resourceMetricSource.libsonnet deleted file mode 100644 index c73209bea8b..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/resourceMetricSource.libsonnet +++ /dev/null @@ -1,19 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='resourceMetricSource', url='', help='"ResourceMetricSource indicates how to scale on a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). The values will be averaged together before being compared to the target. Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \\"pods\\" source. Only one \\"target\\" type should be set."'), - '#target':: d.obj(help='"MetricTarget defines the target value, average value, or average utilization of a specific metric"'), - target: { - '#withAverageUtilization':: d.fn(help='"averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for Resource metric source type"', args=[d.arg(name='averageUtilization', type=d.T.integer)]), - withAverageUtilization(averageUtilization): { target+: { averageUtilization: averageUtilization } }, - '#withAverageValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. 
Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='averageValue', type=d.T.string)]), - withAverageValue(averageValue): { target+: { averageValue: averageValue } }, - '#withType':: d.fn(help='"type represents whether the metric type is Utilization, Value, or AverageValue"', args=[d.arg(name='type', type=d.T.string)]), - withType(type): { target+: { type: type } }, - '#withValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". 
This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='value', type=d.T.string)]), - withValue(value): { target+: { value: value } }, - }, - '#withName':: d.fn(help='"name is the name of the resource in question."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { name: name }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/resourceMetricStatus.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/resourceMetricStatus.libsonnet deleted file mode 100644 index ea377dc7cb0..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/resourceMetricStatus.libsonnet +++ /dev/null @@ -1,17 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='resourceMetricStatus', url='', help='"ResourceMetricStatus indicates the current value of a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \\"pods\\" source."'), - '#current':: d.obj(help='"MetricValueStatus holds the current value for a metric"'), - current: { - '#withAverageUtilization':: d.fn(help='"currentAverageUtilization is the current value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods."', args=[d.arg(name='averageUtilization', type=d.T.integer)]), - withAverageUtilization(averageUtilization): { current+: { averageUtilization: averageUtilization } }, - '#withAverageValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. 
| .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='averageValue', type=d.T.string)]), - withAverageValue(averageValue): { current+: { averageValue: averageValue } }, - '#withValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. 
| .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='value', type=d.T.string)]), - withValue(value): { current+: { value: value } }, - }, - '#withName':: d.fn(help='"Name is the name of the resource in question."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { name: name }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/batch/v1/jobTemplateSpec.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/batch/v1/jobTemplateSpec.libsonnet deleted file mode 100644 index 5b4af07f25b..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/batch/v1/jobTemplateSpec.libsonnet +++ /dev/null @@ -1,320 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='jobTemplateSpec', url='', help='"JobTemplateSpec describes the data a Job should have when created from a template"'), - '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), - metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. 
More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), - withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), - withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, - '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), - withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, - '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), - withDeletionGracePeriodSeconds(deletionGracePeriodSeconds): { metadata+: { deletionGracePeriodSeconds: deletionGracePeriodSeconds } }, - '#withDeletionTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='deletionTimestamp', type=d.T.string)]), - withDeletionTimestamp(deletionTimestamp): { metadata+: { deletionTimestamp: deletionTimestamp } }, - '#withFinalizers':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. 
Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), - withGenerateName(generateName): { metadata+: { generateName: generateName } }, - '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), - withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), - withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), - withLabelsMixin(labels): { metadata+: { labels+: labels } }, - '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. 
A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { metadata+: { namespace: namespace } }, - '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, - '#withOwnerReferencesMixin':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. 
There cannot be more than one managing controller."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, - '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), - withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), - withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), - withUid(uid): { metadata+: { uid: uid } }, - }, - '#spec':: d.obj(help='"JobSpec describes how the job execution will look like."'), - spec: { - '#selector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), - selector: { - '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), - withMatchExpressions(matchExpressions): { spec+: { selector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } }, - '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), - withMatchExpressionsMixin(matchExpressions): { spec+: { selector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } }, - '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". 
The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), - withMatchLabels(matchLabels): { spec+: { selector+: { matchLabels: matchLabels } } }, - '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), - withMatchLabelsMixin(matchLabels): { spec+: { selector+: { matchLabels+: matchLabels } } }, - }, - '#template':: d.obj(help='"PodTemplateSpec describes the data a pod should have when created from a template"'), - template: { - '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), - metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), - withAnnotations(annotations): { spec+: { template+: { metadata+: { annotations: annotations } } } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), - withAnnotationsMixin(annotations): { spec+: { template+: { metadata+: { annotations+: annotations } } } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { spec+: { template+: { metadata+: { clusterName: clusterName } } } }, - '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), - withCreationTimestamp(creationTimestamp): { spec+: { template+: { metadata+: { creationTimestamp: creationTimestamp } } } }, - '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), - withDeletionGracePeriodSeconds(deletionGracePeriodSeconds): { spec+: { template+: { metadata+: { deletionGracePeriodSeconds: deletionGracePeriodSeconds } } } }, - '#withDeletionTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. 
Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='deletionTimestamp', type=d.T.string)]), - withDeletionTimestamp(deletionTimestamp): { spec+: { template+: { metadata+: { deletionTimestamp: deletionTimestamp } } } }, - '#withFinalizers':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizers(finalizers): { spec+: { template+: { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } } } }, - '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizersMixin(finalizers): { spec+: { template+: { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } } } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), - withGenerateName(generateName): { spec+: { template+: { metadata+: { generateName: generateName } } } }, - '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), - withGeneration(generation): { spec+: { template+: { metadata+: { generation: generation } } } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), - withLabels(labels): { spec+: { template+: { metadata+: { labels: labels } } } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), - withLabelsMixin(labels): { spec+: { template+: { metadata+: { labels+: labels } } } }, - '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFields(managedFields): { spec+: { template+: { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } } } }, - '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFieldsMixin(managedFields): { spec+: { template+: { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } } } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { spec+: { template+: { metadata+: { name: name } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. 
An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { spec+: { template+: { metadata+: { namespace: namespace } } } }, - '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferences(ownerReferences): { spec+: { template+: { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } } } }, - '#withOwnerReferencesMixin':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferencesMixin(ownerReferences): { spec+: { template+: { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } } } }, - '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), - withResourceVersion(resourceVersion): { spec+: { template+: { metadata+: { resourceVersion: resourceVersion } } } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), - withSelfLink(selfLink): { spec+: { template+: { metadata+: { selfLink: selfLink } } } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. 
More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), - withUid(uid): { spec+: { template+: { metadata+: { uid: uid } } } }, - }, - '#spec':: d.obj(help='"PodSpec is a description of a pod."'), - spec: { - '#affinity':: d.obj(help='"Affinity is a group of affinity scheduling rules."'), - affinity: { - '#nodeAffinity':: d.obj(help='"Node affinity is a group of node affinity scheduling rules."'), - nodeAffinity: { - '#requiredDuringSchedulingIgnoredDuringExecution':: d.obj(help='"A node selector represents the union of the results of one or more label queries over a set of nodes; that is, it represents the OR of the selectors represented by the node selector terms."'), - requiredDuringSchedulingIgnoredDuringExecution: { - '#withNodeSelectorTerms':: d.fn(help='"Required. A list of node selector terms. The terms are ORed."', args=[d.arg(name='nodeSelectorTerms', type=d.T.array)]), - withNodeSelectorTerms(nodeSelectorTerms): { spec+: { template+: { spec+: { affinity+: { nodeAffinity+: { requiredDuringSchedulingIgnoredDuringExecution+: { nodeSelectorTerms: if std.isArray(v=nodeSelectorTerms) then nodeSelectorTerms else [nodeSelectorTerms] } } } } } } }, - '#withNodeSelectorTermsMixin':: d.fn(help='"Required. A list of node selector terms. The terms are ORed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='nodeSelectorTerms', type=d.T.array)]), - withNodeSelectorTermsMixin(nodeSelectorTerms): { spec+: { template+: { spec+: { affinity+: { nodeAffinity+: { requiredDuringSchedulingIgnoredDuringExecution+: { nodeSelectorTerms+: if std.isArray(v=nodeSelectorTerms) then nodeSelectorTerms else [nodeSelectorTerms] } } } } } } }, - }, - '#withPreferredDuringSchedulingIgnoredDuringExecution':: d.fn(help='"The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding \\"weight\\" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred."', args=[d.arg(name='preferredDuringSchedulingIgnoredDuringExecution', type=d.T.array)]), - withPreferredDuringSchedulingIgnoredDuringExecution(preferredDuringSchedulingIgnoredDuringExecution): { spec+: { template+: { spec+: { affinity+: { nodeAffinity+: { preferredDuringSchedulingIgnoredDuringExecution: if std.isArray(v=preferredDuringSchedulingIgnoredDuringExecution) then preferredDuringSchedulingIgnoredDuringExecution else [preferredDuringSchedulingIgnoredDuringExecution] } } } } } }, - '#withPreferredDuringSchedulingIgnoredDuringExecutionMixin':: d.fn(help='"The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. 
for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding \\"weight\\" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='preferredDuringSchedulingIgnoredDuringExecution', type=d.T.array)]), - withPreferredDuringSchedulingIgnoredDuringExecutionMixin(preferredDuringSchedulingIgnoredDuringExecution): { spec+: { template+: { spec+: { affinity+: { nodeAffinity+: { preferredDuringSchedulingIgnoredDuringExecution+: if std.isArray(v=preferredDuringSchedulingIgnoredDuringExecution) then preferredDuringSchedulingIgnoredDuringExecution else [preferredDuringSchedulingIgnoredDuringExecution] } } } } } }, - }, - '#podAffinity':: d.obj(help='"Pod affinity is a group of inter pod affinity scheduling rules."'), - podAffinity: { - '#withPreferredDuringSchedulingIgnoredDuringExecution':: d.fn(help='"The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding \\"weight\\" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred."', args=[d.arg(name='preferredDuringSchedulingIgnoredDuringExecution', type=d.T.array)]), - withPreferredDuringSchedulingIgnoredDuringExecution(preferredDuringSchedulingIgnoredDuringExecution): { spec+: { template+: { spec+: { affinity+: { podAffinity+: { preferredDuringSchedulingIgnoredDuringExecution: if std.isArray(v=preferredDuringSchedulingIgnoredDuringExecution) then preferredDuringSchedulingIgnoredDuringExecution else [preferredDuringSchedulingIgnoredDuringExecution] } } } } } }, - '#withPreferredDuringSchedulingIgnoredDuringExecutionMixin':: d.fn(help='"The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. 
for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding \\"weight\\" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='preferredDuringSchedulingIgnoredDuringExecution', type=d.T.array)]), - withPreferredDuringSchedulingIgnoredDuringExecutionMixin(preferredDuringSchedulingIgnoredDuringExecution): { spec+: { template+: { spec+: { affinity+: { podAffinity+: { preferredDuringSchedulingIgnoredDuringExecution+: if std.isArray(v=preferredDuringSchedulingIgnoredDuringExecution) then preferredDuringSchedulingIgnoredDuringExecution else [preferredDuringSchedulingIgnoredDuringExecution] } } } } } }, - '#withRequiredDuringSchedulingIgnoredDuringExecution':: d.fn(help='"If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied."', args=[d.arg(name='requiredDuringSchedulingIgnoredDuringExecution', type=d.T.array)]), - withRequiredDuringSchedulingIgnoredDuringExecution(requiredDuringSchedulingIgnoredDuringExecution): { spec+: { template+: { spec+: { affinity+: { podAffinity+: { requiredDuringSchedulingIgnoredDuringExecution: if std.isArray(v=requiredDuringSchedulingIgnoredDuringExecution) then requiredDuringSchedulingIgnoredDuringExecution else [requiredDuringSchedulingIgnoredDuringExecution] } } } } } }, - '#withRequiredDuringSchedulingIgnoredDuringExecutionMixin':: d.fn(help='"If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='requiredDuringSchedulingIgnoredDuringExecution', type=d.T.array)]), - withRequiredDuringSchedulingIgnoredDuringExecutionMixin(requiredDuringSchedulingIgnoredDuringExecution): { spec+: { template+: { spec+: { affinity+: { podAffinity+: { requiredDuringSchedulingIgnoredDuringExecution+: if std.isArray(v=requiredDuringSchedulingIgnoredDuringExecution) then requiredDuringSchedulingIgnoredDuringExecution else [requiredDuringSchedulingIgnoredDuringExecution] } } } } } }, - }, - '#podAntiAffinity':: d.obj(help='"Pod anti affinity is a group of inter pod anti affinity scheduling rules."'), - podAntiAffinity: { - '#withPreferredDuringSchedulingIgnoredDuringExecution':: d.fn(help='"The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. 
The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding \\"weight\\" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred."', args=[d.arg(name='preferredDuringSchedulingIgnoredDuringExecution', type=d.T.array)]), - withPreferredDuringSchedulingIgnoredDuringExecution(preferredDuringSchedulingIgnoredDuringExecution): { spec+: { template+: { spec+: { affinity+: { podAntiAffinity+: { preferredDuringSchedulingIgnoredDuringExecution: if std.isArray(v=preferredDuringSchedulingIgnoredDuringExecution) then preferredDuringSchedulingIgnoredDuringExecution else [preferredDuringSchedulingIgnoredDuringExecution] } } } } } }, - '#withPreferredDuringSchedulingIgnoredDuringExecutionMixin':: d.fn(help='"The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding \\"weight\\" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='preferredDuringSchedulingIgnoredDuringExecution', type=d.T.array)]), - withPreferredDuringSchedulingIgnoredDuringExecutionMixin(preferredDuringSchedulingIgnoredDuringExecution): { spec+: { template+: { spec+: { affinity+: { podAntiAffinity+: { preferredDuringSchedulingIgnoredDuringExecution+: if std.isArray(v=preferredDuringSchedulingIgnoredDuringExecution) then preferredDuringSchedulingIgnoredDuringExecution else [preferredDuringSchedulingIgnoredDuringExecution] } } } } } }, - '#withRequiredDuringSchedulingIgnoredDuringExecution':: d.fn(help='"If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied."', args=[d.arg(name='requiredDuringSchedulingIgnoredDuringExecution', type=d.T.array)]), - withRequiredDuringSchedulingIgnoredDuringExecution(requiredDuringSchedulingIgnoredDuringExecution): { spec+: { template+: { spec+: { affinity+: { podAntiAffinity+: { requiredDuringSchedulingIgnoredDuringExecution: if std.isArray(v=requiredDuringSchedulingIgnoredDuringExecution) then requiredDuringSchedulingIgnoredDuringExecution else [requiredDuringSchedulingIgnoredDuringExecution] } } } } } }, - '#withRequiredDuringSchedulingIgnoredDuringExecutionMixin':: d.fn(help='"If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. 
If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='requiredDuringSchedulingIgnoredDuringExecution', type=d.T.array)]), - withRequiredDuringSchedulingIgnoredDuringExecutionMixin(requiredDuringSchedulingIgnoredDuringExecution): { spec+: { template+: { spec+: { affinity+: { podAntiAffinity+: { requiredDuringSchedulingIgnoredDuringExecution+: if std.isArray(v=requiredDuringSchedulingIgnoredDuringExecution) then requiredDuringSchedulingIgnoredDuringExecution else [requiredDuringSchedulingIgnoredDuringExecution] } } } } } }, - }, - }, - '#dnsConfig':: d.obj(help='"PodDNSConfig defines the DNS parameters of a pod in addition to those generated from DNSPolicy."'), - dnsConfig: { - '#withNameservers':: d.fn(help='"A list of DNS name server IP addresses. This will be appended to the base nameservers generated from DNSPolicy. Duplicated nameservers will be removed."', args=[d.arg(name='nameservers', type=d.T.array)]), - withNameservers(nameservers): { spec+: { template+: { spec+: { dnsConfig+: { nameservers: if std.isArray(v=nameservers) then nameservers else [nameservers] } } } } }, - '#withNameserversMixin':: d.fn(help='"A list of DNS name server IP addresses. This will be appended to the base nameservers generated from DNSPolicy. Duplicated nameservers will be removed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='nameservers', type=d.T.array)]), - withNameserversMixin(nameservers): { spec+: { template+: { spec+: { dnsConfig+: { nameservers+: if std.isArray(v=nameservers) then nameservers else [nameservers] } } } } }, - '#withOptions':: d.fn(help='"A list of DNS resolver options. This will be merged with the base options generated from DNSPolicy. Duplicated entries will be removed. Resolution options given in Options will override those that appear in the base DNSPolicy."', args=[d.arg(name='options', type=d.T.array)]), - withOptions(options): { spec+: { template+: { spec+: { dnsConfig+: { options: if std.isArray(v=options) then options else [options] } } } } }, - '#withOptionsMixin':: d.fn(help='"A list of DNS resolver options. This will be merged with the base options generated from DNSPolicy. Duplicated entries will be removed. Resolution options given in Options will override those that appear in the base DNSPolicy."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='options', type=d.T.array)]), - withOptionsMixin(options): { spec+: { template+: { spec+: { dnsConfig+: { options+: if std.isArray(v=options) then options else [options] } } } } }, - '#withSearches':: d.fn(help='"A list of DNS search domains for host-name lookup. This will be appended to the base search paths generated from DNSPolicy. Duplicated search paths will be removed."', args=[d.arg(name='searches', type=d.T.array)]), - withSearches(searches): { spec+: { template+: { spec+: { dnsConfig+: { searches: if std.isArray(v=searches) then searches else [searches] } } } } }, - '#withSearchesMixin':: d.fn(help='"A list of DNS search domains for host-name lookup. This will be appended to the base search paths generated from DNSPolicy. 
Duplicated search paths will be removed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='searches', type=d.T.array)]), - withSearchesMixin(searches): { spec+: { template+: { spec+: { dnsConfig+: { searches+: if std.isArray(v=searches) then searches else [searches] } } } } }, - }, - '#securityContext':: d.obj(help='"PodSecurityContext holds pod-level security attributes and common container settings. Some fields are also present in container.securityContext. Field values of container.securityContext take precedence over field values of PodSecurityContext."'), - securityContext: { - '#seLinuxOptions':: d.obj(help='"SELinuxOptions are the labels to be applied to the container"'), - seLinuxOptions: { - '#withLevel':: d.fn(help='"Level is SELinux level label that applies to the container."', args=[d.arg(name='level', type=d.T.string)]), - withLevel(level): { spec+: { template+: { spec+: { securityContext+: { seLinuxOptions+: { level: level } } } } } }, - '#withRole':: d.fn(help='"Role is a SELinux role label that applies to the container."', args=[d.arg(name='role', type=d.T.string)]), - withRole(role): { spec+: { template+: { spec+: { securityContext+: { seLinuxOptions+: { role: role } } } } } }, - '#withType':: d.fn(help='"Type is a SELinux type label that applies to the container."', args=[d.arg(name='type', type=d.T.string)]), - withType(type): { spec+: { template+: { spec+: { securityContext+: { seLinuxOptions+: { type: type } } } } } }, - '#withUser':: d.fn(help='"User is a SELinux user label that applies to the container."', args=[d.arg(name='user', type=d.T.string)]), - withUser(user): { spec+: { template+: { spec+: { securityContext+: { seLinuxOptions+: { user: user } } } } } }, - }, - '#seccompProfile':: d.obj(help="\"SeccompProfile defines a pod/container's seccomp profile settings. Only one profile source may be set.\""), - seccompProfile: { - '#withLocalhostProfile':: d.fn(help="\"localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must only be set if type is \\\"Localhost\\\".\"", args=[d.arg(name='localhostProfile', type=d.T.string)]), - withLocalhostProfile(localhostProfile): { spec+: { template+: { spec+: { securityContext+: { seccompProfile+: { localhostProfile: localhostProfile } } } } } }, - '#withType':: d.fn(help='"type indicates which kind of seccomp profile will be applied. Valid options are:\\n\\nLocalhost - a profile defined in a file on the node should be used. RuntimeDefault - the container runtime default profile should be used. 
Unconfined - no profile should be applied."', args=[d.arg(name='type', type=d.T.string)]), - withType(type): { spec+: { template+: { spec+: { securityContext+: { seccompProfile+: { type: type } } } } } }, - }, - '#windowsOptions':: d.obj(help='"WindowsSecurityContextOptions contain Windows-specific options and credentials."'), - windowsOptions: { - '#withGmsaCredentialSpec':: d.fn(help='"GMSACredentialSpec is where the GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the GMSA credential spec named by the GMSACredentialSpecName field."', args=[d.arg(name='gmsaCredentialSpec', type=d.T.string)]), - withGmsaCredentialSpec(gmsaCredentialSpec): { spec+: { template+: { spec+: { securityContext+: { windowsOptions+: { gmsaCredentialSpec: gmsaCredentialSpec } } } } } }, - '#withGmsaCredentialSpecName':: d.fn(help='"GMSACredentialSpecName is the name of the GMSA credential spec to use."', args=[d.arg(name='gmsaCredentialSpecName', type=d.T.string)]), - withGmsaCredentialSpecName(gmsaCredentialSpecName): { spec+: { template+: { spec+: { securityContext+: { windowsOptions+: { gmsaCredentialSpecName: gmsaCredentialSpecName } } } } } }, - '#withRunAsUserName':: d.fn(help='"The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence."', args=[d.arg(name='runAsUserName', type=d.T.string)]), - withRunAsUserName(runAsUserName): { spec+: { template+: { spec+: { securityContext+: { windowsOptions+: { runAsUserName: runAsUserName } } } } } }, - }, - '#withFsGroup':: d.fn(help="\"A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod:\\n\\n1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw----\\n\\nIf unset, the Kubelet will not modify the ownership and permissions of any volume.\"", args=[d.arg(name='fsGroup', type=d.T.integer)]), - withFsGroup(fsGroup): { spec+: { template+: { spec+: { securityContext+: { fsGroup: fsGroup } } } } }, - '#withFsGroupChangePolicy':: d.fn(help='"fsGroupChangePolicy defines behavior of changing ownership and permission of the volume before being exposed inside Pod. This field will only apply to volume types which support fsGroup based ownership(and permissions). It will have no effect on ephemeral volume types such as: secret, configmaps and emptydir. Valid values are \\"OnRootMismatch\\" and \\"Always\\". If not specified, \\"Always\\" is used."', args=[d.arg(name='fsGroupChangePolicy', type=d.T.string)]), - withFsGroupChangePolicy(fsGroupChangePolicy): { spec+: { template+: { spec+: { securityContext+: { fsGroupChangePolicy: fsGroupChangePolicy } } } } }, - '#withRunAsGroup':: d.fn(help='"The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext. 
If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container."', args=[d.arg(name='runAsGroup', type=d.T.integer)]), - withRunAsGroup(runAsGroup): { spec+: { template+: { spec+: { securityContext+: { runAsGroup: runAsGroup } } } } }, - '#withRunAsNonRoot':: d.fn(help='"Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence."', args=[d.arg(name='runAsNonRoot', type=d.T.boolean)]), - withRunAsNonRoot(runAsNonRoot): { spec+: { template+: { spec+: { securityContext+: { runAsNonRoot: runAsNonRoot } } } } }, - '#withRunAsUser':: d.fn(help='"The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container."', args=[d.arg(name='runAsUser', type=d.T.integer)]), - withRunAsUser(runAsUser): { spec+: { template+: { spec+: { securityContext+: { runAsUser: runAsUser } } } } }, - '#withSupplementalGroups':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID. If unspecified, no groups will be added to any container.\"", args=[d.arg(name='supplementalGroups', type=d.T.array)]), - withSupplementalGroups(supplementalGroups): { spec+: { template+: { spec+: { securityContext+: { supplementalGroups: if std.isArray(v=supplementalGroups) then supplementalGroups else [supplementalGroups] } } } } }, - '#withSupplementalGroupsMixin':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID. If unspecified, no groups will be added to any container.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='supplementalGroups', type=d.T.array)]), - withSupplementalGroupsMixin(supplementalGroups): { spec+: { template+: { spec+: { securityContext+: { supplementalGroups+: if std.isArray(v=supplementalGroups) then supplementalGroups else [supplementalGroups] } } } } }, - '#withSysctls':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch."', args=[d.arg(name='sysctls', type=d.T.array)]), - withSysctls(sysctls): { spec+: { template+: { spec+: { securityContext+: { sysctls: if std.isArray(v=sysctls) then sysctls else [sysctls] } } } } }, - '#withSysctlsMixin':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='sysctls', type=d.T.array)]), - withSysctlsMixin(sysctls): { spec+: { template+: { spec+: { securityContext+: { sysctls+: if std.isArray(v=sysctls) then sysctls else [sysctls] } } } } }, - }, - '#withActiveDeadlineSeconds':: d.fn(help='"Optional duration in seconds the pod may be active on the node relative to StartTime before the system will actively try to mark it failed and kill associated containers. 
Value must be a positive integer."', args=[d.arg(name='activeDeadlineSeconds', type=d.T.integer)]), - withActiveDeadlineSeconds(activeDeadlineSeconds): { spec+: { template+: { spec+: { activeDeadlineSeconds: activeDeadlineSeconds } } } }, - '#withAutomountServiceAccountToken':: d.fn(help='"AutomountServiceAccountToken indicates whether a service account token should be automatically mounted."', args=[d.arg(name='automountServiceAccountToken', type=d.T.boolean)]), - withAutomountServiceAccountToken(automountServiceAccountToken): { spec+: { template+: { spec+: { automountServiceAccountToken: automountServiceAccountToken } } } }, - '#withContainers':: d.fn(help='"List of containers belonging to the pod. Containers cannot currently be added or removed. There must be at least one container in a Pod. Cannot be updated."', args=[d.arg(name='containers', type=d.T.array)]), - withContainers(containers): { spec+: { template+: { spec+: { containers: if std.isArray(v=containers) then containers else [containers] } } } }, - '#withContainersMixin':: d.fn(help='"List of containers belonging to the pod. Containers cannot currently be added or removed. There must be at least one container in a Pod. Cannot be updated."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='containers', type=d.T.array)]), - withContainersMixin(containers): { spec+: { template+: { spec+: { containers+: if std.isArray(v=containers) then containers else [containers] } } } }, - '#withDnsPolicy':: d.fn(help="\"Set DNS policy for the pod. Defaults to \\\"ClusterFirst\\\". Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. To have DNS options set along with hostNetwork, you have to specify DNS policy explicitly to 'ClusterFirstWithHostNet'.\"", args=[d.arg(name='dnsPolicy', type=d.T.string)]), - withDnsPolicy(dnsPolicy): { spec+: { template+: { spec+: { dnsPolicy: dnsPolicy } } } }, - '#withEnableServiceLinks':: d.fn(help="\"EnableServiceLinks indicates whether information about services should be injected into pod's environment variables, matching the syntax of Docker links. Optional: Defaults to true.\"", args=[d.arg(name='enableServiceLinks', type=d.T.boolean)]), - withEnableServiceLinks(enableServiceLinks): { spec+: { template+: { spec+: { enableServiceLinks: enableServiceLinks } } } }, - '#withEphemeralContainers':: d.fn(help="\"List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. This field is alpha-level and is only honored by servers that enable the EphemeralContainers feature.\"", args=[d.arg(name='ephemeralContainers', type=d.T.array)]), - withEphemeralContainers(ephemeralContainers): { spec+: { template+: { spec+: { ephemeralContainers: if std.isArray(v=ephemeralContainers) then ephemeralContainers else [ephemeralContainers] } } } }, - '#withEphemeralContainersMixin':: d.fn(help="\"List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. 
In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. This field is alpha-level and is only honored by servers that enable the EphemeralContainers feature.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='ephemeralContainers', type=d.T.array)]), - withEphemeralContainersMixin(ephemeralContainers): { spec+: { template+: { spec+: { ephemeralContainers+: if std.isArray(v=ephemeralContainers) then ephemeralContainers else [ephemeralContainers] } } } }, - '#withHostAliases':: d.fn(help="\"HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts file if specified. This is only valid for non-hostNetwork pods.\"", args=[d.arg(name='hostAliases', type=d.T.array)]), - withHostAliases(hostAliases): { spec+: { template+: { spec+: { hostAliases: if std.isArray(v=hostAliases) then hostAliases else [hostAliases] } } } }, - '#withHostAliasesMixin':: d.fn(help="\"HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts file if specified. This is only valid for non-hostNetwork pods.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='hostAliases', type=d.T.array)]), - withHostAliasesMixin(hostAliases): { spec+: { template+: { spec+: { hostAliases+: if std.isArray(v=hostAliases) then hostAliases else [hostAliases] } } } }, - '#withHostIPC':: d.fn(help="\"Use the host's ipc namespace. Optional: Default to false.\"", args=[d.arg(name='hostIPC', type=d.T.boolean)]), - withHostIPC(hostIPC): { spec+: { template+: { spec+: { hostIPC: hostIPC } } } }, - '#withHostNetwork':: d.fn(help="\"Host networking requested for this pod. Use the host's network namespace. If this option is set, the ports that will be used must be specified. Default to false.\"", args=[d.arg(name='hostNetwork', type=d.T.boolean)]), - withHostNetwork(hostNetwork): { spec+: { template+: { spec+: { hostNetwork: hostNetwork } } } }, - '#withHostPID':: d.fn(help="\"Use the host's pid namespace. Optional: Default to false.\"", args=[d.arg(name='hostPID', type=d.T.boolean)]), - withHostPID(hostPID): { spec+: { template+: { spec+: { hostPID: hostPID } } } }, - '#withHostname':: d.fn(help="\"Specifies the hostname of the Pod If not specified, the pod's hostname will be set to a system-defined value.\"", args=[d.arg(name='hostname', type=d.T.string)]), - withHostname(hostname): { spec+: { template+: { spec+: { hostname: hostname } } } }, - '#withImagePullSecrets':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), - withImagePullSecrets(imagePullSecrets): { spec+: { template+: { spec+: { imagePullSecrets: if std.isArray(v=imagePullSecrets) then imagePullSecrets else [imagePullSecrets] } } } }, - '#withImagePullSecretsMixin':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. 
For example, in the case of docker, only DockerConfig type secrets are honored. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), - withImagePullSecretsMixin(imagePullSecrets): { spec+: { template+: { spec+: { imagePullSecrets+: if std.isArray(v=imagePullSecrets) then imagePullSecrets else [imagePullSecrets] } } } }, - '#withInitContainers':: d.fn(help='"List of initialization containers belonging to the pod. Init containers are executed in order prior to containers being started. If any init container fails, the pod is considered to have failed and is handled according to its restartPolicy. The name for an init container or normal container must be unique among all containers. Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes. The resourceRequirements of an init container are taken into account during scheduling by finding the highest request/limit for each resource type, and then using the max of of that value or the sum of the normal containers. Limits are applied to init containers in a similar fashion. Init containers cannot currently be added or removed. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/"', args=[d.arg(name='initContainers', type=d.T.array)]), - withInitContainers(initContainers): { spec+: { template+: { spec+: { initContainers: if std.isArray(v=initContainers) then initContainers else [initContainers] } } } }, - '#withInitContainersMixin':: d.fn(help='"List of initialization containers belonging to the pod. Init containers are executed in order prior to containers being started. If any init container fails, the pod is considered to have failed and is handled according to its restartPolicy. The name for an init container or normal container must be unique among all containers. Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes. The resourceRequirements of an init container are taken into account during scheduling by finding the highest request/limit for each resource type, and then using the max of of that value or the sum of the normal containers. Limits are applied to init containers in a similar fashion. Init containers cannot currently be added or removed. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='initContainers', type=d.T.array)]), - withInitContainersMixin(initContainers): { spec+: { template+: { spec+: { initContainers+: if std.isArray(v=initContainers) then initContainers else [initContainers] } } } }, - '#withNodeName':: d.fn(help='"NodeName is a request to schedule this pod onto a specific node. If it is non-empty, the scheduler simply schedules this pod onto that node, assuming that it fits resource requirements."', args=[d.arg(name='nodeName', type=d.T.string)]), - withNodeName(nodeName): { spec+: { template+: { spec+: { nodeName: nodeName } } } }, - '#withNodeSelector':: d.fn(help="\"NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. 
More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/\"", args=[d.arg(name='nodeSelector', type=d.T.object)]), - withNodeSelector(nodeSelector): { spec+: { template+: { spec+: { nodeSelector: nodeSelector } } } }, - '#withNodeSelectorMixin':: d.fn(help="\"NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='nodeSelector', type=d.T.object)]), - withNodeSelectorMixin(nodeSelector): { spec+: { template+: { spec+: { nodeSelector+: nodeSelector } } } }, - '#withOverhead':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md This field is alpha-level as of Kubernetes v1.16, and is only honored by servers that enable the PodOverhead feature."', args=[d.arg(name='overhead', type=d.T.object)]), - withOverhead(overhead): { spec+: { template+: { spec+: { overhead: overhead } } } }, - '#withOverheadMixin':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md This field is alpha-level as of Kubernetes v1.16, and is only honored by servers that enable the PodOverhead feature."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='overhead', type=d.T.object)]), - withOverheadMixin(overhead): { spec+: { template+: { spec+: { overhead+: overhead } } } }, - '#withPreemptionPolicy':: d.fn(help='"PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset. This field is beta-level, gated by the NonPreemptingPriority feature-gate."', args=[d.arg(name='preemptionPolicy', type=d.T.string)]), - withPreemptionPolicy(preemptionPolicy): { spec+: { template+: { spec+: { preemptionPolicy: preemptionPolicy } } } }, - '#withPriority':: d.fn(help='"The priority value. Various system components use this field to find the priority of the pod. When Priority Admission Controller is enabled, it prevents users from setting this field. The admission controller populates this field from PriorityClassName. 
The higher the value, the higher the priority."', args=[d.arg(name='priority', type=d.T.integer)]), - withPriority(priority): { spec+: { template+: { spec+: { priority: priority } } } }, - '#withPriorityClassName':: d.fn(help="\"If specified, indicates the pod's priority. \\\"system-node-critical\\\" and \\\"system-cluster-critical\\\" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default.\"", args=[d.arg(name='priorityClassName', type=d.T.string)]), - withPriorityClassName(priorityClassName): { spec+: { template+: { spec+: { priorityClassName: priorityClassName } } } }, - '#withReadinessGates':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/0007-pod-ready%2B%2B.md"', args=[d.arg(name='readinessGates', type=d.T.array)]), - withReadinessGates(readinessGates): { spec+: { template+: { spec+: { readinessGates: if std.isArray(v=readinessGates) then readinessGates else [readinessGates] } } } }, - '#withReadinessGatesMixin':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/0007-pod-ready%2B%2B.md"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='readinessGates', type=d.T.array)]), - withReadinessGatesMixin(readinessGates): { spec+: { template+: { spec+: { readinessGates+: if std.isArray(v=readinessGates) then readinessGates else [readinessGates] } } } }, - '#withRestartPolicy':: d.fn(help='"Restart policy for all containers within the pod. One of Always, OnFailure, Never. Default to Always. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy"', args=[d.arg(name='restartPolicy', type=d.T.string)]), - withRestartPolicy(restartPolicy): { spec+: { template+: { spec+: { restartPolicy: restartPolicy } } } }, - '#withRuntimeClassName':: d.fn(help='"RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \\"legacy\\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md This is a beta feature as of Kubernetes v1.14."', args=[d.arg(name='runtimeClassName', type=d.T.string)]), - withRuntimeClassName(runtimeClassName): { spec+: { template+: { spec+: { runtimeClassName: runtimeClassName } } } }, - '#withSchedulerName':: d.fn(help='"If specified, the pod will be dispatched by specified scheduler. If not specified, the pod will be dispatched by default scheduler."', args=[d.arg(name='schedulerName', type=d.T.string)]), - withSchedulerName(schedulerName): { spec+: { template+: { spec+: { schedulerName: schedulerName } } } }, - '#withServiceAccount':: d.fn(help='"DeprecatedServiceAccount is a depreciated alias for ServiceAccountName. 
Deprecated: Use serviceAccountName instead."', args=[d.arg(name='serviceAccount', type=d.T.string)]), - withServiceAccount(serviceAccount): { spec+: { template+: { spec+: { serviceAccount: serviceAccount } } } }, - '#withServiceAccountName':: d.fn(help='"ServiceAccountName is the name of the ServiceAccount to use to run this pod. More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/"', args=[d.arg(name='serviceAccountName', type=d.T.string)]), - withServiceAccountName(serviceAccountName): { spec+: { template+: { spec+: { serviceAccountName: serviceAccountName } } } }, - '#withSetHostnameAsFQDN':: d.fn(help="\"If true the pod's hostname will be configured as the pod's FQDN, rather than the leaf name (the default). In Linux containers, this means setting the FQDN in the hostname field of the kernel (the nodename field of struct utsname). In Windows containers, this means setting the registry value of hostname for the registry key HKEY_LOCAL_MACHINE\\\\SYSTEM\\\\CurrentControlSet\\\\Services\\\\Tcpip\\\\Parameters to FQDN. If a pod does not have FQDN, this has no effect. Default to false.\"", args=[d.arg(name='setHostnameAsFQDN', type=d.T.boolean)]), - withSetHostnameAsFQDN(setHostnameAsFQDN): { spec+: { template+: { spec+: { setHostnameAsFQDN: setHostnameAsFQDN } } } }, - '#withShareProcessNamespace':: d.fn(help='"Share a single process namespace between all of the containers in a pod. When this is set containers will be able to view and signal processes from other containers in the same pod, and the first process in each container will not be assigned PID 1. HostPID and ShareProcessNamespace cannot both be set. Optional: Default to false."', args=[d.arg(name='shareProcessNamespace', type=d.T.boolean)]), - withShareProcessNamespace(shareProcessNamespace): { spec+: { template+: { spec+: { shareProcessNamespace: shareProcessNamespace } } } }, - '#withSubdomain':: d.fn(help='"If specified, the fully qualified Pod hostname will be \\"...svc.\\". If not specified, the pod will not have a domainname at all."', args=[d.arg(name='subdomain', type=d.T.string)]), - withSubdomain(subdomain): { spec+: { template+: { spec+: { subdomain: subdomain } } } }, - '#withTerminationGracePeriodSeconds':: d.fn(help='"Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). If this value is nil, the default grace period will be used instead. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. 
Defaults to 30 seconds."', args=[d.arg(name='terminationGracePeriodSeconds', type=d.T.integer)]), - withTerminationGracePeriodSeconds(terminationGracePeriodSeconds): { spec+: { template+: { spec+: { terminationGracePeriodSeconds: terminationGracePeriodSeconds } } } }, - '#withTolerations':: d.fn(help="\"If specified, the pod's tolerations.\"", args=[d.arg(name='tolerations', type=d.T.array)]), - withTolerations(tolerations): { spec+: { template+: { spec+: { tolerations: if std.isArray(v=tolerations) then tolerations else [tolerations] } } } }, - '#withTolerationsMixin':: d.fn(help="\"If specified, the pod's tolerations.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='tolerations', type=d.T.array)]), - withTolerationsMixin(tolerations): { spec+: { template+: { spec+: { tolerations+: if std.isArray(v=tolerations) then tolerations else [tolerations] } } } }, - '#withTopologySpreadConstraints':: d.fn(help='"TopologySpreadConstraints describes how a group of pods ought to spread across topology domains. Scheduler will schedule pods in a way which abides by the constraints. All topologySpreadConstraints are ANDed."', args=[d.arg(name='topologySpreadConstraints', type=d.T.array)]), - withTopologySpreadConstraints(topologySpreadConstraints): { spec+: { template+: { spec+: { topologySpreadConstraints: if std.isArray(v=topologySpreadConstraints) then topologySpreadConstraints else [topologySpreadConstraints] } } } }, - '#withTopologySpreadConstraintsMixin':: d.fn(help='"TopologySpreadConstraints describes how a group of pods ought to spread across topology domains. Scheduler will schedule pods in a way which abides by the constraints. All topologySpreadConstraints are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='topologySpreadConstraints', type=d.T.array)]), - withTopologySpreadConstraintsMixin(topologySpreadConstraints): { spec+: { template+: { spec+: { topologySpreadConstraints+: if std.isArray(v=topologySpreadConstraints) then topologySpreadConstraints else [topologySpreadConstraints] } } } }, - '#withVolumes':: d.fn(help='"List of volumes that can be mounted by containers belonging to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes"', args=[d.arg(name='volumes', type=d.T.array)]), - withVolumes(volumes): { spec+: { template+: { spec+: { volumes: if std.isArray(v=volumes) then volumes else [volumes] } } } }, - '#withVolumesMixin':: d.fn(help='"List of volumes that can be mounted by containers belonging to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='volumes', type=d.T.array)]), - withVolumesMixin(volumes): { spec+: { template+: { spec+: { volumes+: if std.isArray(v=volumes) then volumes else [volumes] } } } }, - }, - }, - '#withActiveDeadlineSeconds':: d.fn(help='"Specifies the duration in seconds relative to the startTime that the job may be continuously active before the system tries to terminate it; value must be positive integer. If a Job is suspended (at creation or through an update), this timer will effectively be stopped and reset when the Job is resumed again."', args=[d.arg(name='activeDeadlineSeconds', type=d.T.integer)]), - withActiveDeadlineSeconds(activeDeadlineSeconds): { spec+: { activeDeadlineSeconds: activeDeadlineSeconds } }, - '#withBackoffLimit':: d.fn(help='"Specifies the number of retries before marking this job failed. 
Defaults to 6"', args=[d.arg(name='backoffLimit', type=d.T.integer)]), - withBackoffLimit(backoffLimit): { spec+: { backoffLimit: backoffLimit } }, - '#withCompletionMode':: d.fn(help="\"CompletionMode specifies how Pod completions are tracked. It can be `NonIndexed` (default) or `Indexed`.\\n\\n`NonIndexed` means that the Job is considered complete when there have been .spec.completions successfully completed Pods. Each Pod completion is homologous to each other.\\n\\n`Indexed` means that the Pods of a Job get an associated completion index from 0 to (.spec.completions - 1), available in the annotation batch.kubernetes.io/job-completion-index. The Job is considered complete when there is one successfully completed Pod for each index. When value is `Indexed`, .spec.completions must be specified and `.spec.parallelism` must be less than or equal to 10^5.\\n\\nThis field is alpha-level and is only honored by servers that enable the IndexedJob feature gate. More completion modes can be added in the future. If the Job controller observes a mode that it doesn't recognize, the controller skips updates for the Job.\"", args=[d.arg(name='completionMode', type=d.T.string)]), - withCompletionMode(completionMode): { spec+: { completionMode: completionMode } }, - '#withCompletions':: d.fn(help='"Specifies the desired number of successfully finished pods the job should be run with. Setting to nil means that the success of any pod signals the success of all pods, and allows parallelism to have any positive value. Setting to 1 means that parallelism is limited to 1 and the success of that pod signals the success of the job. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/"', args=[d.arg(name='completions', type=d.T.integer)]), - withCompletions(completions): { spec+: { completions: completions } }, - '#withManualSelector':: d.fn(help='"manualSelector controls generation of pod labels and pod selectors. Leave `manualSelector` unset unless you are certain what you are doing. When false or unset, the system pick labels unique to this job and appends those labels to the pod template. When true, the user is responsible for picking unique labels and specifying the selector. Failure to pick a unique label may cause this and other jobs to not function correctly. However, You may see `manualSelector=true` in jobs that were created with the old `extensions/v1beta1` API. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/#specifying-your-own-pod-selector"', args=[d.arg(name='manualSelector', type=d.T.boolean)]), - withManualSelector(manualSelector): { spec+: { manualSelector: manualSelector } }, - '#withParallelism':: d.fn(help='"Specifies the maximum desired number of pods the job should run at any given time. The actual number of pods running in steady state will be less than this number when ((.spec.completions - .status.successful) < .spec.parallelism), i.e. when the work left to do is less than max parallelism. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/"', args=[d.arg(name='parallelism', type=d.T.integer)]), - withParallelism(parallelism): { spec+: { parallelism: parallelism } }, - '#withSuspend':: d.fn(help='"Suspend specifies whether the Job controller should create Pods or not. If a Job is created with suspend set to true, no Pods are created by the Job controller. If a Job is suspended after creation (i.e. 
the flag goes from false to true), the Job controller will delete all active Pods associated with this Job. Users must design their workload to gracefully handle this. Suspending a Job will reset the StartTime field of the Job, effectively resetting the ActiveDeadlineSeconds timer too. This is an alpha field and requires the SuspendJob feature gate to be enabled; otherwise this field may not be set to true. Defaults to false."', args=[d.arg(name='suspend', type=d.T.boolean)]), - withSuspend(suspend): { spec+: { suspend: suspend } }, - '#withTtlSecondsAfterFinished':: d.fn(help="\"ttlSecondsAfterFinished limits the lifetime of a Job that has finished execution (either Complete or Failed). If this field is set, ttlSecondsAfterFinished after the Job finishes, it is eligible to be automatically deleted. When the Job is being deleted, its lifecycle guarantees (e.g. finalizers) will be honored. If this field is unset, the Job won't be automatically deleted. If this field is set to zero, the Job becomes eligible to be deleted immediately after it finishes. This field is alpha-level and is only honored by servers that enable the TTLAfterFinished feature.\"", args=[d.arg(name='ttlSecondsAfterFinished', type=d.T.integer)]), - withTtlSecondsAfterFinished(ttlSecondsAfterFinished): { spec+: { ttlSecondsAfterFinished: ttlSecondsAfterFinished } }, - }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/batch/v1beta1/cronJob.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/batch/v1beta1/cronJob.libsonnet deleted file mode 100644 index ed443054b55..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/batch/v1beta1/cronJob.libsonnet +++ /dev/null @@ -1,388 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='cronJob', url='', help='"CronJob represents the configuration of a single cron job."'), - '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), - metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), - withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), - withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. 
This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, - '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), - withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, - '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), - withDeletionGracePeriodSeconds(deletionGracePeriodSeconds): { metadata+: { deletionGracePeriodSeconds: deletionGracePeriodSeconds } }, - '#withDeletionTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='deletionTimestamp', type=d.T.string)]), - withDeletionTimestamp(deletionTimestamp): { metadata+: { deletionTimestamp: deletionTimestamp } }, - '#withFinalizers':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. 
Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), - withGenerateName(generateName): { metadata+: { generateName: generateName } }, - '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), - withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), - withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), - withLabelsMixin(labels): { metadata+: { labels+: labels } }, - '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. 
A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { metadata+: { namespace: namespace } }, - '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, - '#withOwnerReferencesMixin':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, - '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), - withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), - withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), - withUid(uid): { metadata+: { uid: uid } }, - }, - '#new':: d.fn(help='new returns an instance of CronJob', args=[d.arg(name='name', type=d.T.string)]), - new(name): { - apiVersion: 'batch/v1beta1', - kind: 'CronJob', - } + self.metadata.withName(name=name), - '#spec':: d.obj(help='"CronJobSpec describes how the job execution will look like and when it will actually run."'), - spec: { - '#jobTemplate':: d.obj(help='"JobTemplateSpec describes the data a Job should have when created from a template"'), - jobTemplate: { - '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), - metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), - withAnnotations(annotations): { spec+: { jobTemplate+: { metadata+: { annotations: annotations } } } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), - withAnnotationsMixin(annotations): { spec+: { jobTemplate+: { metadata+: { annotations+: annotations } } } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { spec+: { jobTemplate+: { metadata+: { clusterName: clusterName } } } }, - '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. 
Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), - withCreationTimestamp(creationTimestamp): { spec+: { jobTemplate+: { metadata+: { creationTimestamp: creationTimestamp } } } }, - '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), - withDeletionGracePeriodSeconds(deletionGracePeriodSeconds): { spec+: { jobTemplate+: { metadata+: { deletionGracePeriodSeconds: deletionGracePeriodSeconds } } } }, - '#withDeletionTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='deletionTimestamp', type=d.T.string)]), - withDeletionTimestamp(deletionTimestamp): { spec+: { jobTemplate+: { metadata+: { deletionTimestamp: deletionTimestamp } } } }, - '#withFinalizers':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizers(finalizers): { spec+: { jobTemplate+: { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } } } }, - '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. 
Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizersMixin(finalizers): { spec+: { jobTemplate+: { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } } } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), - withGenerateName(generateName): { spec+: { jobTemplate+: { metadata+: { generateName: generateName } } } }, - '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), - withGeneration(generation): { spec+: { jobTemplate+: { metadata+: { generation: generation } } } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), - withLabels(labels): { spec+: { jobTemplate+: { metadata+: { labels: labels } } } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), - withLabelsMixin(labels): { spec+: { jobTemplate+: { metadata+: { labels+: labels } } } }, - '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFields(managedFields): { spec+: { jobTemplate+: { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } } } }, - '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. 
This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFieldsMixin(managedFields): { spec+: { jobTemplate+: { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } } } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { spec+: { jobTemplate+: { metadata+: { name: name } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { spec+: { jobTemplate+: { metadata+: { namespace: namespace } } } }, - '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferences(ownerReferences): { spec+: { jobTemplate+: { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } } } }, - '#withOwnerReferencesMixin':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferencesMixin(ownerReferences): { spec+: { jobTemplate+: { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } } } }, - '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), - withResourceVersion(resourceVersion): { spec+: { jobTemplate+: { metadata+: { resourceVersion: resourceVersion } } } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), - withSelfLink(selfLink): { spec+: { jobTemplate+: { metadata+: { selfLink: selfLink } } } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), - withUid(uid): { spec+: { jobTemplate+: { metadata+: { uid: uid } } } }, - }, - '#spec':: d.obj(help='"JobSpec describes how the job execution will look like."'), - spec: { - '#selector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), - selector: { - '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), - withMatchExpressions(matchExpressions): { spec+: { jobTemplate+: { spec+: { selector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } } } }, - '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), - withMatchExpressionsMixin(matchExpressions): { spec+: { jobTemplate+: { spec+: { selector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } } } }, - '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), - withMatchLabels(matchLabels): { spec+: { jobTemplate+: { spec+: { selector+: { matchLabels: matchLabels } } } } }, - '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". 
The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), - withMatchLabelsMixin(matchLabels): { spec+: { jobTemplate+: { spec+: { selector+: { matchLabels+: matchLabels } } } } }, - }, - '#template':: d.obj(help='"PodTemplateSpec describes the data a pod should have when created from a template"'), - template: { - '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), - metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), - withAnnotations(annotations): { spec+: { jobTemplate+: { spec+: { template+: { metadata+: { annotations: annotations } } } } } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), - withAnnotationsMixin(annotations): { spec+: { jobTemplate+: { spec+: { template+: { metadata+: { annotations+: annotations } } } } } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { spec+: { jobTemplate+: { spec+: { template+: { metadata+: { clusterName: clusterName } } } } } }, - '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), - withCreationTimestamp(creationTimestamp): { spec+: { jobTemplate+: { spec+: { template+: { metadata+: { creationTimestamp: creationTimestamp } } } } } }, - '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), - withDeletionGracePeriodSeconds(deletionGracePeriodSeconds): { spec+: { jobTemplate+: { spec+: { template+: { metadata+: { deletionGracePeriodSeconds: deletionGracePeriodSeconds } } } } } }, - '#withDeletionTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. 
Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='deletionTimestamp', type=d.T.string)]), - withDeletionTimestamp(deletionTimestamp): { spec+: { jobTemplate+: { spec+: { template+: { metadata+: { deletionTimestamp: deletionTimestamp } } } } } }, - '#withFinalizers':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizers(finalizers): { spec+: { jobTemplate+: { spec+: { template+: { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } } } } } }, - '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizersMixin(finalizers): { spec+: { jobTemplate+: { spec+: { template+: { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } } } } } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. 
The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), - withGenerateName(generateName): { spec+: { jobTemplate+: { spec+: { template+: { metadata+: { generateName: generateName } } } } } }, - '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), - withGeneration(generation): { spec+: { jobTemplate+: { spec+: { template+: { metadata+: { generation: generation } } } } } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), - withLabels(labels): { spec+: { jobTemplate+: { spec+: { template+: { metadata+: { labels: labels } } } } } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), - withLabelsMixin(labels): { spec+: { jobTemplate+: { spec+: { template+: { metadata+: { labels+: labels } } } } } }, - '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFields(managedFields): { spec+: { jobTemplate+: { spec+: { template+: { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } } } } } }, - '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". 
The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFieldsMixin(managedFields): { spec+: { jobTemplate+: { spec+: { template+: { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } } } } } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { spec+: { jobTemplate+: { spec+: { template+: { metadata+: { name: name } } } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { spec+: { jobTemplate+: { spec+: { template+: { metadata+: { namespace: namespace } } } } } }, - '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferences(ownerReferences): { spec+: { jobTemplate+: { spec+: { template+: { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } } } } } }, - '#withOwnerReferencesMixin':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferencesMixin(ownerReferences): { spec+: { jobTemplate+: { spec+: { template+: { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } } } } } }, - '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), - withResourceVersion(resourceVersion): { spec+: { jobTemplate+: { spec+: { template+: { metadata+: { resourceVersion: resourceVersion } } } } } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), - withSelfLink(selfLink): { spec+: { jobTemplate+: { spec+: { template+: { metadata+: { selfLink: selfLink } } } } } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), - withUid(uid): { spec+: { jobTemplate+: { spec+: { template+: { metadata+: { uid: uid } } } } } }, - }, - '#spec':: d.obj(help='"PodSpec is a description of a pod."'), - spec: { - '#affinity':: d.obj(help='"Affinity is a group of affinity scheduling rules."'), - affinity: { - '#nodeAffinity':: d.obj(help='"Node affinity is a group of node affinity scheduling rules."'), - nodeAffinity: { - '#requiredDuringSchedulingIgnoredDuringExecution':: d.obj(help='"A node selector represents the union of the results of one or more label queries over a set of nodes; that is, it represents the OR of the selectors represented by the node selector terms."'), - requiredDuringSchedulingIgnoredDuringExecution: { - '#withNodeSelectorTerms':: d.fn(help='"Required. A list of node selector terms. The terms are ORed."', args=[d.arg(name='nodeSelectorTerms', type=d.T.array)]), - withNodeSelectorTerms(nodeSelectorTerms): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { affinity+: { nodeAffinity+: { requiredDuringSchedulingIgnoredDuringExecution+: { nodeSelectorTerms: if std.isArray(v=nodeSelectorTerms) then nodeSelectorTerms else [nodeSelectorTerms] } } } } } } } } }, - '#withNodeSelectorTermsMixin':: d.fn(help='"Required. A list of node selector terms. The terms are ORed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='nodeSelectorTerms', type=d.T.array)]), - withNodeSelectorTermsMixin(nodeSelectorTerms): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { affinity+: { nodeAffinity+: { requiredDuringSchedulingIgnoredDuringExecution+: { nodeSelectorTerms+: if std.isArray(v=nodeSelectorTerms) then nodeSelectorTerms else [nodeSelectorTerms] } } } } } } } } }, - }, - '#withPreferredDuringSchedulingIgnoredDuringExecution':: d.fn(help='"The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. 
for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding \\"weight\\" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred."', args=[d.arg(name='preferredDuringSchedulingIgnoredDuringExecution', type=d.T.array)]), - withPreferredDuringSchedulingIgnoredDuringExecution(preferredDuringSchedulingIgnoredDuringExecution): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { affinity+: { nodeAffinity+: { preferredDuringSchedulingIgnoredDuringExecution: if std.isArray(v=preferredDuringSchedulingIgnoredDuringExecution) then preferredDuringSchedulingIgnoredDuringExecution else [preferredDuringSchedulingIgnoredDuringExecution] } } } } } } } }, - '#withPreferredDuringSchedulingIgnoredDuringExecutionMixin':: d.fn(help='"The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding \\"weight\\" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='preferredDuringSchedulingIgnoredDuringExecution', type=d.T.array)]), - withPreferredDuringSchedulingIgnoredDuringExecutionMixin(preferredDuringSchedulingIgnoredDuringExecution): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { affinity+: { nodeAffinity+: { preferredDuringSchedulingIgnoredDuringExecution+: if std.isArray(v=preferredDuringSchedulingIgnoredDuringExecution) then preferredDuringSchedulingIgnoredDuringExecution else [preferredDuringSchedulingIgnoredDuringExecution] } } } } } } } }, - }, - '#podAffinity':: d.obj(help='"Pod affinity is a group of inter pod affinity scheduling rules."'), - podAffinity: { - '#withPreferredDuringSchedulingIgnoredDuringExecution':: d.fn(help='"The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. 
for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding \\"weight\\" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred."', args=[d.arg(name='preferredDuringSchedulingIgnoredDuringExecution', type=d.T.array)]), - withPreferredDuringSchedulingIgnoredDuringExecution(preferredDuringSchedulingIgnoredDuringExecution): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { affinity+: { podAffinity+: { preferredDuringSchedulingIgnoredDuringExecution: if std.isArray(v=preferredDuringSchedulingIgnoredDuringExecution) then preferredDuringSchedulingIgnoredDuringExecution else [preferredDuringSchedulingIgnoredDuringExecution] } } } } } } } }, - '#withPreferredDuringSchedulingIgnoredDuringExecutionMixin':: d.fn(help='"The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding \\"weight\\" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='preferredDuringSchedulingIgnoredDuringExecution', type=d.T.array)]), - withPreferredDuringSchedulingIgnoredDuringExecutionMixin(preferredDuringSchedulingIgnoredDuringExecution): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { affinity+: { podAffinity+: { preferredDuringSchedulingIgnoredDuringExecution+: if std.isArray(v=preferredDuringSchedulingIgnoredDuringExecution) then preferredDuringSchedulingIgnoredDuringExecution else [preferredDuringSchedulingIgnoredDuringExecution] } } } } } } } }, - '#withRequiredDuringSchedulingIgnoredDuringExecution':: d.fn(help='"If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied."', args=[d.arg(name='requiredDuringSchedulingIgnoredDuringExecution', type=d.T.array)]), - withRequiredDuringSchedulingIgnoredDuringExecution(requiredDuringSchedulingIgnoredDuringExecution): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { affinity+: { podAffinity+: { requiredDuringSchedulingIgnoredDuringExecution: if std.isArray(v=requiredDuringSchedulingIgnoredDuringExecution) then requiredDuringSchedulingIgnoredDuringExecution else [requiredDuringSchedulingIgnoredDuringExecution] } } } } } } } }, - '#withRequiredDuringSchedulingIgnoredDuringExecutionMixin':: d.fn(help='"If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. 
due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='requiredDuringSchedulingIgnoredDuringExecution', type=d.T.array)]), - withRequiredDuringSchedulingIgnoredDuringExecutionMixin(requiredDuringSchedulingIgnoredDuringExecution): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { affinity+: { podAffinity+: { requiredDuringSchedulingIgnoredDuringExecution+: if std.isArray(v=requiredDuringSchedulingIgnoredDuringExecution) then requiredDuringSchedulingIgnoredDuringExecution else [requiredDuringSchedulingIgnoredDuringExecution] } } } } } } } }, - }, - '#podAntiAffinity':: d.obj(help='"Pod anti affinity is a group of inter pod anti affinity scheduling rules."'), - podAntiAffinity: { - '#withPreferredDuringSchedulingIgnoredDuringExecution':: d.fn(help='"The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding \\"weight\\" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred."', args=[d.arg(name='preferredDuringSchedulingIgnoredDuringExecution', type=d.T.array)]), - withPreferredDuringSchedulingIgnoredDuringExecution(preferredDuringSchedulingIgnoredDuringExecution): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { affinity+: { podAntiAffinity+: { preferredDuringSchedulingIgnoredDuringExecution: if std.isArray(v=preferredDuringSchedulingIgnoredDuringExecution) then preferredDuringSchedulingIgnoredDuringExecution else [preferredDuringSchedulingIgnoredDuringExecution] } } } } } } } }, - '#withPreferredDuringSchedulingIgnoredDuringExecutionMixin':: d.fn(help='"The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. 
for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding \\"weight\\" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='preferredDuringSchedulingIgnoredDuringExecution', type=d.T.array)]), - withPreferredDuringSchedulingIgnoredDuringExecutionMixin(preferredDuringSchedulingIgnoredDuringExecution): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { affinity+: { podAntiAffinity+: { preferredDuringSchedulingIgnoredDuringExecution+: if std.isArray(v=preferredDuringSchedulingIgnoredDuringExecution) then preferredDuringSchedulingIgnoredDuringExecution else [preferredDuringSchedulingIgnoredDuringExecution] } } } } } } } }, - '#withRequiredDuringSchedulingIgnoredDuringExecution':: d.fn(help='"If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied."', args=[d.arg(name='requiredDuringSchedulingIgnoredDuringExecution', type=d.T.array)]), - withRequiredDuringSchedulingIgnoredDuringExecution(requiredDuringSchedulingIgnoredDuringExecution): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { affinity+: { podAntiAffinity+: { requiredDuringSchedulingIgnoredDuringExecution: if std.isArray(v=requiredDuringSchedulingIgnoredDuringExecution) then requiredDuringSchedulingIgnoredDuringExecution else [requiredDuringSchedulingIgnoredDuringExecution] } } } } } } } }, - '#withRequiredDuringSchedulingIgnoredDuringExecutionMixin':: d.fn(help='"If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='requiredDuringSchedulingIgnoredDuringExecution', type=d.T.array)]), - withRequiredDuringSchedulingIgnoredDuringExecutionMixin(requiredDuringSchedulingIgnoredDuringExecution): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { affinity+: { podAntiAffinity+: { requiredDuringSchedulingIgnoredDuringExecution+: if std.isArray(v=requiredDuringSchedulingIgnoredDuringExecution) then requiredDuringSchedulingIgnoredDuringExecution else [requiredDuringSchedulingIgnoredDuringExecution] } } } } } } } }, - }, - }, - '#dnsConfig':: d.obj(help='"PodDNSConfig defines the DNS parameters of a pod in addition to those generated from DNSPolicy."'), - dnsConfig: { - '#withNameservers':: d.fn(help='"A list of DNS name server IP addresses. This will be appended to the base nameservers generated from DNSPolicy. 
Duplicated nameservers will be removed."', args=[d.arg(name='nameservers', type=d.T.array)]), - withNameservers(nameservers): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { dnsConfig+: { nameservers: if std.isArray(v=nameservers) then nameservers else [nameservers] } } } } } } }, - '#withNameserversMixin':: d.fn(help='"A list of DNS name server IP addresses. This will be appended to the base nameservers generated from DNSPolicy. Duplicated nameservers will be removed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='nameservers', type=d.T.array)]), - withNameserversMixin(nameservers): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { dnsConfig+: { nameservers+: if std.isArray(v=nameservers) then nameservers else [nameservers] } } } } } } }, - '#withOptions':: d.fn(help='"A list of DNS resolver options. This will be merged with the base options generated from DNSPolicy. Duplicated entries will be removed. Resolution options given in Options will override those that appear in the base DNSPolicy."', args=[d.arg(name='options', type=d.T.array)]), - withOptions(options): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { dnsConfig+: { options: if std.isArray(v=options) then options else [options] } } } } } } }, - '#withOptionsMixin':: d.fn(help='"A list of DNS resolver options. This will be merged with the base options generated from DNSPolicy. Duplicated entries will be removed. Resolution options given in Options will override those that appear in the base DNSPolicy."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='options', type=d.T.array)]), - withOptionsMixin(options): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { dnsConfig+: { options+: if std.isArray(v=options) then options else [options] } } } } } } }, - '#withSearches':: d.fn(help='"A list of DNS search domains for host-name lookup. This will be appended to the base search paths generated from DNSPolicy. Duplicated search paths will be removed."', args=[d.arg(name='searches', type=d.T.array)]), - withSearches(searches): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { dnsConfig+: { searches: if std.isArray(v=searches) then searches else [searches] } } } } } } }, - '#withSearchesMixin':: d.fn(help='"A list of DNS search domains for host-name lookup. This will be appended to the base search paths generated from DNSPolicy. Duplicated search paths will be removed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='searches', type=d.T.array)]), - withSearchesMixin(searches): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { dnsConfig+: { searches+: if std.isArray(v=searches) then searches else [searches] } } } } } } }, - }, - '#securityContext':: d.obj(help='"PodSecurityContext holds pod-level security attributes and common container settings. Some fields are also present in container.securityContext. 
Field values of container.securityContext take precedence over field values of PodSecurityContext."'), - securityContext: { - '#seLinuxOptions':: d.obj(help='"SELinuxOptions are the labels to be applied to the container"'), - seLinuxOptions: { - '#withLevel':: d.fn(help='"Level is SELinux level label that applies to the container."', args=[d.arg(name='level', type=d.T.string)]), - withLevel(level): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { securityContext+: { seLinuxOptions+: { level: level } } } } } } } }, - '#withRole':: d.fn(help='"Role is a SELinux role label that applies to the container."', args=[d.arg(name='role', type=d.T.string)]), - withRole(role): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { securityContext+: { seLinuxOptions+: { role: role } } } } } } } }, - '#withType':: d.fn(help='"Type is a SELinux type label that applies to the container."', args=[d.arg(name='type', type=d.T.string)]), - withType(type): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { securityContext+: { seLinuxOptions+: { type: type } } } } } } } }, - '#withUser':: d.fn(help='"User is a SELinux user label that applies to the container."', args=[d.arg(name='user', type=d.T.string)]), - withUser(user): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { securityContext+: { seLinuxOptions+: { user: user } } } } } } } }, - }, - '#seccompProfile':: d.obj(help="\"SeccompProfile defines a pod/container's seccomp profile settings. Only one profile source may be set.\""), - seccompProfile: { - '#withLocalhostProfile':: d.fn(help="\"localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must only be set if type is \\\"Localhost\\\".\"", args=[d.arg(name='localhostProfile', type=d.T.string)]), - withLocalhostProfile(localhostProfile): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { securityContext+: { seccompProfile+: { localhostProfile: localhostProfile } } } } } } } }, - '#withType':: d.fn(help='"type indicates which kind of seccomp profile will be applied. Valid options are:\\n\\nLocalhost - a profile defined in a file on the node should be used. RuntimeDefault - the container runtime default profile should be used. 
Unconfined - no profile should be applied."', args=[d.arg(name='type', type=d.T.string)]), - withType(type): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { securityContext+: { seccompProfile+: { type: type } } } } } } } }, - }, - '#windowsOptions':: d.obj(help='"WindowsSecurityContextOptions contain Windows-specific options and credentials."'), - windowsOptions: { - '#withGmsaCredentialSpec':: d.fn(help='"GMSACredentialSpec is where the GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the GMSA credential spec named by the GMSACredentialSpecName field."', args=[d.arg(name='gmsaCredentialSpec', type=d.T.string)]), - withGmsaCredentialSpec(gmsaCredentialSpec): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { securityContext+: { windowsOptions+: { gmsaCredentialSpec: gmsaCredentialSpec } } } } } } } }, - '#withGmsaCredentialSpecName':: d.fn(help='"GMSACredentialSpecName is the name of the GMSA credential spec to use."', args=[d.arg(name='gmsaCredentialSpecName', type=d.T.string)]), - withGmsaCredentialSpecName(gmsaCredentialSpecName): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { securityContext+: { windowsOptions+: { gmsaCredentialSpecName: gmsaCredentialSpecName } } } } } } } }, - '#withRunAsUserName':: d.fn(help='"The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence."', args=[d.arg(name='runAsUserName', type=d.T.string)]), - withRunAsUserName(runAsUserName): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { securityContext+: { windowsOptions+: { runAsUserName: runAsUserName } } } } } } } }, - }, - '#withFsGroup':: d.fn(help="\"A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod:\\n\\n1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw----\\n\\nIf unset, the Kubelet will not modify the ownership and permissions of any volume.\"", args=[d.arg(name='fsGroup', type=d.T.integer)]), - withFsGroup(fsGroup): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { securityContext+: { fsGroup: fsGroup } } } } } } }, - '#withFsGroupChangePolicy':: d.fn(help='"fsGroupChangePolicy defines behavior of changing ownership and permission of the volume before being exposed inside Pod. This field will only apply to volume types which support fsGroup based ownership(and permissions). It will have no effect on ephemeral volume types such as: secret, configmaps and emptydir. Valid values are \\"OnRootMismatch\\" and \\"Always\\". If not specified, \\"Always\\" is used."', args=[d.arg(name='fsGroupChangePolicy', type=d.T.string)]), - withFsGroupChangePolicy(fsGroupChangePolicy): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { securityContext+: { fsGroupChangePolicy: fsGroupChangePolicy } } } } } } }, - '#withRunAsGroup':: d.fn(help='"The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext. 
If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container."', args=[d.arg(name='runAsGroup', type=d.T.integer)]), - withRunAsGroup(runAsGroup): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { securityContext+: { runAsGroup: runAsGroup } } } } } } }, - '#withRunAsNonRoot':: d.fn(help='"Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence."', args=[d.arg(name='runAsNonRoot', type=d.T.boolean)]), - withRunAsNonRoot(runAsNonRoot): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { securityContext+: { runAsNonRoot: runAsNonRoot } } } } } } }, - '#withRunAsUser':: d.fn(help='"The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container."', args=[d.arg(name='runAsUser', type=d.T.integer)]), - withRunAsUser(runAsUser): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { securityContext+: { runAsUser: runAsUser } } } } } } }, - '#withSupplementalGroups':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID. If unspecified, no groups will be added to any container.\"", args=[d.arg(name='supplementalGroups', type=d.T.array)]), - withSupplementalGroups(supplementalGroups): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { securityContext+: { supplementalGroups: if std.isArray(v=supplementalGroups) then supplementalGroups else [supplementalGroups] } } } } } } }, - '#withSupplementalGroupsMixin':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID. If unspecified, no groups will be added to any container.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='supplementalGroups', type=d.T.array)]), - withSupplementalGroupsMixin(supplementalGroups): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { securityContext+: { supplementalGroups+: if std.isArray(v=supplementalGroups) then supplementalGroups else [supplementalGroups] } } } } } } }, - '#withSysctls':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch."', args=[d.arg(name='sysctls', type=d.T.array)]), - withSysctls(sysctls): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { securityContext+: { sysctls: if std.isArray(v=sysctls) then sysctls else [sysctls] } } } } } } }, - '#withSysctlsMixin':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. 
Pods with unsupported sysctls (by the container runtime) might fail to launch."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='sysctls', type=d.T.array)]), - withSysctlsMixin(sysctls): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { securityContext+: { sysctls+: if std.isArray(v=sysctls) then sysctls else [sysctls] } } } } } } }, - }, - '#withActiveDeadlineSeconds':: d.fn(help='"Optional duration in seconds the pod may be active on the node relative to StartTime before the system will actively try to mark it failed and kill associated containers. Value must be a positive integer."', args=[d.arg(name='activeDeadlineSeconds', type=d.T.integer)]), - withActiveDeadlineSeconds(activeDeadlineSeconds): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { activeDeadlineSeconds: activeDeadlineSeconds } } } } } }, - '#withAutomountServiceAccountToken':: d.fn(help='"AutomountServiceAccountToken indicates whether a service account token should be automatically mounted."', args=[d.arg(name='automountServiceAccountToken', type=d.T.boolean)]), - withAutomountServiceAccountToken(automountServiceAccountToken): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { automountServiceAccountToken: automountServiceAccountToken } } } } } }, - '#withContainers':: d.fn(help='"List of containers belonging to the pod. Containers cannot currently be added or removed. There must be at least one container in a Pod. Cannot be updated."', args=[d.arg(name='containers', type=d.T.array)]), - withContainers(containers): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { containers: if std.isArray(v=containers) then containers else [containers] } } } } } }, - '#withContainersMixin':: d.fn(help='"List of containers belonging to the pod. Containers cannot currently be added or removed. There must be at least one container in a Pod. Cannot be updated."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='containers', type=d.T.array)]), - withContainersMixin(containers): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { containers+: if std.isArray(v=containers) then containers else [containers] } } } } } }, - '#withDnsPolicy':: d.fn(help="\"Set DNS policy for the pod. Defaults to \\\"ClusterFirst\\\". Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. To have DNS options set along with hostNetwork, you have to specify DNS policy explicitly to 'ClusterFirstWithHostNet'.\"", args=[d.arg(name='dnsPolicy', type=d.T.string)]), - withDnsPolicy(dnsPolicy): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { dnsPolicy: dnsPolicy } } } } } }, - '#withEnableServiceLinks':: d.fn(help="\"EnableServiceLinks indicates whether information about services should be injected into pod's environment variables, matching the syntax of Docker links. Optional: Defaults to true.\"", args=[d.arg(name='enableServiceLinks', type=d.T.boolean)]), - withEnableServiceLinks(enableServiceLinks): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { enableServiceLinks: enableServiceLinks } } } } } }, - '#withEphemeralContainers':: d.fn(help="\"List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. 
In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. This field is alpha-level and is only honored by servers that enable the EphemeralContainers feature.\"", args=[d.arg(name='ephemeralContainers', type=d.T.array)]), - withEphemeralContainers(ephemeralContainers): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { ephemeralContainers: if std.isArray(v=ephemeralContainers) then ephemeralContainers else [ephemeralContainers] } } } } } }, - '#withEphemeralContainersMixin':: d.fn(help="\"List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. This field is alpha-level and is only honored by servers that enable the EphemeralContainers feature.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='ephemeralContainers', type=d.T.array)]), - withEphemeralContainersMixin(ephemeralContainers): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { ephemeralContainers+: if std.isArray(v=ephemeralContainers) then ephemeralContainers else [ephemeralContainers] } } } } } }, - '#withHostAliases':: d.fn(help="\"HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts file if specified. This is only valid for non-hostNetwork pods.\"", args=[d.arg(name='hostAliases', type=d.T.array)]), - withHostAliases(hostAliases): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { hostAliases: if std.isArray(v=hostAliases) then hostAliases else [hostAliases] } } } } } }, - '#withHostAliasesMixin':: d.fn(help="\"HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts file if specified. This is only valid for non-hostNetwork pods.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='hostAliases', type=d.T.array)]), - withHostAliasesMixin(hostAliases): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { hostAliases+: if std.isArray(v=hostAliases) then hostAliases else [hostAliases] } } } } } }, - '#withHostIPC':: d.fn(help="\"Use the host's ipc namespace. Optional: Default to false.\"", args=[d.arg(name='hostIPC', type=d.T.boolean)]), - withHostIPC(hostIPC): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { hostIPC: hostIPC } } } } } }, - '#withHostNetwork':: d.fn(help="\"Host networking requested for this pod. Use the host's network namespace. If this option is set, the ports that will be used must be specified. Default to false.\"", args=[d.arg(name='hostNetwork', type=d.T.boolean)]), - withHostNetwork(hostNetwork): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { hostNetwork: hostNetwork } } } } } }, - '#withHostPID':: d.fn(help="\"Use the host's pid namespace. 
Optional: Default to false.\"", args=[d.arg(name='hostPID', type=d.T.boolean)]), - withHostPID(hostPID): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { hostPID: hostPID } } } } } }, - '#withHostname':: d.fn(help="\"Specifies the hostname of the Pod If not specified, the pod's hostname will be set to a system-defined value.\"", args=[d.arg(name='hostname', type=d.T.string)]), - withHostname(hostname): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { hostname: hostname } } } } } }, - '#withImagePullSecrets':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), - withImagePullSecrets(imagePullSecrets): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { imagePullSecrets: if std.isArray(v=imagePullSecrets) then imagePullSecrets else [imagePullSecrets] } } } } } }, - '#withImagePullSecretsMixin':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), - withImagePullSecretsMixin(imagePullSecrets): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { imagePullSecrets+: if std.isArray(v=imagePullSecrets) then imagePullSecrets else [imagePullSecrets] } } } } } }, - '#withInitContainers':: d.fn(help='"List of initialization containers belonging to the pod. Init containers are executed in order prior to containers being started. If any init container fails, the pod is considered to have failed and is handled according to its restartPolicy. The name for an init container or normal container must be unique among all containers. Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes. The resourceRequirements of an init container are taken into account during scheduling by finding the highest request/limit for each resource type, and then using the max of of that value or the sum of the normal containers. Limits are applied to init containers in a similar fashion. Init containers cannot currently be added or removed. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/"', args=[d.arg(name='initContainers', type=d.T.array)]), - withInitContainers(initContainers): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { initContainers: if std.isArray(v=initContainers) then initContainers else [initContainers] } } } } } }, - '#withInitContainersMixin':: d.fn(help='"List of initialization containers belonging to the pod. Init containers are executed in order prior to containers being started. If any init container fails, the pod is considered to have failed and is handled according to its restartPolicy. 
The name for an init container or normal container must be unique among all containers. Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes. The resourceRequirements of an init container are taken into account during scheduling by finding the highest request/limit for each resource type, and then using the max of of that value or the sum of the normal containers. Limits are applied to init containers in a similar fashion. Init containers cannot currently be added or removed. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='initContainers', type=d.T.array)]), - withInitContainersMixin(initContainers): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { initContainers+: if std.isArray(v=initContainers) then initContainers else [initContainers] } } } } } }, - '#withNodeName':: d.fn(help='"NodeName is a request to schedule this pod onto a specific node. If it is non-empty, the scheduler simply schedules this pod onto that node, assuming that it fits resource requirements."', args=[d.arg(name='nodeName', type=d.T.string)]), - withNodeName(nodeName): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { nodeName: nodeName } } } } } }, - '#withNodeSelector':: d.fn(help="\"NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/\"", args=[d.arg(name='nodeSelector', type=d.T.object)]), - withNodeSelector(nodeSelector): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { nodeSelector: nodeSelector } } } } } }, - '#withNodeSelectorMixin':: d.fn(help="\"NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='nodeSelector', type=d.T.object)]), - withNodeSelectorMixin(nodeSelector): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { nodeSelector+: nodeSelector } } } } } }, - '#withOverhead':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md This field is alpha-level as of Kubernetes v1.16, and is only honored by servers that enable the PodOverhead feature."', args=[d.arg(name='overhead', type=d.T.object)]), - withOverhead(overhead): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { overhead: overhead } } } } } }, - '#withOverheadMixin':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. 
This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md This field is alpha-level as of Kubernetes v1.16, and is only honored by servers that enable the PodOverhead feature."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='overhead', type=d.T.object)]), - withOverheadMixin(overhead): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { overhead+: overhead } } } } } }, - '#withPreemptionPolicy':: d.fn(help='"PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset. This field is beta-level, gated by the NonPreemptingPriority feature-gate."', args=[d.arg(name='preemptionPolicy', type=d.T.string)]), - withPreemptionPolicy(preemptionPolicy): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { preemptionPolicy: preemptionPolicy } } } } } }, - '#withPriority':: d.fn(help='"The priority value. Various system components use this field to find the priority of the pod. When Priority Admission Controller is enabled, it prevents users from setting this field. The admission controller populates this field from PriorityClassName. The higher the value, the higher the priority."', args=[d.arg(name='priority', type=d.T.integer)]), - withPriority(priority): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { priority: priority } } } } } }, - '#withPriorityClassName':: d.fn(help="\"If specified, indicates the pod's priority. \\\"system-node-critical\\\" and \\\"system-cluster-critical\\\" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default.\"", args=[d.arg(name='priorityClassName', type=d.T.string)]), - withPriorityClassName(priorityClassName): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { priorityClassName: priorityClassName } } } } } }, - '#withReadinessGates':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/0007-pod-ready%2B%2B.md"', args=[d.arg(name='readinessGates', type=d.T.array)]), - withReadinessGates(readinessGates): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { readinessGates: if std.isArray(v=readinessGates) then readinessGates else [readinessGates] } } } } } }, - '#withReadinessGatesMixin':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. 
A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/0007-pod-ready%2B%2B.md"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='readinessGates', type=d.T.array)]), - withReadinessGatesMixin(readinessGates): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { readinessGates+: if std.isArray(v=readinessGates) then readinessGates else [readinessGates] } } } } } }, - '#withRestartPolicy':: d.fn(help='"Restart policy for all containers within the pod. One of Always, OnFailure, Never. Default to Always. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy"', args=[d.arg(name='restartPolicy', type=d.T.string)]), - withRestartPolicy(restartPolicy): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { restartPolicy: restartPolicy } } } } } }, - '#withRuntimeClassName':: d.fn(help='"RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \\"legacy\\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md This is a beta feature as of Kubernetes v1.14."', args=[d.arg(name='runtimeClassName', type=d.T.string)]), - withRuntimeClassName(runtimeClassName): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { runtimeClassName: runtimeClassName } } } } } }, - '#withSchedulerName':: d.fn(help='"If specified, the pod will be dispatched by specified scheduler. If not specified, the pod will be dispatched by default scheduler."', args=[d.arg(name='schedulerName', type=d.T.string)]), - withSchedulerName(schedulerName): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { schedulerName: schedulerName } } } } } }, - '#withServiceAccount':: d.fn(help='"DeprecatedServiceAccount is a depreciated alias for ServiceAccountName. Deprecated: Use serviceAccountName instead."', args=[d.arg(name='serviceAccount', type=d.T.string)]), - withServiceAccount(serviceAccount): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { serviceAccount: serviceAccount } } } } } }, - '#withServiceAccountName':: d.fn(help='"ServiceAccountName is the name of the ServiceAccount to use to run this pod. More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/"', args=[d.arg(name='serviceAccountName', type=d.T.string)]), - withServiceAccountName(serviceAccountName): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { serviceAccountName: serviceAccountName } } } } } }, - '#withSetHostnameAsFQDN':: d.fn(help="\"If true the pod's hostname will be configured as the pod's FQDN, rather than the leaf name (the default). In Linux containers, this means setting the FQDN in the hostname field of the kernel (the nodename field of struct utsname). In Windows containers, this means setting the registry value of hostname for the registry key HKEY_LOCAL_MACHINE\\\\SYSTEM\\\\CurrentControlSet\\\\Services\\\\Tcpip\\\\Parameters to FQDN. If a pod does not have FQDN, this has no effect. 
Default to false.\"", args=[d.arg(name='setHostnameAsFQDN', type=d.T.boolean)]), - withSetHostnameAsFQDN(setHostnameAsFQDN): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { setHostnameAsFQDN: setHostnameAsFQDN } } } } } }, - '#withShareProcessNamespace':: d.fn(help='"Share a single process namespace between all of the containers in a pod. When this is set containers will be able to view and signal processes from other containers in the same pod, and the first process in each container will not be assigned PID 1. HostPID and ShareProcessNamespace cannot both be set. Optional: Default to false."', args=[d.arg(name='shareProcessNamespace', type=d.T.boolean)]), - withShareProcessNamespace(shareProcessNamespace): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { shareProcessNamespace: shareProcessNamespace } } } } } }, - '#withSubdomain':: d.fn(help='"If specified, the fully qualified Pod hostname will be \\"...svc.\\". If not specified, the pod will not have a domainname at all."', args=[d.arg(name='subdomain', type=d.T.string)]), - withSubdomain(subdomain): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { subdomain: subdomain } } } } } }, - '#withTerminationGracePeriodSeconds':: d.fn(help='"Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). If this value is nil, the default grace period will be used instead. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. Defaults to 30 seconds."', args=[d.arg(name='terminationGracePeriodSeconds', type=d.T.integer)]), - withTerminationGracePeriodSeconds(terminationGracePeriodSeconds): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { terminationGracePeriodSeconds: terminationGracePeriodSeconds } } } } } }, - '#withTolerations':: d.fn(help="\"If specified, the pod's tolerations.\"", args=[d.arg(name='tolerations', type=d.T.array)]), - withTolerations(tolerations): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { tolerations: if std.isArray(v=tolerations) then tolerations else [tolerations] } } } } } }, - '#withTolerationsMixin':: d.fn(help="\"If specified, the pod's tolerations.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='tolerations', type=d.T.array)]), - withTolerationsMixin(tolerations): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { tolerations+: if std.isArray(v=tolerations) then tolerations else [tolerations] } } } } } }, - '#withTopologySpreadConstraints':: d.fn(help='"TopologySpreadConstraints describes how a group of pods ought to spread across topology domains. Scheduler will schedule pods in a way which abides by the constraints. All topologySpreadConstraints are ANDed."', args=[d.arg(name='topologySpreadConstraints', type=d.T.array)]), - withTopologySpreadConstraints(topologySpreadConstraints): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { topologySpreadConstraints: if std.isArray(v=topologySpreadConstraints) then topologySpreadConstraints else [topologySpreadConstraints] } } } } } }, - '#withTopologySpreadConstraintsMixin':: d.fn(help='"TopologySpreadConstraints describes how a group of pods ought to spread across topology domains. 
Scheduler will schedule pods in a way which abides by the constraints. All topologySpreadConstraints are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='topologySpreadConstraints', type=d.T.array)]), - withTopologySpreadConstraintsMixin(topologySpreadConstraints): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { topologySpreadConstraints+: if std.isArray(v=topologySpreadConstraints) then topologySpreadConstraints else [topologySpreadConstraints] } } } } } }, - '#withVolumes':: d.fn(help='"List of volumes that can be mounted by containers belonging to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes"', args=[d.arg(name='volumes', type=d.T.array)]), - withVolumes(volumes): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { volumes: if std.isArray(v=volumes) then volumes else [volumes] } } } } } }, - '#withVolumesMixin':: d.fn(help='"List of volumes that can be mounted by containers belonging to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='volumes', type=d.T.array)]), - withVolumesMixin(volumes): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { volumes+: if std.isArray(v=volumes) then volumes else [volumes] } } } } } }, - }, - }, - '#withActiveDeadlineSeconds':: d.fn(help='"Specifies the duration in seconds relative to the startTime that the job may be continuously active before the system tries to terminate it; value must be positive integer. If a Job is suspended (at creation or through an update), this timer will effectively be stopped and reset when the Job is resumed again."', args=[d.arg(name='activeDeadlineSeconds', type=d.T.integer)]), - withActiveDeadlineSeconds(activeDeadlineSeconds): { spec+: { jobTemplate+: { spec+: { activeDeadlineSeconds: activeDeadlineSeconds } } } }, - '#withBackoffLimit':: d.fn(help='"Specifies the number of retries before marking this job failed. Defaults to 6"', args=[d.arg(name='backoffLimit', type=d.T.integer)]), - withBackoffLimit(backoffLimit): { spec+: { jobTemplate+: { spec+: { backoffLimit: backoffLimit } } } }, - '#withCompletionMode':: d.fn(help="\"CompletionMode specifies how Pod completions are tracked. It can be `NonIndexed` (default) or `Indexed`.\\n\\n`NonIndexed` means that the Job is considered complete when there have been .spec.completions successfully completed Pods. Each Pod completion is homologous to each other.\\n\\n`Indexed` means that the Pods of a Job get an associated completion index from 0 to (.spec.completions - 1), available in the annotation batch.kubernetes.io/job-completion-index. The Job is considered complete when there is one successfully completed Pod for each index. When value is `Indexed`, .spec.completions must be specified and `.spec.parallelism` must be less than or equal to 10^5.\\n\\nThis field is alpha-level and is only honored by servers that enable the IndexedJob feature gate. More completion modes can be added in the future. If the Job controller observes a mode that it doesn't recognize, the controller skips updates for the Job.\"", args=[d.arg(name='completionMode', type=d.T.string)]), - withCompletionMode(completionMode): { spec+: { jobTemplate+: { spec+: { completionMode: completionMode } } } }, - '#withCompletions':: d.fn(help='"Specifies the desired number of successfully finished pods the job should be run with. 
Setting to nil means that the success of any pod signals the success of all pods, and allows parallelism to have any positive value. Setting to 1 means that parallelism is limited to 1 and the success of that pod signals the success of the job. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/"', args=[d.arg(name='completions', type=d.T.integer)]), - withCompletions(completions): { spec+: { jobTemplate+: { spec+: { completions: completions } } } }, - '#withManualSelector':: d.fn(help='"manualSelector controls generation of pod labels and pod selectors. Leave `manualSelector` unset unless you are certain what you are doing. When false or unset, the system pick labels unique to this job and appends those labels to the pod template. When true, the user is responsible for picking unique labels and specifying the selector. Failure to pick a unique label may cause this and other jobs to not function correctly. However, You may see `manualSelector=true` in jobs that were created with the old `extensions/v1beta1` API. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/#specifying-your-own-pod-selector"', args=[d.arg(name='manualSelector', type=d.T.boolean)]), - withManualSelector(manualSelector): { spec+: { jobTemplate+: { spec+: { manualSelector: manualSelector } } } }, - '#withParallelism':: d.fn(help='"Specifies the maximum desired number of pods the job should run at any given time. The actual number of pods running in steady state will be less than this number when ((.spec.completions - .status.successful) < .spec.parallelism), i.e. when the work left to do is less than max parallelism. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/"', args=[d.arg(name='parallelism', type=d.T.integer)]), - withParallelism(parallelism): { spec+: { jobTemplate+: { spec+: { parallelism: parallelism } } } }, - '#withSuspend':: d.fn(help='"Suspend specifies whether the Job controller should create Pods or not. If a Job is created with suspend set to true, no Pods are created by the Job controller. If a Job is suspended after creation (i.e. the flag goes from false to true), the Job controller will delete all active Pods associated with this Job. Users must design their workload to gracefully handle this. Suspending a Job will reset the StartTime field of the Job, effectively resetting the ActiveDeadlineSeconds timer too. This is an alpha field and requires the SuspendJob feature gate to be enabled; otherwise this field may not be set to true. Defaults to false."', args=[d.arg(name='suspend', type=d.T.boolean)]), - withSuspend(suspend): { spec+: { jobTemplate+: { spec+: { suspend: suspend } } } }, - '#withTtlSecondsAfterFinished':: d.fn(help="\"ttlSecondsAfterFinished limits the lifetime of a Job that has finished execution (either Complete or Failed). If this field is set, ttlSecondsAfterFinished after the Job finishes, it is eligible to be automatically deleted. When the Job is being deleted, its lifecycle guarantees (e.g. finalizers) will be honored. If this field is unset, the Job won't be automatically deleted. If this field is set to zero, the Job becomes eligible to be deleted immediately after it finishes. 
This field is alpha-level and is only honored by servers that enable the TTLAfterFinished feature.\"", args=[d.arg(name='ttlSecondsAfterFinished', type=d.T.integer)]), - withTtlSecondsAfterFinished(ttlSecondsAfterFinished): { spec+: { jobTemplate+: { spec+: { ttlSecondsAfterFinished: ttlSecondsAfterFinished } } } }, - }, - }, - '#withConcurrencyPolicy':: d.fn(help="\"Specifies how to treat concurrent executions of a Job. Valid values are: - \\\"Allow\\\" (default): allows CronJobs to run concurrently; - \\\"Forbid\\\": forbids concurrent runs, skipping next run if previous run hasn't finished yet; - \\\"Replace\\\": cancels currently running job and replaces it with a new one\"", args=[d.arg(name='concurrencyPolicy', type=d.T.string)]), - withConcurrencyPolicy(concurrencyPolicy): { spec+: { concurrencyPolicy: concurrencyPolicy } }, - '#withFailedJobsHistoryLimit':: d.fn(help='"The number of failed finished jobs to retain. This is a pointer to distinguish between explicit zero and not specified. Defaults to 1."', args=[d.arg(name='failedJobsHistoryLimit', type=d.T.integer)]), - withFailedJobsHistoryLimit(failedJobsHistoryLimit): { spec+: { failedJobsHistoryLimit: failedJobsHistoryLimit } }, - '#withSchedule':: d.fn(help='"The schedule in Cron format, see https://en.wikipedia.org/wiki/Cron."', args=[d.arg(name='schedule', type=d.T.string)]), - withSchedule(schedule): { spec+: { schedule: schedule } }, - '#withStartingDeadlineSeconds':: d.fn(help='"Optional deadline in seconds for starting the job if it misses scheduled time for any reason. Missed jobs executions will be counted as failed ones."', args=[d.arg(name='startingDeadlineSeconds', type=d.T.integer)]), - withStartingDeadlineSeconds(startingDeadlineSeconds): { spec+: { startingDeadlineSeconds: startingDeadlineSeconds } }, - '#withSuccessfulJobsHistoryLimit':: d.fn(help='"The number of successful finished jobs to retain. This is a pointer to distinguish between explicit zero and not specified. Defaults to 3."', args=[d.arg(name='successfulJobsHistoryLimit', type=d.T.integer)]), - withSuccessfulJobsHistoryLimit(successfulJobsHistoryLimit): { spec+: { successfulJobsHistoryLimit: successfulJobsHistoryLimit } }, - '#withSuspend':: d.fn(help='"This flag tells the controller to suspend subsequent executions, it does not apply to already started executions. 
Defaults to false."', args=[d.arg(name='suspend', type=d.T.boolean)]), - withSuspend(suspend): { spec+: { suspend: suspend } }, - }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/batch/v1beta1/cronJobSpec.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/batch/v1beta1/cronJobSpec.libsonnet deleted file mode 100644 index 2ed68fed69e..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/batch/v1beta1/cronJobSpec.libsonnet +++ /dev/null @@ -1,335 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='cronJobSpec', url='', help='"CronJobSpec describes how the job execution will look like and when it will actually run."'), - '#jobTemplate':: d.obj(help='"JobTemplateSpec describes the data a Job should have when created from a template"'), - jobTemplate: { - '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), - metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), - withAnnotations(annotations): { jobTemplate+: { metadata+: { annotations: annotations } } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), - withAnnotationsMixin(annotations): { jobTemplate+: { metadata+: { annotations+: annotations } } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { jobTemplate+: { metadata+: { clusterName: clusterName } } }, - '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), - withCreationTimestamp(creationTimestamp): { jobTemplate+: { metadata+: { creationTimestamp: creationTimestamp } } }, - '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), - withDeletionGracePeriodSeconds(deletionGracePeriodSeconds): { jobTemplate+: { metadata+: { deletionGracePeriodSeconds: deletionGracePeriodSeconds } } }, - '#withDeletionTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. 
Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='deletionTimestamp', type=d.T.string)]), - withDeletionTimestamp(deletionTimestamp): { jobTemplate+: { metadata+: { deletionTimestamp: deletionTimestamp } } }, - '#withFinalizers':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizers(finalizers): { jobTemplate+: { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } } }, - '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizersMixin(finalizers): { jobTemplate+: { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), - withGenerateName(generateName): { jobTemplate+: { metadata+: { generateName: generateName } } }, - '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), - withGeneration(generation): { jobTemplate+: { metadata+: { generation: generation } } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), - withLabels(labels): { jobTemplate+: { metadata+: { labels: labels } } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), - withLabelsMixin(labels): { jobTemplate+: { metadata+: { labels+: labels } } }, - '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFields(managedFields): { jobTemplate+: { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } } }, - '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFieldsMixin(managedFields): { jobTemplate+: { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { jobTemplate+: { metadata+: { name: name } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. 
Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { jobTemplate+: { metadata+: { namespace: namespace } } }, - '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferences(ownerReferences): { jobTemplate+: { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } } }, - '#withOwnerReferencesMixin':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferencesMixin(ownerReferences): { jobTemplate+: { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } } }, - '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), - withResourceVersion(resourceVersion): { jobTemplate+: { metadata+: { resourceVersion: resourceVersion } } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), - withSelfLink(selfLink): { jobTemplate+: { metadata+: { selfLink: selfLink } } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), - withUid(uid): { jobTemplate+: { metadata+: { uid: uid } } }, - }, - '#spec':: d.obj(help='"JobSpec describes how the job execution will look like."'), - spec: { - '#selector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. 
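Unlike the cronJob object above, the setters in this cronJobSpec package return fragments rooted at the spec itself (no outer spec+:), because the package models the spec object directly. A minimal sketch, assuming the spec object is reachable as k.batch.v1.cronJobSpec in the replacement 1.29 library (here it is batch.v1beta1); the label and annotation values are illustrative. The selector and pod-template metadata groups further down expose the same style of setters (withMatchLabels, withLabels, and their Mixin variants).

local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';
local cronJobSpec = k.batch.v1.cronJobSpec;

// Evaluates to { jobTemplate: { metadata: { labels: { app: 'report' }, annotations: { team: 'data' } } } }
cronJobSpec.jobTemplate.metadata.withLabels({ app: 'report' })
+ cronJobSpec.jobTemplate.metadata.withAnnotationsMixin({ team: 'data' })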
A null label selector matches no objects."'), - selector: { - '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), - withMatchExpressions(matchExpressions): { jobTemplate+: { spec+: { selector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } } }, - '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), - withMatchExpressionsMixin(matchExpressions): { jobTemplate+: { spec+: { selector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } } }, - '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), - withMatchLabels(matchLabels): { jobTemplate+: { spec+: { selector+: { matchLabels: matchLabels } } } }, - '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), - withMatchLabelsMixin(matchLabels): { jobTemplate+: { spec+: { selector+: { matchLabels+: matchLabels } } } }, - }, - '#template':: d.obj(help='"PodTemplateSpec describes the data a pod should have when created from a template"'), - template: { - '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), - metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), - withAnnotations(annotations): { jobTemplate+: { spec+: { template+: { metadata+: { annotations: annotations } } } } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), - withAnnotationsMixin(annotations): { jobTemplate+: { spec+: { template+: { metadata+: { annotations+: annotations } } } } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. 
This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { jobTemplate+: { spec+: { template+: { metadata+: { clusterName: clusterName } } } } }, - '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), - withCreationTimestamp(creationTimestamp): { jobTemplate+: { spec+: { template+: { metadata+: { creationTimestamp: creationTimestamp } } } } }, - '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), - withDeletionGracePeriodSeconds(deletionGracePeriodSeconds): { jobTemplate+: { spec+: { template+: { metadata+: { deletionGracePeriodSeconds: deletionGracePeriodSeconds } } } } }, - '#withDeletionTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='deletionTimestamp', type=d.T.string)]), - withDeletionTimestamp(deletionTimestamp): { jobTemplate+: { spec+: { template+: { metadata+: { deletionTimestamp: deletionTimestamp } } } } }, - '#withFinalizers':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizers(finalizers): { jobTemplate+: { spec+: { template+: { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } } } } }, - '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. 
If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizersMixin(finalizers): { jobTemplate+: { spec+: { template+: { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } } } } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), - withGenerateName(generateName): { jobTemplate+: { spec+: { template+: { metadata+: { generateName: generateName } } } } }, - '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), - withGeneration(generation): { jobTemplate+: { spec+: { template+: { metadata+: { generation: generation } } } } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), - withLabels(labels): { jobTemplate+: { spec+: { template+: { metadata+: { labels: labels } } } } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), - withLabelsMixin(labels): { jobTemplate+: { spec+: { template+: { metadata+: { labels+: labels } } } } }, - '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". 
The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFields(managedFields): { jobTemplate+: { spec+: { template+: { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } } } } }, - '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFieldsMixin(managedFields): { jobTemplate+: { spec+: { template+: { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } } } } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { jobTemplate+: { spec+: { template+: { metadata+: { name: name } } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { jobTemplate+: { spec+: { template+: { metadata+: { namespace: namespace } } } } }, - '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferences(ownerReferences): { jobTemplate+: { spec+: { template+: { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } } } } }, - '#withOwnerReferencesMixin':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. 
There cannot be more than one managing controller."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferencesMixin(ownerReferences): { jobTemplate+: { spec+: { template+: { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } } } } }, - '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), - withResourceVersion(resourceVersion): { jobTemplate+: { spec+: { template+: { metadata+: { resourceVersion: resourceVersion } } } } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), - withSelfLink(selfLink): { jobTemplate+: { spec+: { template+: { metadata+: { selfLink: selfLink } } } } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), - withUid(uid): { jobTemplate+: { spec+: { template+: { metadata+: { uid: uid } } } } }, - }, - '#spec':: d.obj(help='"PodSpec is a description of a pod."'), - spec: { - '#affinity':: d.obj(help='"Affinity is a group of affinity scheduling rules."'), - affinity: { - '#nodeAffinity':: d.obj(help='"Node affinity is a group of node affinity scheduling rules."'), - nodeAffinity: { - '#requiredDuringSchedulingIgnoredDuringExecution':: d.obj(help='"A node selector represents the union of the results of one or more label queries over a set of nodes; that is, it represents the OR of the selectors represented by the node selector terms."'), - requiredDuringSchedulingIgnoredDuringExecution: { - '#withNodeSelectorTerms':: d.fn(help='"Required. A list of node selector terms. The terms are ORed."', args=[d.arg(name='nodeSelectorTerms', type=d.T.array)]), - withNodeSelectorTerms(nodeSelectorTerms): { jobTemplate+: { spec+: { template+: { spec+: { affinity+: { nodeAffinity+: { requiredDuringSchedulingIgnoredDuringExecution+: { nodeSelectorTerms: if std.isArray(v=nodeSelectorTerms) then nodeSelectorTerms else [nodeSelectorTerms] } } } } } } } }, - '#withNodeSelectorTermsMixin':: d.fn(help='"Required. A list of node selector terms. 
The terms are ORed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='nodeSelectorTerms', type=d.T.array)]), - withNodeSelectorTermsMixin(nodeSelectorTerms): { jobTemplate+: { spec+: { template+: { spec+: { affinity+: { nodeAffinity+: { requiredDuringSchedulingIgnoredDuringExecution+: { nodeSelectorTerms+: if std.isArray(v=nodeSelectorTerms) then nodeSelectorTerms else [nodeSelectorTerms] } } } } } } } }, - }, - '#withPreferredDuringSchedulingIgnoredDuringExecution':: d.fn(help='"The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding \\"weight\\" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred."', args=[d.arg(name='preferredDuringSchedulingIgnoredDuringExecution', type=d.T.array)]), - withPreferredDuringSchedulingIgnoredDuringExecution(preferredDuringSchedulingIgnoredDuringExecution): { jobTemplate+: { spec+: { template+: { spec+: { affinity+: { nodeAffinity+: { preferredDuringSchedulingIgnoredDuringExecution: if std.isArray(v=preferredDuringSchedulingIgnoredDuringExecution) then preferredDuringSchedulingIgnoredDuringExecution else [preferredDuringSchedulingIgnoredDuringExecution] } } } } } } }, - '#withPreferredDuringSchedulingIgnoredDuringExecutionMixin':: d.fn(help='"The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding \\"weight\\" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='preferredDuringSchedulingIgnoredDuringExecution', type=d.T.array)]), - withPreferredDuringSchedulingIgnoredDuringExecutionMixin(preferredDuringSchedulingIgnoredDuringExecution): { jobTemplate+: { spec+: { template+: { spec+: { affinity+: { nodeAffinity+: { preferredDuringSchedulingIgnoredDuringExecution+: if std.isArray(v=preferredDuringSchedulingIgnoredDuringExecution) then preferredDuringSchedulingIgnoredDuringExecution else [preferredDuringSchedulingIgnoredDuringExecution] } } } } } } }, - }, - '#podAffinity':: d.obj(help='"Pod affinity is a group of inter pod affinity scheduling rules."'), - podAffinity: { - '#withPreferredDuringSchedulingIgnoredDuringExecution':: d.fn(help='"The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. 
for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding \\"weight\\" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred."', args=[d.arg(name='preferredDuringSchedulingIgnoredDuringExecution', type=d.T.array)]), - withPreferredDuringSchedulingIgnoredDuringExecution(preferredDuringSchedulingIgnoredDuringExecution): { jobTemplate+: { spec+: { template+: { spec+: { affinity+: { podAffinity+: { preferredDuringSchedulingIgnoredDuringExecution: if std.isArray(v=preferredDuringSchedulingIgnoredDuringExecution) then preferredDuringSchedulingIgnoredDuringExecution else [preferredDuringSchedulingIgnoredDuringExecution] } } } } } } }, - '#withPreferredDuringSchedulingIgnoredDuringExecutionMixin':: d.fn(help='"The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding \\"weight\\" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='preferredDuringSchedulingIgnoredDuringExecution', type=d.T.array)]), - withPreferredDuringSchedulingIgnoredDuringExecutionMixin(preferredDuringSchedulingIgnoredDuringExecution): { jobTemplate+: { spec+: { template+: { spec+: { affinity+: { podAffinity+: { preferredDuringSchedulingIgnoredDuringExecution+: if std.isArray(v=preferredDuringSchedulingIgnoredDuringExecution) then preferredDuringSchedulingIgnoredDuringExecution else [preferredDuringSchedulingIgnoredDuringExecution] } } } } } } }, - '#withRequiredDuringSchedulingIgnoredDuringExecution':: d.fn(help='"If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied."', args=[d.arg(name='requiredDuringSchedulingIgnoredDuringExecution', type=d.T.array)]), - withRequiredDuringSchedulingIgnoredDuringExecution(requiredDuringSchedulingIgnoredDuringExecution): { jobTemplate+: { spec+: { template+: { spec+: { affinity+: { podAffinity+: { requiredDuringSchedulingIgnoredDuringExecution: if std.isArray(v=requiredDuringSchedulingIgnoredDuringExecution) then requiredDuringSchedulingIgnoredDuringExecution else [requiredDuringSchedulingIgnoredDuringExecution] } } } } } } }, - '#withRequiredDuringSchedulingIgnoredDuringExecutionMixin':: d.fn(help='"If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. 
due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='requiredDuringSchedulingIgnoredDuringExecution', type=d.T.array)]), - withRequiredDuringSchedulingIgnoredDuringExecutionMixin(requiredDuringSchedulingIgnoredDuringExecution): { jobTemplate+: { spec+: { template+: { spec+: { affinity+: { podAffinity+: { requiredDuringSchedulingIgnoredDuringExecution+: if std.isArray(v=requiredDuringSchedulingIgnoredDuringExecution) then requiredDuringSchedulingIgnoredDuringExecution else [requiredDuringSchedulingIgnoredDuringExecution] } } } } } } }, - }, - '#podAntiAffinity':: d.obj(help='"Pod anti affinity is a group of inter pod anti affinity scheduling rules."'), - podAntiAffinity: { - '#withPreferredDuringSchedulingIgnoredDuringExecution':: d.fn(help='"The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding \\"weight\\" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred."', args=[d.arg(name='preferredDuringSchedulingIgnoredDuringExecution', type=d.T.array)]), - withPreferredDuringSchedulingIgnoredDuringExecution(preferredDuringSchedulingIgnoredDuringExecution): { jobTemplate+: { spec+: { template+: { spec+: { affinity+: { podAntiAffinity+: { preferredDuringSchedulingIgnoredDuringExecution: if std.isArray(v=preferredDuringSchedulingIgnoredDuringExecution) then preferredDuringSchedulingIgnoredDuringExecution else [preferredDuringSchedulingIgnoredDuringExecution] } } } } } } }, - '#withPreferredDuringSchedulingIgnoredDuringExecutionMixin':: d.fn(help='"The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. 
for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding \\"weight\\" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='preferredDuringSchedulingIgnoredDuringExecution', type=d.T.array)]), - withPreferredDuringSchedulingIgnoredDuringExecutionMixin(preferredDuringSchedulingIgnoredDuringExecution): { jobTemplate+: { spec+: { template+: { spec+: { affinity+: { podAntiAffinity+: { preferredDuringSchedulingIgnoredDuringExecution+: if std.isArray(v=preferredDuringSchedulingIgnoredDuringExecution) then preferredDuringSchedulingIgnoredDuringExecution else [preferredDuringSchedulingIgnoredDuringExecution] } } } } } } }, - '#withRequiredDuringSchedulingIgnoredDuringExecution':: d.fn(help='"If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied."', args=[d.arg(name='requiredDuringSchedulingIgnoredDuringExecution', type=d.T.array)]), - withRequiredDuringSchedulingIgnoredDuringExecution(requiredDuringSchedulingIgnoredDuringExecution): { jobTemplate+: { spec+: { template+: { spec+: { affinity+: { podAntiAffinity+: { requiredDuringSchedulingIgnoredDuringExecution: if std.isArray(v=requiredDuringSchedulingIgnoredDuringExecution) then requiredDuringSchedulingIgnoredDuringExecution else [requiredDuringSchedulingIgnoredDuringExecution] } } } } } } }, - '#withRequiredDuringSchedulingIgnoredDuringExecutionMixin':: d.fn(help='"If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='requiredDuringSchedulingIgnoredDuringExecution', type=d.T.array)]), - withRequiredDuringSchedulingIgnoredDuringExecutionMixin(requiredDuringSchedulingIgnoredDuringExecution): { jobTemplate+: { spec+: { template+: { spec+: { affinity+: { podAntiAffinity+: { requiredDuringSchedulingIgnoredDuringExecution+: if std.isArray(v=requiredDuringSchedulingIgnoredDuringExecution) then requiredDuringSchedulingIgnoredDuringExecution else [requiredDuringSchedulingIgnoredDuringExecution] } } } } } } }, - }, - }, - '#dnsConfig':: d.obj(help='"PodDNSConfig defines the DNS parameters of a pod in addition to those generated from DNSPolicy."'), - dnsConfig: { - '#withNameservers':: d.fn(help='"A list of DNS name server IP addresses. This will be appended to the base nameservers generated from DNSPolicy. 
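The nested affinity helpers above can be reached through an alias of the visible field path; however they are reached, the returned mixin is always rooted at the cronJobSpec level, as the function bodies show. A minimal sketch for a required node-affinity rule, assuming the same k import as in the earlier sketches; the NodeSelectorTerm literal and label key are illustrative:

local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';
local podSpec = k.batch.v1.cronJobSpec.jobTemplate.spec.template.spec;

// Evaluates to { jobTemplate: { spec: { template: { spec: { affinity: { nodeAffinity: {
//   requiredDuringSchedulingIgnoredDuringExecution: { nodeSelectorTerms: [ ... ] } } } } } } } }
podSpec.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.withNodeSelectorTerms([
  { matchExpressions: [{ key: 'kubernetes.io/arch', operator: 'In', values: ['amd64'] }] },
])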
Duplicated nameservers will be removed."', args=[d.arg(name='nameservers', type=d.T.array)]), - withNameservers(nameservers): { jobTemplate+: { spec+: { template+: { spec+: { dnsConfig+: { nameservers: if std.isArray(v=nameservers) then nameservers else [nameservers] } } } } } }, - '#withNameserversMixin':: d.fn(help='"A list of DNS name server IP addresses. This will be appended to the base nameservers generated from DNSPolicy. Duplicated nameservers will be removed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='nameservers', type=d.T.array)]), - withNameserversMixin(nameservers): { jobTemplate+: { spec+: { template+: { spec+: { dnsConfig+: { nameservers+: if std.isArray(v=nameservers) then nameservers else [nameservers] } } } } } }, - '#withOptions':: d.fn(help='"A list of DNS resolver options. This will be merged with the base options generated from DNSPolicy. Duplicated entries will be removed. Resolution options given in Options will override those that appear in the base DNSPolicy."', args=[d.arg(name='options', type=d.T.array)]), - withOptions(options): { jobTemplate+: { spec+: { template+: { spec+: { dnsConfig+: { options: if std.isArray(v=options) then options else [options] } } } } } }, - '#withOptionsMixin':: d.fn(help='"A list of DNS resolver options. This will be merged with the base options generated from DNSPolicy. Duplicated entries will be removed. Resolution options given in Options will override those that appear in the base DNSPolicy."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='options', type=d.T.array)]), - withOptionsMixin(options): { jobTemplate+: { spec+: { template+: { spec+: { dnsConfig+: { options+: if std.isArray(v=options) then options else [options] } } } } } }, - '#withSearches':: d.fn(help='"A list of DNS search domains for host-name lookup. This will be appended to the base search paths generated from DNSPolicy. Duplicated search paths will be removed."', args=[d.arg(name='searches', type=d.T.array)]), - withSearches(searches): { jobTemplate+: { spec+: { template+: { spec+: { dnsConfig+: { searches: if std.isArray(v=searches) then searches else [searches] } } } } } }, - '#withSearchesMixin':: d.fn(help='"A list of DNS search domains for host-name lookup. This will be appended to the base search paths generated from DNSPolicy. Duplicated search paths will be removed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='searches', type=d.T.array)]), - withSearchesMixin(searches): { jobTemplate+: { spec+: { template+: { spec+: { dnsConfig+: { searches+: if std.isArray(v=searches) then searches else [searches] } } } } } }, - }, - '#securityContext':: d.obj(help='"PodSecurityContext holds pod-level security attributes and common container settings. Some fields are also present in container.securityContext. 
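The dnsConfig setters above accept either a single value or an array: the std.isArray guard wraps a scalar into a one-element list, and the Mixin variants append rather than replace. A small sketch with illustrative values, reusing the assumed k import:

local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';
local dnsConfig = k.batch.v1.cronJobSpec.jobTemplate.spec.template.spec.dnsConfig;

// The scalar nameserver is wrapped into ['10.0.0.10'] by the std.isArray guard.
dnsConfig.withNameservers('10.0.0.10')
+ dnsConfig.withSearchesMixin(['svc.cluster.local', 'cluster.local'])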
Field values of container.securityContext take precedence over field values of PodSecurityContext."'), - securityContext: { - '#seLinuxOptions':: d.obj(help='"SELinuxOptions are the labels to be applied to the container"'), - seLinuxOptions: { - '#withLevel':: d.fn(help='"Level is SELinux level label that applies to the container."', args=[d.arg(name='level', type=d.T.string)]), - withLevel(level): { jobTemplate+: { spec+: { template+: { spec+: { securityContext+: { seLinuxOptions+: { level: level } } } } } } }, - '#withRole':: d.fn(help='"Role is a SELinux role label that applies to the container."', args=[d.arg(name='role', type=d.T.string)]), - withRole(role): { jobTemplate+: { spec+: { template+: { spec+: { securityContext+: { seLinuxOptions+: { role: role } } } } } } }, - '#withType':: d.fn(help='"Type is a SELinux type label that applies to the container."', args=[d.arg(name='type', type=d.T.string)]), - withType(type): { jobTemplate+: { spec+: { template+: { spec+: { securityContext+: { seLinuxOptions+: { type: type } } } } } } }, - '#withUser':: d.fn(help='"User is a SELinux user label that applies to the container."', args=[d.arg(name='user', type=d.T.string)]), - withUser(user): { jobTemplate+: { spec+: { template+: { spec+: { securityContext+: { seLinuxOptions+: { user: user } } } } } } }, - }, - '#seccompProfile':: d.obj(help="\"SeccompProfile defines a pod/container's seccomp profile settings. Only one profile source may be set.\""), - seccompProfile: { - '#withLocalhostProfile':: d.fn(help="\"localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must only be set if type is \\\"Localhost\\\".\"", args=[d.arg(name='localhostProfile', type=d.T.string)]), - withLocalhostProfile(localhostProfile): { jobTemplate+: { spec+: { template+: { spec+: { securityContext+: { seccompProfile+: { localhostProfile: localhostProfile } } } } } } }, - '#withType':: d.fn(help='"type indicates which kind of seccomp profile will be applied. Valid options are:\\n\\nLocalhost - a profile defined in a file on the node should be used. RuntimeDefault - the container runtime default profile should be used. 
Unconfined - no profile should be applied."', args=[d.arg(name='type', type=d.T.string)]), - withType(type): { jobTemplate+: { spec+: { template+: { spec+: { securityContext+: { seccompProfile+: { type: type } } } } } } }, - }, - '#windowsOptions':: d.obj(help='"WindowsSecurityContextOptions contain Windows-specific options and credentials."'), - windowsOptions: { - '#withGmsaCredentialSpec':: d.fn(help='"GMSACredentialSpec is where the GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the GMSA credential spec named by the GMSACredentialSpecName field."', args=[d.arg(name='gmsaCredentialSpec', type=d.T.string)]), - withGmsaCredentialSpec(gmsaCredentialSpec): { jobTemplate+: { spec+: { template+: { spec+: { securityContext+: { windowsOptions+: { gmsaCredentialSpec: gmsaCredentialSpec } } } } } } }, - '#withGmsaCredentialSpecName':: d.fn(help='"GMSACredentialSpecName is the name of the GMSA credential spec to use."', args=[d.arg(name='gmsaCredentialSpecName', type=d.T.string)]), - withGmsaCredentialSpecName(gmsaCredentialSpecName): { jobTemplate+: { spec+: { template+: { spec+: { securityContext+: { windowsOptions+: { gmsaCredentialSpecName: gmsaCredentialSpecName } } } } } } }, - '#withRunAsUserName':: d.fn(help='"The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence."', args=[d.arg(name='runAsUserName', type=d.T.string)]), - withRunAsUserName(runAsUserName): { jobTemplate+: { spec+: { template+: { spec+: { securityContext+: { windowsOptions+: { runAsUserName: runAsUserName } } } } } } }, - }, - '#withFsGroup':: d.fn(help="\"A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod:\\n\\n1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw----\\n\\nIf unset, the Kubelet will not modify the ownership and permissions of any volume.\"", args=[d.arg(name='fsGroup', type=d.T.integer)]), - withFsGroup(fsGroup): { jobTemplate+: { spec+: { template+: { spec+: { securityContext+: { fsGroup: fsGroup } } } } } }, - '#withFsGroupChangePolicy':: d.fn(help='"fsGroupChangePolicy defines behavior of changing ownership and permission of the volume before being exposed inside Pod. This field will only apply to volume types which support fsGroup based ownership(and permissions). It will have no effect on ephemeral volume types such as: secret, configmaps and emptydir. Valid values are \\"OnRootMismatch\\" and \\"Always\\". If not specified, \\"Always\\" is used."', args=[d.arg(name='fsGroupChangePolicy', type=d.T.string)]), - withFsGroupChangePolicy(fsGroupChangePolicy): { jobTemplate+: { spec+: { template+: { spec+: { securityContext+: { fsGroupChangePolicy: fsGroupChangePolicy } } } } } }, - '#withRunAsGroup':: d.fn(help='"The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext. 
If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container."', args=[d.arg(name='runAsGroup', type=d.T.integer)]), - withRunAsGroup(runAsGroup): { jobTemplate+: { spec+: { template+: { spec+: { securityContext+: { runAsGroup: runAsGroup } } } } } }, - '#withRunAsNonRoot':: d.fn(help='"Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence."', args=[d.arg(name='runAsNonRoot', type=d.T.boolean)]), - withRunAsNonRoot(runAsNonRoot): { jobTemplate+: { spec+: { template+: { spec+: { securityContext+: { runAsNonRoot: runAsNonRoot } } } } } }, - '#withRunAsUser':: d.fn(help='"The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container."', args=[d.arg(name='runAsUser', type=d.T.integer)]), - withRunAsUser(runAsUser): { jobTemplate+: { spec+: { template+: { spec+: { securityContext+: { runAsUser: runAsUser } } } } } }, - '#withSupplementalGroups':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID. If unspecified, no groups will be added to any container.\"", args=[d.arg(name='supplementalGroups', type=d.T.array)]), - withSupplementalGroups(supplementalGroups): { jobTemplate+: { spec+: { template+: { spec+: { securityContext+: { supplementalGroups: if std.isArray(v=supplementalGroups) then supplementalGroups else [supplementalGroups] } } } } } }, - '#withSupplementalGroupsMixin':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID. If unspecified, no groups will be added to any container.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='supplementalGroups', type=d.T.array)]), - withSupplementalGroupsMixin(supplementalGroups): { jobTemplate+: { spec+: { template+: { spec+: { securityContext+: { supplementalGroups+: if std.isArray(v=supplementalGroups) then supplementalGroups else [supplementalGroups] } } } } } }, - '#withSysctls':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch."', args=[d.arg(name='sysctls', type=d.T.array)]), - withSysctls(sysctls): { jobTemplate+: { spec+: { template+: { spec+: { securityContext+: { sysctls: if std.isArray(v=sysctls) then sysctls else [sysctls] } } } } } }, - '#withSysctlsMixin':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. 
Pods with unsupported sysctls (by the container runtime) might fail to launch."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='sysctls', type=d.T.array)]), - withSysctlsMixin(sysctls): { jobTemplate+: { spec+: { template+: { spec+: { securityContext+: { sysctls+: if std.isArray(v=sysctls) then sysctls else [sysctls] } } } } } }, - }, - '#withActiveDeadlineSeconds':: d.fn(help='"Optional duration in seconds the pod may be active on the node relative to StartTime before the system will actively try to mark it failed and kill associated containers. Value must be a positive integer."', args=[d.arg(name='activeDeadlineSeconds', type=d.T.integer)]), - withActiveDeadlineSeconds(activeDeadlineSeconds): { jobTemplate+: { spec+: { template+: { spec+: { activeDeadlineSeconds: activeDeadlineSeconds } } } } }, - '#withAutomountServiceAccountToken':: d.fn(help='"AutomountServiceAccountToken indicates whether a service account token should be automatically mounted."', args=[d.arg(name='automountServiceAccountToken', type=d.T.boolean)]), - withAutomountServiceAccountToken(automountServiceAccountToken): { jobTemplate+: { spec+: { template+: { spec+: { automountServiceAccountToken: automountServiceAccountToken } } } } }, - '#withContainers':: d.fn(help='"List of containers belonging to the pod. Containers cannot currently be added or removed. There must be at least one container in a Pod. Cannot be updated."', args=[d.arg(name='containers', type=d.T.array)]), - withContainers(containers): { jobTemplate+: { spec+: { template+: { spec+: { containers: if std.isArray(v=containers) then containers else [containers] } } } } }, - '#withContainersMixin':: d.fn(help='"List of containers belonging to the pod. Containers cannot currently be added or removed. There must be at least one container in a Pod. Cannot be updated."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='containers', type=d.T.array)]), - withContainersMixin(containers): { jobTemplate+: { spec+: { template+: { spec+: { containers+: if std.isArray(v=containers) then containers else [containers] } } } } }, - '#withDnsPolicy':: d.fn(help="\"Set DNS policy for the pod. Defaults to \\\"ClusterFirst\\\". Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. To have DNS options set along with hostNetwork, you have to specify DNS policy explicitly to 'ClusterFirstWithHostNet'.\"", args=[d.arg(name='dnsPolicy', type=d.T.string)]), - withDnsPolicy(dnsPolicy): { jobTemplate+: { spec+: { template+: { spec+: { dnsPolicy: dnsPolicy } } } } }, - '#withEnableServiceLinks':: d.fn(help="\"EnableServiceLinks indicates whether information about services should be injected into pod's environment variables, matching the syntax of Docker links. Optional: Defaults to true.\"", args=[d.arg(name='enableServiceLinks', type=d.T.boolean)]), - withEnableServiceLinks(enableServiceLinks): { jobTemplate+: { spec+: { template+: { spec+: { enableServiceLinks: enableServiceLinks } } } } }, - '#withEphemeralContainers':: d.fn(help="\"List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. 
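The pod-level securityContext setters compose the same way, with seLinuxOptions, seccompProfile, and windowsOptions nested one level deeper. A minimal sketch for a non-root pod using only the functions documented above; the UID/GID values are illustrative:

local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';
local secCtx = k.batch.v1.cronJobSpec.jobTemplate.spec.template.spec.securityContext;

secCtx.withRunAsNonRoot(true)
+ secCtx.withRunAsUser(10001)
+ secCtx.withFsGroup(10001)
+ secCtx.seccompProfile.withType('RuntimeDefault')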
This field is alpha-level and is only honored by servers that enable the EphemeralContainers feature.\"", args=[d.arg(name='ephemeralContainers', type=d.T.array)]), - withEphemeralContainers(ephemeralContainers): { jobTemplate+: { spec+: { template+: { spec+: { ephemeralContainers: if std.isArray(v=ephemeralContainers) then ephemeralContainers else [ephemeralContainers] } } } } }, - '#withEphemeralContainersMixin':: d.fn(help="\"List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. This field is alpha-level and is only honored by servers that enable the EphemeralContainers feature.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='ephemeralContainers', type=d.T.array)]), - withEphemeralContainersMixin(ephemeralContainers): { jobTemplate+: { spec+: { template+: { spec+: { ephemeralContainers+: if std.isArray(v=ephemeralContainers) then ephemeralContainers else [ephemeralContainers] } } } } }, - '#withHostAliases':: d.fn(help="\"HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts file if specified. This is only valid for non-hostNetwork pods.\"", args=[d.arg(name='hostAliases', type=d.T.array)]), - withHostAliases(hostAliases): { jobTemplate+: { spec+: { template+: { spec+: { hostAliases: if std.isArray(v=hostAliases) then hostAliases else [hostAliases] } } } } }, - '#withHostAliasesMixin':: d.fn(help="\"HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts file if specified. This is only valid for non-hostNetwork pods.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='hostAliases', type=d.T.array)]), - withHostAliasesMixin(hostAliases): { jobTemplate+: { spec+: { template+: { spec+: { hostAliases+: if std.isArray(v=hostAliases) then hostAliases else [hostAliases] } } } } }, - '#withHostIPC':: d.fn(help="\"Use the host's ipc namespace. Optional: Default to false.\"", args=[d.arg(name='hostIPC', type=d.T.boolean)]), - withHostIPC(hostIPC): { jobTemplate+: { spec+: { template+: { spec+: { hostIPC: hostIPC } } } } }, - '#withHostNetwork':: d.fn(help="\"Host networking requested for this pod. Use the host's network namespace. If this option is set, the ports that will be used must be specified. Default to false.\"", args=[d.arg(name='hostNetwork', type=d.T.boolean)]), - withHostNetwork(hostNetwork): { jobTemplate+: { spec+: { template+: { spec+: { hostNetwork: hostNetwork } } } } }, - '#withHostPID':: d.fn(help="\"Use the host's pid namespace. Optional: Default to false.\"", args=[d.arg(name='hostPID', type=d.T.boolean)]), - withHostPID(hostPID): { jobTemplate+: { spec+: { template+: { spec+: { hostPID: hostPID } } } } }, - '#withHostname':: d.fn(help="\"Specifies the hostname of the Pod If not specified, the pod's hostname will be set to a system-defined value.\"", args=[d.arg(name='hostname', type=d.T.string)]), - withHostname(hostname): { jobTemplate+: { spec+: { template+: { spec+: { hostname: hostname } } } } }, - '#withImagePullSecrets':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. 
If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), - withImagePullSecrets(imagePullSecrets): { jobTemplate+: { spec+: { template+: { spec+: { imagePullSecrets: if std.isArray(v=imagePullSecrets) then imagePullSecrets else [imagePullSecrets] } } } } }, - '#withImagePullSecretsMixin':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), - withImagePullSecretsMixin(imagePullSecrets): { jobTemplate+: { spec+: { template+: { spec+: { imagePullSecrets+: if std.isArray(v=imagePullSecrets) then imagePullSecrets else [imagePullSecrets] } } } } }, - '#withInitContainers':: d.fn(help='"List of initialization containers belonging to the pod. Init containers are executed in order prior to containers being started. If any init container fails, the pod is considered to have failed and is handled according to its restartPolicy. The name for an init container or normal container must be unique among all containers. Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes. The resourceRequirements of an init container are taken into account during scheduling by finding the highest request/limit for each resource type, and then using the max of of that value or the sum of the normal containers. Limits are applied to init containers in a similar fashion. Init containers cannot currently be added or removed. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/"', args=[d.arg(name='initContainers', type=d.T.array)]), - withInitContainers(initContainers): { jobTemplate+: { spec+: { template+: { spec+: { initContainers: if std.isArray(v=initContainers) then initContainers else [initContainers] } } } } }, - '#withInitContainersMixin':: d.fn(help='"List of initialization containers belonging to the pod. Init containers are executed in order prior to containers being started. If any init container fails, the pod is considered to have failed and is handled according to its restartPolicy. The name for an init container or normal container must be unique among all containers. Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes. The resourceRequirements of an init container are taken into account during scheduling by finding the highest request/limit for each resource type, and then using the max of of that value or the sum of the normal containers. Limits are applied to init containers in a similar fashion. Init containers cannot currently be added or removed. Cannot be updated. 
More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='initContainers', type=d.T.array)]), - withInitContainersMixin(initContainers): { jobTemplate+: { spec+: { template+: { spec+: { initContainers+: if std.isArray(v=initContainers) then initContainers else [initContainers] } } } } }, - '#withNodeName':: d.fn(help='"NodeName is a request to schedule this pod onto a specific node. If it is non-empty, the scheduler simply schedules this pod onto that node, assuming that it fits resource requirements."', args=[d.arg(name='nodeName', type=d.T.string)]), - withNodeName(nodeName): { jobTemplate+: { spec+: { template+: { spec+: { nodeName: nodeName } } } } }, - '#withNodeSelector':: d.fn(help="\"NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/\"", args=[d.arg(name='nodeSelector', type=d.T.object)]), - withNodeSelector(nodeSelector): { jobTemplate+: { spec+: { template+: { spec+: { nodeSelector: nodeSelector } } } } }, - '#withNodeSelectorMixin':: d.fn(help="\"NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='nodeSelector', type=d.T.object)]), - withNodeSelectorMixin(nodeSelector): { jobTemplate+: { spec+: { template+: { spec+: { nodeSelector+: nodeSelector } } } } }, - '#withOverhead':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md This field is alpha-level as of Kubernetes v1.16, and is only honored by servers that enable the PodOverhead feature."', args=[d.arg(name='overhead', type=d.T.object)]), - withOverhead(overhead): { jobTemplate+: { spec+: { template+: { spec+: { overhead: overhead } } } } }, - '#withOverheadMixin':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. 
More info: https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md This field is alpha-level as of Kubernetes v1.16, and is only honored by servers that enable the PodOverhead feature."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='overhead', type=d.T.object)]), - withOverheadMixin(overhead): { jobTemplate+: { spec+: { template+: { spec+: { overhead+: overhead } } } } }, - '#withPreemptionPolicy':: d.fn(help='"PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset. This field is beta-level, gated by the NonPreemptingPriority feature-gate."', args=[d.arg(name='preemptionPolicy', type=d.T.string)]), - withPreemptionPolicy(preemptionPolicy): { jobTemplate+: { spec+: { template+: { spec+: { preemptionPolicy: preemptionPolicy } } } } }, - '#withPriority':: d.fn(help='"The priority value. Various system components use this field to find the priority of the pod. When Priority Admission Controller is enabled, it prevents users from setting this field. The admission controller populates this field from PriorityClassName. The higher the value, the higher the priority."', args=[d.arg(name='priority', type=d.T.integer)]), - withPriority(priority): { jobTemplate+: { spec+: { template+: { spec+: { priority: priority } } } } }, - '#withPriorityClassName':: d.fn(help="\"If specified, indicates the pod's priority. \\\"system-node-critical\\\" and \\\"system-cluster-critical\\\" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default.\"", args=[d.arg(name='priorityClassName', type=d.T.string)]), - withPriorityClassName(priorityClassName): { jobTemplate+: { spec+: { template+: { spec+: { priorityClassName: priorityClassName } } } } }, - '#withReadinessGates':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/0007-pod-ready%2B%2B.md"', args=[d.arg(name='readinessGates', type=d.T.array)]), - withReadinessGates(readinessGates): { jobTemplate+: { spec+: { template+: { spec+: { readinessGates: if std.isArray(v=readinessGates) then readinessGates else [readinessGates] } } } } }, - '#withReadinessGatesMixin':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/0007-pod-ready%2B%2B.md"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='readinessGates', type=d.T.array)]), - withReadinessGatesMixin(readinessGates): { jobTemplate+: { spec+: { template+: { spec+: { readinessGates+: if std.isArray(v=readinessGates) then readinessGates else [readinessGates] } } } } }, - '#withRestartPolicy':: d.fn(help='"Restart policy for all containers within the pod. One of Always, OnFailure, Never. Default to Always. 
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy"', args=[d.arg(name='restartPolicy', type=d.T.string)]), - withRestartPolicy(restartPolicy): { jobTemplate+: { spec+: { template+: { spec+: { restartPolicy: restartPolicy } } } } }, - '#withRuntimeClassName':: d.fn(help='"RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \\"legacy\\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md This is a beta feature as of Kubernetes v1.14."', args=[d.arg(name='runtimeClassName', type=d.T.string)]), - withRuntimeClassName(runtimeClassName): { jobTemplate+: { spec+: { template+: { spec+: { runtimeClassName: runtimeClassName } } } } }, - '#withSchedulerName':: d.fn(help='"If specified, the pod will be dispatched by specified scheduler. If not specified, the pod will be dispatched by default scheduler."', args=[d.arg(name='schedulerName', type=d.T.string)]), - withSchedulerName(schedulerName): { jobTemplate+: { spec+: { template+: { spec+: { schedulerName: schedulerName } } } } }, - '#withServiceAccount':: d.fn(help='"DeprecatedServiceAccount is a depreciated alias for ServiceAccountName. Deprecated: Use serviceAccountName instead."', args=[d.arg(name='serviceAccount', type=d.T.string)]), - withServiceAccount(serviceAccount): { jobTemplate+: { spec+: { template+: { spec+: { serviceAccount: serviceAccount } } } } }, - '#withServiceAccountName':: d.fn(help='"ServiceAccountName is the name of the ServiceAccount to use to run this pod. More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/"', args=[d.arg(name='serviceAccountName', type=d.T.string)]), - withServiceAccountName(serviceAccountName): { jobTemplate+: { spec+: { template+: { spec+: { serviceAccountName: serviceAccountName } } } } }, - '#withSetHostnameAsFQDN':: d.fn(help="\"If true the pod's hostname will be configured as the pod's FQDN, rather than the leaf name (the default). In Linux containers, this means setting the FQDN in the hostname field of the kernel (the nodename field of struct utsname). In Windows containers, this means setting the registry value of hostname for the registry key HKEY_LOCAL_MACHINE\\\\SYSTEM\\\\CurrentControlSet\\\\Services\\\\Tcpip\\\\Parameters to FQDN. If a pod does not have FQDN, this has no effect. Default to false.\"", args=[d.arg(name='setHostnameAsFQDN', type=d.T.boolean)]), - withSetHostnameAsFQDN(setHostnameAsFQDN): { jobTemplate+: { spec+: { template+: { spec+: { setHostnameAsFQDN: setHostnameAsFQDN } } } } }, - '#withShareProcessNamespace':: d.fn(help='"Share a single process namespace between all of the containers in a pod. When this is set containers will be able to view and signal processes from other containers in the same pod, and the first process in each container will not be assigned PID 1. HostPID and ShareProcessNamespace cannot both be set. Optional: Default to false."', args=[d.arg(name='shareProcessNamespace', type=d.T.boolean)]), - withShareProcessNamespace(shareProcessNamespace): { jobTemplate+: { spec+: { template+: { spec+: { shareProcessNamespace: shareProcessNamespace } } } } }, - '#withSubdomain':: d.fn(help='"If specified, the fully qualified Pod hostname will be \\"...svc.\\". 
If not specified, the pod will not have a domainname at all."', args=[d.arg(name='subdomain', type=d.T.string)]), - withSubdomain(subdomain): { jobTemplate+: { spec+: { template+: { spec+: { subdomain: subdomain } } } } }, - '#withTerminationGracePeriodSeconds':: d.fn(help='"Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). If this value is nil, the default grace period will be used instead. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. Defaults to 30 seconds."', args=[d.arg(name='terminationGracePeriodSeconds', type=d.T.integer)]), - withTerminationGracePeriodSeconds(terminationGracePeriodSeconds): { jobTemplate+: { spec+: { template+: { spec+: { terminationGracePeriodSeconds: terminationGracePeriodSeconds } } } } }, - '#withTolerations':: d.fn(help="\"If specified, the pod's tolerations.\"", args=[d.arg(name='tolerations', type=d.T.array)]), - withTolerations(tolerations): { jobTemplate+: { spec+: { template+: { spec+: { tolerations: if std.isArray(v=tolerations) then tolerations else [tolerations] } } } } }, - '#withTolerationsMixin':: d.fn(help="\"If specified, the pod's tolerations.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='tolerations', type=d.T.array)]), - withTolerationsMixin(tolerations): { jobTemplate+: { spec+: { template+: { spec+: { tolerations+: if std.isArray(v=tolerations) then tolerations else [tolerations] } } } } }, - '#withTopologySpreadConstraints':: d.fn(help='"TopologySpreadConstraints describes how a group of pods ought to spread across topology domains. Scheduler will schedule pods in a way which abides by the constraints. All topologySpreadConstraints are ANDed."', args=[d.arg(name='topologySpreadConstraints', type=d.T.array)]), - withTopologySpreadConstraints(topologySpreadConstraints): { jobTemplate+: { spec+: { template+: { spec+: { topologySpreadConstraints: if std.isArray(v=topologySpreadConstraints) then topologySpreadConstraints else [topologySpreadConstraints] } } } } }, - '#withTopologySpreadConstraintsMixin':: d.fn(help='"TopologySpreadConstraints describes how a group of pods ought to spread across topology domains. Scheduler will schedule pods in a way which abides by the constraints. All topologySpreadConstraints are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='topologySpreadConstraints', type=d.T.array)]), - withTopologySpreadConstraintsMixin(topologySpreadConstraints): { jobTemplate+: { spec+: { template+: { spec+: { topologySpreadConstraints+: if std.isArray(v=topologySpreadConstraints) then topologySpreadConstraints else [topologySpreadConstraints] } } } } }, - '#withVolumes':: d.fn(help='"List of volumes that can be mounted by containers belonging to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes"', args=[d.arg(name='volumes', type=d.T.array)]), - withVolumes(volumes): { jobTemplate+: { spec+: { template+: { spec+: { volumes: if std.isArray(v=volumes) then volumes else [volumes] } } } } }, - '#withVolumesMixin':: d.fn(help='"List of volumes that can be mounted by containers belonging to the pod. 
More info: https://kubernetes.io/docs/concepts/storage/volumes"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='volumes', type=d.T.array)]), - withVolumesMixin(volumes): { jobTemplate+: { spec+: { template+: { spec+: { volumes+: if std.isArray(v=volumes) then volumes else [volumes] } } } } }, - }, - }, - '#withActiveDeadlineSeconds':: d.fn(help='"Specifies the duration in seconds relative to the startTime that the job may be continuously active before the system tries to terminate it; value must be positive integer. If a Job is suspended (at creation or through an update), this timer will effectively be stopped and reset when the Job is resumed again."', args=[d.arg(name='activeDeadlineSeconds', type=d.T.integer)]), - withActiveDeadlineSeconds(activeDeadlineSeconds): { jobTemplate+: { spec+: { activeDeadlineSeconds: activeDeadlineSeconds } } }, - '#withBackoffLimit':: d.fn(help='"Specifies the number of retries before marking this job failed. Defaults to 6"', args=[d.arg(name='backoffLimit', type=d.T.integer)]), - withBackoffLimit(backoffLimit): { jobTemplate+: { spec+: { backoffLimit: backoffLimit } } }, - '#withCompletionMode':: d.fn(help="\"CompletionMode specifies how Pod completions are tracked. It can be `NonIndexed` (default) or `Indexed`.\\n\\n`NonIndexed` means that the Job is considered complete when there have been .spec.completions successfully completed Pods. Each Pod completion is homologous to each other.\\n\\n`Indexed` means that the Pods of a Job get an associated completion index from 0 to (.spec.completions - 1), available in the annotation batch.kubernetes.io/job-completion-index. The Job is considered complete when there is one successfully completed Pod for each index. When value is `Indexed`, .spec.completions must be specified and `.spec.parallelism` must be less than or equal to 10^5.\\n\\nThis field is alpha-level and is only honored by servers that enable the IndexedJob feature gate. More completion modes can be added in the future. If the Job controller observes a mode that it doesn't recognize, the controller skips updates for the Job.\"", args=[d.arg(name='completionMode', type=d.T.string)]), - withCompletionMode(completionMode): { jobTemplate+: { spec+: { completionMode: completionMode } } }, - '#withCompletions':: d.fn(help='"Specifies the desired number of successfully finished pods the job should be run with. Setting to nil means that the success of any pod signals the success of all pods, and allows parallelism to have any positive value. Setting to 1 means that parallelism is limited to 1 and the success of that pod signals the success of the job. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/"', args=[d.arg(name='completions', type=d.T.integer)]), - withCompletions(completions): { jobTemplate+: { spec+: { completions: completions } } }, - '#withManualSelector':: d.fn(help='"manualSelector controls generation of pod labels and pod selectors. Leave `manualSelector` unset unless you are certain what you are doing. When false or unset, the system pick labels unique to this job and appends those labels to the pod template. When true, the user is responsible for picking unique labels and specifying the selector. Failure to pick a unique label may cause this and other jobs to not function correctly. However, You may see `manualSelector=true` in jobs that were created with the old `extensions/v1beta1` API. 
More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/#specifying-your-own-pod-selector"', args=[d.arg(name='manualSelector', type=d.T.boolean)]), - withManualSelector(manualSelector): { jobTemplate+: { spec+: { manualSelector: manualSelector } } }, - '#withParallelism':: d.fn(help='"Specifies the maximum desired number of pods the job should run at any given time. The actual number of pods running in steady state will be less than this number when ((.spec.completions - .status.successful) < .spec.parallelism), i.e. when the work left to do is less than max parallelism. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/"', args=[d.arg(name='parallelism', type=d.T.integer)]), - withParallelism(parallelism): { jobTemplate+: { spec+: { parallelism: parallelism } } }, - '#withSuspend':: d.fn(help='"Suspend specifies whether the Job controller should create Pods or not. If a Job is created with suspend set to true, no Pods are created by the Job controller. If a Job is suspended after creation (i.e. the flag goes from false to true), the Job controller will delete all active Pods associated with this Job. Users must design their workload to gracefully handle this. Suspending a Job will reset the StartTime field of the Job, effectively resetting the ActiveDeadlineSeconds timer too. This is an alpha field and requires the SuspendJob feature gate to be enabled; otherwise this field may not be set to true. Defaults to false."', args=[d.arg(name='suspend', type=d.T.boolean)]), - withSuspend(suspend): { jobTemplate+: { spec+: { suspend: suspend } } }, - '#withTtlSecondsAfterFinished':: d.fn(help="\"ttlSecondsAfterFinished limits the lifetime of a Job that has finished execution (either Complete or Failed). If this field is set, ttlSecondsAfterFinished after the Job finishes, it is eligible to be automatically deleted. When the Job is being deleted, its lifecycle guarantees (e.g. finalizers) will be honored. If this field is unset, the Job won't be automatically deleted. If this field is set to zero, the Job becomes eligible to be deleted immediately after it finishes. This field is alpha-level and is only honored by servers that enable the TTLAfterFinished feature.\"", args=[d.arg(name='ttlSecondsAfterFinished', type=d.T.integer)]), - withTtlSecondsAfterFinished(ttlSecondsAfterFinished): { jobTemplate+: { spec+: { ttlSecondsAfterFinished: ttlSecondsAfterFinished } } }, - }, - }, - '#withConcurrencyPolicy':: d.fn(help="\"Specifies how to treat concurrent executions of a Job. Valid values are: - \\\"Allow\\\" (default): allows CronJobs to run concurrently; - \\\"Forbid\\\": forbids concurrent runs, skipping next run if previous run hasn't finished yet; - \\\"Replace\\\": cancels currently running job and replaces it with a new one\"", args=[d.arg(name='concurrencyPolicy', type=d.T.string)]), - withConcurrencyPolicy(concurrencyPolicy): { concurrencyPolicy: concurrencyPolicy }, - '#withFailedJobsHistoryLimit':: d.fn(help='"The number of failed finished jobs to retain. This is a pointer to distinguish between explicit zero and not specified. 
Defaults to 1."', args=[d.arg(name='failedJobsHistoryLimit', type=d.T.integer)]), - withFailedJobsHistoryLimit(failedJobsHistoryLimit): { failedJobsHistoryLimit: failedJobsHistoryLimit }, - '#withSchedule':: d.fn(help='"The schedule in Cron format, see https://en.wikipedia.org/wiki/Cron."', args=[d.arg(name='schedule', type=d.T.string)]), - withSchedule(schedule): { schedule: schedule }, - '#withStartingDeadlineSeconds':: d.fn(help='"Optional deadline in seconds for starting the job if it misses scheduled time for any reason. Missed jobs executions will be counted as failed ones."', args=[d.arg(name='startingDeadlineSeconds', type=d.T.integer)]), - withStartingDeadlineSeconds(startingDeadlineSeconds): { startingDeadlineSeconds: startingDeadlineSeconds }, - '#withSuccessfulJobsHistoryLimit':: d.fn(help='"The number of successful finished jobs to retain. This is a pointer to distinguish between explicit zero and not specified. Defaults to 3."', args=[d.arg(name='successfulJobsHistoryLimit', type=d.T.integer)]), - withSuccessfulJobsHistoryLimit(successfulJobsHistoryLimit): { successfulJobsHistoryLimit: successfulJobsHistoryLimit }, - '#withSuspend':: d.fn(help='"This flag tells the controller to suspend subsequent executions, it does not apply to already started executions. Defaults to false."', args=[d.arg(name='suspend', type=d.T.boolean)]), - withSuspend(suspend): { suspend: suspend }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/batch/v1beta1/main.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/batch/v1beta1/main.libsonnet deleted file mode 100644 index e9498f60d44..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/batch/v1beta1/main.libsonnet +++ /dev/null @@ -1,8 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='v1beta1', url='', help=''), - cronJob: (import 'cronJob.libsonnet'), - cronJobSpec: (import 'cronJobSpec.libsonnet'), - cronJobStatus: (import 'cronJobStatus.libsonnet'), - jobTemplateSpec: (import 'jobTemplateSpec.libsonnet'), -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/certificates/v1beta1/certificateSigningRequest.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/certificates/v1beta1/certificateSigningRequest.libsonnet deleted file mode 100644 index b0e900695eb..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/certificates/v1beta1/certificateSigningRequest.libsonnet +++ /dev/null @@ -1,79 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='certificateSigningRequest', url='', help='"Describes a certificate signing request"'), - '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), - metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. 
More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), - withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), - withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, - '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), - withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, - '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), - withDeletionGracePeriodSeconds(deletionGracePeriodSeconds): { metadata+: { deletionGracePeriodSeconds: deletionGracePeriodSeconds } }, - '#withDeletionTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='deletionTimestamp', type=d.T.string)]), - withDeletionTimestamp(deletionTimestamp): { metadata+: { deletionTimestamp: deletionTimestamp } }, - '#withFinalizers':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. 
Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), - withGenerateName(generateName): { metadata+: { generateName: generateName } }, - '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), - withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), - withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), - withLabelsMixin(labels): { metadata+: { labels+: labels } }, - '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. 
A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { metadata+: { namespace: namespace } }, - '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, - '#withOwnerReferencesMixin':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. 
There cannot be more than one managing controller."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, - '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), - withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), - withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), - withUid(uid): { metadata+: { uid: uid } }, - }, - '#new':: d.fn(help='new returns an instance of CertificateSigningRequest', args=[d.arg(name='name', type=d.T.string)]), - new(name): { - apiVersion: 'certificates.k8s.io/v1beta1', - kind: 'CertificateSigningRequest', - } + self.metadata.withName(name=name), - '#spec':: d.obj(help='"This information is immutable after the request is created. Only the Request and Usages fields can be set on creation, other fields are derived by Kubernetes and cannot be modified by users."'), - spec: { - '#withExtra':: d.fn(help='"Extra information about the requesting user. See user.Info interface for details."', args=[d.arg(name='extra', type=d.T.object)]), - withExtra(extra): { spec+: { extra: extra } }, - '#withExtraMixin':: d.fn(help='"Extra information about the requesting user. See user.Info interface for details."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='extra', type=d.T.object)]), - withExtraMixin(extra): { spec+: { extra+: extra } }, - '#withGroups':: d.fn(help='"Group information about the requesting user. See user.Info interface for details."', args=[d.arg(name='groups', type=d.T.array)]), - withGroups(groups): { spec+: { groups: if std.isArray(v=groups) then groups else [groups] } }, - '#withGroupsMixin':: d.fn(help='"Group information about the requesting user. 
See user.Info interface for details."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='groups', type=d.T.array)]), - withGroupsMixin(groups): { spec+: { groups+: if std.isArray(v=groups) then groups else [groups] } }, - '#withRequest':: d.fn(help='"Base64-encoded PKCS#10 CSR data"', args=[d.arg(name='request', type=d.T.string)]), - withRequest(request): { spec+: { request: request } }, - '#withSignerName':: d.fn(help="\"Requested signer for the request. It is a qualified name in the form: `scope-hostname.io/name`. If empty, it will be defaulted:\\n 1. If it's a kubelet client certificate, it is assigned\\n \\\"kubernetes.io/kube-apiserver-client-kubelet\\\".\\n 2. If it's a kubelet serving certificate, it is assigned\\n \\\"kubernetes.io/kubelet-serving\\\".\\n 3. Otherwise, it is assigned \\\"kubernetes.io/legacy-unknown\\\".\\nDistribution of trust for signers happens out of band. You can select on this field using `spec.signerName`.\"", args=[d.arg(name='signerName', type=d.T.string)]), - withSignerName(signerName): { spec+: { signerName: signerName } }, - '#withUid':: d.fn(help='"UID information about the requesting user. See user.Info interface for details."', args=[d.arg(name='uid', type=d.T.string)]), - withUid(uid): { spec+: { uid: uid } }, - '#withUsages':: d.fn(help='"allowedUsages specifies a set of usage contexts the key will be valid for. See: https://tools.ietf.org/html/rfc5280#section-4.2.1.3\\n https://tools.ietf.org/html/rfc5280#section-4.2.1.12\\nValid values are:\\n \\"signing\\",\\n \\"digital signature\\",\\n \\"content commitment\\",\\n \\"key encipherment\\",\\n \\"key agreement\\",\\n \\"data encipherment\\",\\n \\"cert sign\\",\\n \\"crl sign\\",\\n \\"encipher only\\",\\n \\"decipher only\\",\\n \\"any\\",\\n \\"server auth\\",\\n \\"client auth\\",\\n \\"code signing\\",\\n \\"email protection\\",\\n \\"s/mime\\",\\n \\"ipsec end system\\",\\n \\"ipsec tunnel\\",\\n \\"ipsec user\\",\\n \\"timestamping\\",\\n \\"ocsp signing\\",\\n \\"microsoft sgc\\",\\n \\"netscape sgc\\', args=[d.arg(name='usages', type=d.T.array)]), - withUsages(usages): { spec+: { usages: if std.isArray(v=usages) then usages else [usages] } }, - '#withUsagesMixin':: d.fn(help='"allowedUsages specifies a set of usage contexts the key will be valid for. See: https://tools.ietf.org/html/rfc5280#section-4.2.1.3\\n https://tools.ietf.org/html/rfc5280#section-4.2.1.12\\nValid values are:\\n \\"signing\\",\\n \\"digital signature\\",\\n \\"content commitment\\",\\n \\"key encipherment\\",\\n \\"key agreement\\",\\n \\"data encipherment\\",\\n \\"cert sign\\",\\n \\"crl sign\\",\\n \\"encipher only\\",\\n \\"decipher only\\",\\n \\"any\\",\\n \\"server auth\\",\\n \\"client auth\\",\\n \\"code signing\\",\\n \\"email protection\\",\\n \\"s/mime\\",\\n \\"ipsec end system\\",\\n \\"ipsec tunnel\\",\\n \\"ipsec user\\",\\n \\"timestamping\\",\\n \\"ocsp signing\\",\\n \\"microsoft sgc\\",\\n \\"netscape sgc\\\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='usages', type=d.T.array)]), - withUsagesMixin(usages): { spec+: { usages+: if std.isArray(v=usages) then usages else [usages] } }, - '#withUsername':: d.fn(help='"Information about the requesting user. 
See user.Info interface for details."', args=[d.arg(name='username', type=d.T.string)]),
-    withUsername(username): { spec+: { username: username } },
-  },
-  '#mixin': 'ignore',
-  mixin: self,
-}
diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/certificates/v1beta1/certificateSigningRequestCondition.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/certificates/v1beta1/certificateSigningRequestCondition.libsonnet
deleted file mode 100644
index bd83485168a..00000000000
--- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/certificates/v1beta1/certificateSigningRequestCondition.libsonnet
+++ /dev/null
@@ -1,16 +0,0 @@
-{
-  local d = (import 'doc-util/main.libsonnet'),
-  '#':: d.pkg(name='certificateSigningRequestCondition', url='', help=''),
-  '#withLastTransitionTime':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='lastTransitionTime', type=d.T.string)]),
-  withLastTransitionTime(lastTransitionTime): { lastTransitionTime: lastTransitionTime },
-  '#withLastUpdateTime':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='lastUpdateTime', type=d.T.string)]),
-  withLastUpdateTime(lastUpdateTime): { lastUpdateTime: lastUpdateTime },
-  '#withMessage':: d.fn(help='"human readable message with details about the request state"', args=[d.arg(name='message', type=d.T.string)]),
-  withMessage(message): { message: message },
-  '#withReason':: d.fn(help='"brief reason for the request state"', args=[d.arg(name='reason', type=d.T.string)]),
-  withReason(reason): { reason: reason },
-  '#withType':: d.fn(help='"type of the condition. Known conditions include \\"Approved\\", \\"Denied\\", and \\"Failed\\"."', args=[d.arg(name='type', type=d.T.string)]),
-  withType(type): { type: type },
-  '#mixin': 'ignore',
-  mixin: self,
-}
diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/certificates/v1beta1/certificateSigningRequestSpec.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/certificates/v1beta1/certificateSigningRequestSpec.libsonnet
deleted file mode 100644
index 42034432592..00000000000
--- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/certificates/v1beta1/certificateSigningRequestSpec.libsonnet
+++ /dev/null
@@ -1,26 +0,0 @@
-{
-  local d = (import 'doc-util/main.libsonnet'),
-  '#':: d.pkg(name='certificateSigningRequestSpec', url='', help='"This information is immutable after the request is created. Only the Request and Usages fields can be set on creation, other fields are derived by Kubernetes and cannot be modified by users."'),
-  '#withExtra':: d.fn(help='"Extra information about the requesting user. See user.Info interface for details."', args=[d.arg(name='extra', type=d.T.object)]),
-  withExtra(extra): { extra: extra },
-  '#withExtraMixin':: d.fn(help='"Extra information about the requesting user. See user.Info interface for details."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='extra', type=d.T.object)]),
-  withExtraMixin(extra): { extra+: extra },
-  '#withGroups':: d.fn(help='"Group information about the requesting user. 
See user.Info interface for details."', args=[d.arg(name='groups', type=d.T.array)]), - withGroups(groups): { groups: if std.isArray(v=groups) then groups else [groups] }, - '#withGroupsMixin':: d.fn(help='"Group information about the requesting user. See user.Info interface for details."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='groups', type=d.T.array)]), - withGroupsMixin(groups): { groups+: if std.isArray(v=groups) then groups else [groups] }, - '#withRequest':: d.fn(help='"Base64-encoded PKCS#10 CSR data"', args=[d.arg(name='request', type=d.T.string)]), - withRequest(request): { request: request }, - '#withSignerName':: d.fn(help="\"Requested signer for the request. It is a qualified name in the form: `scope-hostname.io/name`. If empty, it will be defaulted:\\n 1. If it's a kubelet client certificate, it is assigned\\n \\\"kubernetes.io/kube-apiserver-client-kubelet\\\".\\n 2. If it's a kubelet serving certificate, it is assigned\\n \\\"kubernetes.io/kubelet-serving\\\".\\n 3. Otherwise, it is assigned \\\"kubernetes.io/legacy-unknown\\\".\\nDistribution of trust for signers happens out of band. You can select on this field using `spec.signerName`.\"", args=[d.arg(name='signerName', type=d.T.string)]), - withSignerName(signerName): { signerName: signerName }, - '#withUid':: d.fn(help='"UID information about the requesting user. See user.Info interface for details."', args=[d.arg(name='uid', type=d.T.string)]), - withUid(uid): { uid: uid }, - '#withUsages':: d.fn(help='"allowedUsages specifies a set of usage contexts the key will be valid for. See: https://tools.ietf.org/html/rfc5280#section-4.2.1.3\\n https://tools.ietf.org/html/rfc5280#section-4.2.1.12\\nValid values are:\\n \\"signing\\",\\n \\"digital signature\\",\\n \\"content commitment\\",\\n \\"key encipherment\\",\\n \\"key agreement\\",\\n \\"data encipherment\\",\\n \\"cert sign\\",\\n \\"crl sign\\",\\n \\"encipher only\\",\\n \\"decipher only\\",\\n \\"any\\",\\n \\"server auth\\",\\n \\"client auth\\",\\n \\"code signing\\",\\n \\"email protection\\",\\n \\"s/mime\\",\\n \\"ipsec end system\\",\\n \\"ipsec tunnel\\",\\n \\"ipsec user\\",\\n \\"timestamping\\",\\n \\"ocsp signing\\",\\n \\"microsoft sgc\\",\\n \\"netscape sgc\\', args=[d.arg(name='usages', type=d.T.array)]), - withUsages(usages): { usages: if std.isArray(v=usages) then usages else [usages] }, - '#withUsagesMixin':: d.fn(help='"allowedUsages specifies a set of usage contexts the key will be valid for. See: https://tools.ietf.org/html/rfc5280#section-4.2.1.3\\n https://tools.ietf.org/html/rfc5280#section-4.2.1.12\\nValid values are:\\n \\"signing\\",\\n \\"digital signature\\",\\n \\"content commitment\\",\\n \\"key encipherment\\",\\n \\"key agreement\\",\\n \\"data encipherment\\",\\n \\"cert sign\\",\\n \\"crl sign\\",\\n \\"encipher only\\",\\n \\"decipher only\\",\\n \\"any\\",\\n \\"server auth\\",\\n \\"client auth\\",\\n \\"code signing\\",\\n \\"email protection\\",\\n \\"s/mime\\",\\n \\"ipsec end system\\",\\n \\"ipsec tunnel\\",\\n \\"ipsec user\\",\\n \\"timestamping\\",\\n \\"ocsp signing\\",\\n \\"microsoft sgc\\",\\n \\"netscape sgc\\\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='usages', type=d.T.array)]), - withUsagesMixin(usages): { usages+: if std.isArray(v=usages) then usages else [usages] }, - '#withUsername':: d.fn(help='"Information about the requesting user. 
See user.Info interface for details."', args=[d.arg(name='username', type=d.T.string)]), - withUsername(username): { username: username }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/certificates/v1beta1/certificateSigningRequestStatus.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/certificates/v1beta1/certificateSigningRequestStatus.libsonnet deleted file mode 100644 index 0a36898fd8a..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/certificates/v1beta1/certificateSigningRequestStatus.libsonnet +++ /dev/null @@ -1,12 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='certificateSigningRequestStatus', url='', help=''), - '#withCertificate':: d.fn(help='"If request was approved, the controller will place the issued certificate here."', args=[d.arg(name='certificate', type=d.T.string)]), - withCertificate(certificate): { certificate: certificate }, - '#withConditions':: d.fn(help='"Conditions applied to the request, such as approval or denial."', args=[d.arg(name='conditions', type=d.T.array)]), - withConditions(conditions): { conditions: if std.isArray(v=conditions) then conditions else [conditions] }, - '#withConditionsMixin':: d.fn(help='"Conditions applied to the request, such as approval or denial."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='conditions', type=d.T.array)]), - withConditionsMixin(conditions): { conditions+: if std.isArray(v=conditions) then conditions else [conditions] }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/certificates/v1beta1/main.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/certificates/v1beta1/main.libsonnet deleted file mode 100644 index 0e09e4b298a..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/certificates/v1beta1/main.libsonnet +++ /dev/null @@ -1,8 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='v1beta1', url='', help=''), - certificateSigningRequest: (import 'certificateSigningRequest.libsonnet'), - certificateSigningRequestCondition: (import 'certificateSigningRequestCondition.libsonnet'), - certificateSigningRequestSpec: (import 'certificateSigningRequestSpec.libsonnet'), - certificateSigningRequestStatus: (import 'certificateSigningRequestStatus.libsonnet'), -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/coordination/v1beta1/main.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/coordination/v1beta1/main.libsonnet deleted file mode 100644 index 12f30209845..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/coordination/v1beta1/main.libsonnet +++ /dev/null @@ -1,6 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='v1beta1', url='', help=''), - lease: (import 'lease.libsonnet'), - leaseSpec: (import 'leaseSpec.libsonnet'), -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/awsElasticBlockStoreVolumeSource.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/awsElasticBlockStoreVolumeSource.libsonnet deleted file mode 100644 index c89b3f3aa6f..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/awsElasticBlockStoreVolumeSource.libsonnet 
+++ /dev/null @@ -1,14 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='awsElasticBlockStoreVolumeSource', url='', help='"Represents a Persistent Disk resource in AWS.\\n\\nAn AWS EBS disk must exist before mounting to a container. The disk must also be in the same AWS zone as the kubelet. An AWS EBS disk can only be mounted as read/write once. AWS EBS volumes support ownership management and SELinux relabeling."'), - '#withFsType':: d.fn(help='"Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore"', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { fsType: fsType }, - '#withPartition':: d.fn(help='"The partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \\"1\\". Similarly, the volume partition for /dev/sda is \\"0\\" (or you can leave the property empty)."', args=[d.arg(name='partition', type=d.T.integer)]), - withPartition(partition): { partition: partition }, - '#withReadOnly':: d.fn(help='"Specify \\"true\\" to force and set the ReadOnly property in VolumeMounts to \\"true\\". If omitted, the default is \\"false\\". More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore"', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { readOnly: readOnly }, - '#withVolumeID':: d.fn(help='"Unique ID of the persistent disk resource in AWS (Amazon EBS volume). More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore"', args=[d.arg(name='volumeID', type=d.T.string)]), - withVolumeID(volumeID): { volumeID: volumeID }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/azureDiskVolumeSource.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/azureDiskVolumeSource.libsonnet deleted file mode 100644 index 3a6f4e8c227..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/azureDiskVolumeSource.libsonnet +++ /dev/null @@ -1,18 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='azureDiskVolumeSource', url='', help='"AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod."'), - '#withCachingMode':: d.fn(help='"Host Caching mode: None, Read Only, Read Write."', args=[d.arg(name='cachingMode', type=d.T.string)]), - withCachingMode(cachingMode): { cachingMode: cachingMode }, - '#withDiskName':: d.fn(help='"The Name of the data disk in the blob storage"', args=[d.arg(name='diskName', type=d.T.string)]), - withDiskName(diskName): { diskName: diskName }, - '#withDiskURI':: d.fn(help='"The URI the data disk in the blob storage"', args=[d.arg(name='diskURI', type=d.T.string)]), - withDiskURI(diskURI): { diskURI: diskURI }, - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". 
Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { fsType: fsType }, - '#withKind':: d.fn(help='"Expected values Shared: multiple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared"', args=[d.arg(name='kind', type=d.T.string)]), - withKind(kind): { kind: kind }, - '#withReadOnly':: d.fn(help='"Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { readOnly: readOnly }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/azureFilePersistentVolumeSource.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/azureFilePersistentVolumeSource.libsonnet deleted file mode 100644 index abb1082ef8b..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/azureFilePersistentVolumeSource.libsonnet +++ /dev/null @@ -1,14 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='azureFilePersistentVolumeSource', url='', help='"AzureFile represents an Azure File Service mount on the host and bind mount to the pod."'), - '#withReadOnly':: d.fn(help='"Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { readOnly: readOnly }, - '#withSecretName':: d.fn(help='"the name of secret that contains Azure Storage Account Name and Key"', args=[d.arg(name='secretName', type=d.T.string)]), - withSecretName(secretName): { secretName: secretName }, - '#withSecretNamespace':: d.fn(help='"the namespace of the secret that contains Azure Storage Account Name and Key default is the same as the Pod"', args=[d.arg(name='secretNamespace', type=d.T.string)]), - withSecretNamespace(secretNamespace): { secretNamespace: secretNamespace }, - '#withShareName':: d.fn(help='"Share Name"', args=[d.arg(name='shareName', type=d.T.string)]), - withShareName(shareName): { shareName: shareName }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/azureFileVolumeSource.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/azureFileVolumeSource.libsonnet deleted file mode 100644 index 14b0454c3c1..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/azureFileVolumeSource.libsonnet +++ /dev/null @@ -1,12 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='azureFileVolumeSource', url='', help='"AzureFile represents an Azure File Service mount on the host and bind mount to the pod."'), - '#withReadOnly':: d.fn(help='"Defaults to false (read/write). 
ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { readOnly: readOnly }, - '#withSecretName':: d.fn(help='"the name of secret that contains Azure Storage Account Name and Key"', args=[d.arg(name='secretName', type=d.T.string)]), - withSecretName(secretName): { secretName: secretName }, - '#withShareName':: d.fn(help='"Share Name"', args=[d.arg(name='shareName', type=d.T.string)]), - withShareName(shareName): { shareName: shareName }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/cephFSPersistentVolumeSource.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/cephFSPersistentVolumeSource.libsonnet deleted file mode 100644 index 4ac47d531b2..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/cephFSPersistentVolumeSource.libsonnet +++ /dev/null @@ -1,25 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='cephFSPersistentVolumeSource', url='', help='"Represents a Ceph Filesystem mount that lasts the lifetime of a pod Cephfs volumes do not support ownership management or SELinux relabeling."'), - '#secretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), - secretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { secretRef+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { secretRef+: { namespace: namespace } }, - }, - '#withMonitors':: d.fn(help='"Required: Monitors is a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='monitors', type=d.T.array)]), - withMonitors(monitors): { monitors: if std.isArray(v=monitors) then monitors else [monitors] }, - '#withMonitorsMixin':: d.fn(help='"Required: Monitors is a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='monitors', type=d.T.array)]), - withMonitorsMixin(monitors): { monitors+: if std.isArray(v=monitors) then monitors else [monitors] }, - '#withPath':: d.fn(help='"Optional: Used as the mounted root, rather than the full Ceph tree, default is /"', args=[d.arg(name='path', type=d.T.string)]), - withPath(path): { path: path }, - '#withReadOnly':: d.fn(help='"Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. 
More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { readOnly: readOnly }, - '#withSecretFile':: d.fn(help='"Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='secretFile', type=d.T.string)]), - withSecretFile(secretFile): { secretFile: secretFile }, - '#withUser':: d.fn(help='"Optional: User is the rados user name, default is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='user', type=d.T.string)]), - withUser(user): { user: user }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/cephFSVolumeSource.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/cephFSVolumeSource.libsonnet deleted file mode 100644 index d9ad4242284..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/cephFSVolumeSource.libsonnet +++ /dev/null @@ -1,23 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='cephFSVolumeSource', url='', help='"Represents a Ceph Filesystem mount that lasts the lifetime of a pod Cephfs volumes do not support ownership management or SELinux relabeling."'), - '#secretRef':: d.obj(help='"LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace."'), - secretRef: { - '#withName':: d.fn(help='"Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { secretRef+: { name: name } }, - }, - '#withMonitors':: d.fn(help='"Required: Monitors is a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='monitors', type=d.T.array)]), - withMonitors(monitors): { monitors: if std.isArray(v=monitors) then monitors else [monitors] }, - '#withMonitorsMixin':: d.fn(help='"Required: Monitors is a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='monitors', type=d.T.array)]), - withMonitorsMixin(monitors): { monitors+: if std.isArray(v=monitors) then monitors else [monitors] }, - '#withPath':: d.fn(help='"Optional: Used as the mounted root, rather than the full Ceph tree, default is /"', args=[d.arg(name='path', type=d.T.string)]), - withPath(path): { path: path }, - '#withReadOnly':: d.fn(help='"Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. 
More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { readOnly: readOnly }, - '#withSecretFile':: d.fn(help='"Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='secretFile', type=d.T.string)]), - withSecretFile(secretFile): { secretFile: secretFile }, - '#withUser':: d.fn(help='"Optional: User is the rados user name, default is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='user', type=d.T.string)]), - withUser(user): { user: user }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/configMapProjection.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/configMapProjection.libsonnet deleted file mode 100644 index 600c4bef519..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/configMapProjection.libsonnet +++ /dev/null @@ -1,14 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='configMapProjection', url='', help="\"Adapts a ConfigMap into a projected volume.\\n\\nThe contents of the target ConfigMap's Data field will be presented in a projected volume as files using the keys in the Data field as the file names, unless the items element is populated with specific mappings of keys to paths. Note that this is identical to a configmap volume source without the default mode.\""), - '#withItems':: d.fn(help="\"If unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.\"", args=[d.arg(name='items', type=d.T.array)]), - withItems(items): { items: if std.isArray(v=items) then items else [items] }, - '#withItemsMixin':: d.fn(help="\"If unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='items', type=d.T.array)]), - withItemsMixin(items): { items+: if std.isArray(v=items) then items else [items] }, - '#withName':: d.fn(help='"Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { name: name }, - '#withOptional':: d.fn(help='"Specify whether the ConfigMap or its keys must be defined"', args=[d.arg(name='optional', type=d.T.boolean)]), - withOptional(optional): { optional: optional }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/configMapVolumeSource.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/configMapVolumeSource.libsonnet deleted file mode 100644 index e6cfbc58052..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/configMapVolumeSource.libsonnet +++ /dev/null @@ -1,16 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='configMapVolumeSource', url='', help="\"Adapts a ConfigMap into a volume.\\n\\nThe contents of the target ConfigMap's Data field will be presented in a volume as files using the keys in the Data field as the file names, unless the items element is populated with specific mappings of keys to paths. ConfigMap volumes support ownership management and SELinux relabeling.\""), - '#withDefaultMode':: d.fn(help='"Optional: mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set."', args=[d.arg(name='defaultMode', type=d.T.integer)]), - withDefaultMode(defaultMode): { defaultMode: defaultMode }, - '#withItems':: d.fn(help="\"If unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.\"", args=[d.arg(name='items', type=d.T.array)]), - withItems(items): { items: if std.isArray(v=items) then items else [items] }, - '#withItemsMixin':: d.fn(help="\"If unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='items', type=d.T.array)]), - withItemsMixin(items): { items+: if std.isArray(v=items) then items else [items] }, - '#withName':: d.fn(help='"Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { name: name }, - '#withOptional':: d.fn(help='"Specify whether the ConfigMap or its keys must be defined"', args=[d.arg(name='optional', type=d.T.boolean)]), - withOptional(optional): { optional: optional }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/downwardAPIVolumeFile.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/downwardAPIVolumeFile.libsonnet deleted file mode 100644 index 300afde2151..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/downwardAPIVolumeFile.libsonnet +++ /dev/null @@ -1,26 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='downwardAPIVolumeFile', url='', help='"DownwardAPIVolumeFile represents information to create the file containing the pod field"'), - '#fieldRef':: d.obj(help='"ObjectFieldSelector selects an APIVersioned field of an object."'), - fieldRef: { - '#withApiVersion':: d.fn(help='"Version of the schema the FieldPath is written in terms of, defaults to \\"v1\\"."', args=[d.arg(name='apiVersion', type=d.T.string)]), - withApiVersion(apiVersion): { fieldRef+: { apiVersion: apiVersion } }, - '#withFieldPath':: d.fn(help='"Path of the field to select in the specified API version."', args=[d.arg(name='fieldPath', type=d.T.string)]), - withFieldPath(fieldPath): { fieldRef+: { fieldPath: fieldPath } }, - }, - '#resourceFieldRef':: d.obj(help='"ResourceFieldSelector represents container resources (cpu, memory) and their output format"'), - resourceFieldRef: { - '#withContainerName':: d.fn(help='"Container name: required for volumes, optional for env vars"', args=[d.arg(name='containerName', type=d.T.string)]), - withContainerName(containerName): { resourceFieldRef+: { containerName: containerName } }, - '#withDivisor':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) 
This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='divisor', type=d.T.string)]), - withDivisor(divisor): { resourceFieldRef+: { divisor: divisor } }, - '#withResource':: d.fn(help='"Required: resource to select"', args=[d.arg(name='resource', type=d.T.string)]), - withResource(resource): { resourceFieldRef+: { resource: resource } }, - }, - '#withMode':: d.fn(help='"Optional: mode bits used to set permissions on this file, must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set."', args=[d.arg(name='mode', type=d.T.integer)]), - withMode(mode): { mode: mode }, - '#withPath':: d.fn(help="\"Required: Path is the relative path name of the file to be created. Must not be absolute or contain the '..' path. Must be utf-8 encoded. The first item of the relative path must not start with '..'\"", args=[d.arg(name='path', type=d.T.string)]), - withPath(path): { path: path }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/emptyDirVolumeSource.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/emptyDirVolumeSource.libsonnet deleted file mode 100644 index a434c5676c4..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/emptyDirVolumeSource.libsonnet +++ /dev/null @@ -1,10 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='emptyDirVolumeSource', url='', help='"Represents an empty directory for a pod. Empty directory volumes support ownership management and SELinux relabeling."'), - '#withMedium':: d.fn(help="\"What type of storage medium should back this directory. The default is \\\"\\\" which means to use the node's default medium. Must be an empty string (default) or Memory. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir\"", args=[d.arg(name='medium', type=d.T.string)]), - withMedium(medium): { medium: medium }, - '#withSizeLimit':: d.fn(help="\"Quantity is a fixed-point representation of a number. 
It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='sizeLimit', type=d.T.string)]), - withSizeLimit(sizeLimit): { sizeLimit: sizeLimit }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/endpointPort.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/endpointPort.libsonnet deleted file mode 100644 index 5befc0a18cb..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/endpointPort.libsonnet +++ /dev/null @@ -1,14 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='endpointPort', url='', help='"EndpointPort is a tuple that describes a single port."'), - '#withAppProtocol':: d.fn(help='"The application protocol for this port. This field follows standard Kubernetes label syntax. 
Un-prefixed names are reserved for IANA standard service names (as per RFC-6335 and http://www.iana.org/assignments/service-names). Non-standard protocols should use prefixed names such as mycompany.com/my-custom-protocol. This is a beta field that is guarded by the ServiceAppProtocol feature gate and enabled by default."', args=[d.arg(name='appProtocol', type=d.T.string)]), - withAppProtocol(appProtocol): { appProtocol: appProtocol }, - '#withName':: d.fn(help="\"The name of this port. This must match the 'name' field in the corresponding ServicePort. Must be a DNS_LABEL. Optional only if one port is defined.\"", args=[d.arg(name='name', type=d.T.string)]), - withName(name): { name: name }, - '#withPort':: d.fn(help='"The port number of the endpoint."', args=[d.arg(name='port', type=d.T.integer)]), - withPort(port): { port: port }, - '#withProtocol':: d.fn(help='"The IP protocol for this port. Must be UDP, TCP, or SCTP. Default is TCP."', args=[d.arg(name='protocol', type=d.T.string)]), - withProtocol(protocol): { protocol: protocol }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/ephemeralContainers.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/ephemeralContainers.libsonnet deleted file mode 100644 index 3fb419cb7d5..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/ephemeralContainers.libsonnet +++ /dev/null @@ -1,60 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='ephemeralContainers', url='', help='"A list of ephemeral containers used with the Pod ephemeralcontainers subresource."'), - '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), - metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), - withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), - withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, - '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. 
Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), - withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, - '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), - withDeletionGracePeriodSeconds(deletionGracePeriodSeconds): { metadata+: { deletionGracePeriodSeconds: deletionGracePeriodSeconds } }, - '#withDeletionTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='deletionTimestamp', type=d.T.string)]), - withDeletionTimestamp(deletionTimestamp): { metadata+: { deletionTimestamp: deletionTimestamp } }, - '#withFinalizers':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. 
If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), - withGenerateName(generateName): { metadata+: { generateName: generateName } }, - '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), - withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), - withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), - withLabelsMixin(labels): { metadata+: { labels+: labels } }, - '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. 
Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { metadata+: { namespace: namespace } }, - '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, - '#withOwnerReferencesMixin':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, - '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), - withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), - withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. 
It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), - withUid(uid): { metadata+: { uid: uid } }, - }, - '#new':: d.fn(help='new returns an instance of EphemeralContainers', args=[d.arg(name='name', type=d.T.string)]), - new(name): { - apiVersion: 'v1', - kind: 'EphemeralContainers', - } + self.metadata.withName(name=name), - '#withEphemeralContainers':: d.fn(help='"A list of ephemeral containers associated with this pod. New ephemeral containers may be appended to this list, but existing ephemeral containers may not be removed or modified."', args=[d.arg(name='ephemeralContainers', type=d.T.array)]), - withEphemeralContainers(ephemeralContainers): { ephemeralContainers: if std.isArray(v=ephemeralContainers) then ephemeralContainers else [ephemeralContainers] }, - '#withEphemeralContainersMixin':: d.fn(help='"A list of ephemeral containers associated with this pod. New ephemeral containers may be appended to this list, but existing ephemeral containers may not be removed or modified."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ephemeralContainers', type=d.T.array)]), - withEphemeralContainersMixin(ephemeralContainers): { ephemeralContainers+: if std.isArray(v=ephemeralContainers) then ephemeralContainers else [ephemeralContainers] }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/fcVolumeSource.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/fcVolumeSource.libsonnet deleted file mode 100644 index d8e0a809fcf..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/fcVolumeSource.libsonnet +++ /dev/null @@ -1,20 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='fcVolumeSource', url='', help='"Represents a Fibre Channel volume. Fibre Channel volumes can only be mounted as read/write once. Fibre Channel volumes support ownership management and SELinux relabeling."'), - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { fsType: fsType }, - '#withLun':: d.fn(help='"Optional: FC target lun number"', args=[d.arg(name='lun', type=d.T.integer)]), - withLun(lun): { lun: lun }, - '#withReadOnly':: d.fn(help='"Optional: Defaults to false (read/write). 
ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { readOnly: readOnly }, - '#withTargetWWNs':: d.fn(help='"Optional: FC target worldwide names (WWNs)"', args=[d.arg(name='targetWWNs', type=d.T.array)]), - withTargetWWNs(targetWWNs): { targetWWNs: if std.isArray(v=targetWWNs) then targetWWNs else [targetWWNs] }, - '#withTargetWWNsMixin':: d.fn(help='"Optional: FC target worldwide names (WWNs)"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='targetWWNs', type=d.T.array)]), - withTargetWWNsMixin(targetWWNs): { targetWWNs+: if std.isArray(v=targetWWNs) then targetWWNs else [targetWWNs] }, - '#withWwids':: d.fn(help='"Optional: FC volume world wide identifiers (wwids) Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously."', args=[d.arg(name='wwids', type=d.T.array)]), - withWwids(wwids): { wwids: if std.isArray(v=wwids) then wwids else [wwids] }, - '#withWwidsMixin':: d.fn(help='"Optional: FC volume world wide identifiers (wwids) Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='wwids', type=d.T.array)]), - withWwidsMixin(wwids): { wwids+: if std.isArray(v=wwids) then wwids else [wwids] }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/gcePersistentDiskVolumeSource.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/gcePersistentDiskVolumeSource.libsonnet deleted file mode 100644 index 4ca3872013f..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/gcePersistentDiskVolumeSource.libsonnet +++ /dev/null @@ -1,14 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='gcePersistentDiskVolumeSource', url='', help='"Represents a Persistent Disk resource in Google Compute Engine.\\n\\nA GCE PD must exist before mounting to a container. The disk must also be in the same GCE project and zone as the kubelet. A GCE PD can only be mounted as read/write once or read-only many times. GCE PDs support ownership management and SELinux relabeling."'), - '#withFsType':: d.fn(help='"Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { fsType: fsType }, - '#withPartition':: d.fn(help='"The partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \\"1\\". Similarly, the volume partition for /dev/sda is \\"0\\" (or you can leave the property empty). More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='partition', type=d.T.integer)]), - withPartition(partition): { partition: partition }, - '#withPdName':: d.fn(help='"Unique name of the PD resource in GCE. Used to identify the disk in GCE. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='pdName', type=d.T.string)]), - withPdName(pdName): { pdName: pdName }, - '#withReadOnly':: d.fn(help='"ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { readOnly: readOnly }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/iscsiPersistentVolumeSource.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/iscsiPersistentVolumeSource.libsonnet deleted file mode 100644 index f0640bd1024..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/iscsiPersistentVolumeSource.libsonnet +++ /dev/null @@ -1,35 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='iscsiPersistentVolumeSource', url='', help='"ISCSIPersistentVolumeSource represents an ISCSI disk. ISCSI volumes can only be mounted as read/write once. ISCSI volumes support ownership management and SELinux relabeling."'), - '#secretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), - secretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { secretRef+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { secretRef+: { namespace: namespace } }, - }, - '#withChapAuthDiscovery':: d.fn(help='"whether support iSCSI Discovery CHAP authentication"', args=[d.arg(name='chapAuthDiscovery', type=d.T.boolean)]), - withChapAuthDiscovery(chapAuthDiscovery): { chapAuthDiscovery: chapAuthDiscovery }, - '#withChapAuthSession':: d.fn(help='"whether support iSCSI Session CHAP authentication"', args=[d.arg(name='chapAuthSession', type=d.T.boolean)]), - withChapAuthSession(chapAuthSession): { chapAuthSession: chapAuthSession }, - '#withFsType':: d.fn(help='"Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi"', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { fsType: fsType }, - '#withInitiatorName':: d.fn(help='"Custom iSCSI Initiator Name. If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface : will be created for the connection."', args=[d.arg(name='initiatorName', type=d.T.string)]), - withInitiatorName(initiatorName): { initiatorName: initiatorName }, - '#withIqn':: d.fn(help='"Target iSCSI Qualified Name."', args=[d.arg(name='iqn', type=d.T.string)]), - withIqn(iqn): { iqn: iqn }, - '#withIscsiInterface':: d.fn(help="\"iSCSI Interface Name that uses an iSCSI transport. 
Defaults to 'default' (tcp).\"", args=[d.arg(name='iscsiInterface', type=d.T.string)]), - withIscsiInterface(iscsiInterface): { iscsiInterface: iscsiInterface }, - '#withLun':: d.fn(help='"iSCSI Target Lun number."', args=[d.arg(name='lun', type=d.T.integer)]), - withLun(lun): { lun: lun }, - '#withPortals':: d.fn(help='"iSCSI Target Portal List. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260)."', args=[d.arg(name='portals', type=d.T.array)]), - withPortals(portals): { portals: if std.isArray(v=portals) then portals else [portals] }, - '#withPortalsMixin':: d.fn(help='"iSCSI Target Portal List. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260)."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='portals', type=d.T.array)]), - withPortalsMixin(portals): { portals+: if std.isArray(v=portals) then portals else [portals] }, - '#withReadOnly':: d.fn(help='"ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false."', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { readOnly: readOnly }, - '#withTargetPortal':: d.fn(help='"iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260)."', args=[d.arg(name='targetPortal', type=d.T.string)]), - withTargetPortal(targetPortal): { targetPortal: targetPortal }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/iscsiVolumeSource.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/iscsiVolumeSource.libsonnet deleted file mode 100644 index d834c1db0e6..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/iscsiVolumeSource.libsonnet +++ /dev/null @@ -1,33 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='iscsiVolumeSource', url='', help='"Represents an ISCSI disk. ISCSI volumes can only be mounted as read/write once. ISCSI volumes support ownership management and SELinux relabeling."'), - '#secretRef':: d.obj(help='"LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace."'), - secretRef: { - '#withName':: d.fn(help='"Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { secretRef+: { name: name } }, - }, - '#withChapAuthDiscovery':: d.fn(help='"whether support iSCSI Discovery CHAP authentication"', args=[d.arg(name='chapAuthDiscovery', type=d.T.boolean)]), - withChapAuthDiscovery(chapAuthDiscovery): { chapAuthDiscovery: chapAuthDiscovery }, - '#withChapAuthSession':: d.fn(help='"whether support iSCSI Session CHAP authentication"', args=[d.arg(name='chapAuthSession', type=d.T.boolean)]), - withChapAuthSession(chapAuthSession): { chapAuthSession: chapAuthSession }, - '#withFsType':: d.fn(help='"Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi"', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { fsType: fsType }, - '#withInitiatorName':: d.fn(help='"Custom iSCSI Initiator Name. If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface : will be created for the connection."', args=[d.arg(name='initiatorName', type=d.T.string)]), - withInitiatorName(initiatorName): { initiatorName: initiatorName }, - '#withIqn':: d.fn(help='"Target iSCSI Qualified Name."', args=[d.arg(name='iqn', type=d.T.string)]), - withIqn(iqn): { iqn: iqn }, - '#withIscsiInterface':: d.fn(help="\"iSCSI Interface Name that uses an iSCSI transport. Defaults to 'default' (tcp).\"", args=[d.arg(name='iscsiInterface', type=d.T.string)]), - withIscsiInterface(iscsiInterface): { iscsiInterface: iscsiInterface }, - '#withLun':: d.fn(help='"iSCSI Target Lun number."', args=[d.arg(name='lun', type=d.T.integer)]), - withLun(lun): { lun: lun }, - '#withPortals':: d.fn(help='"iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260)."', args=[d.arg(name='portals', type=d.T.array)]), - withPortals(portals): { portals: if std.isArray(v=portals) then portals else [portals] }, - '#withPortalsMixin':: d.fn(help='"iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260)."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='portals', type=d.T.array)]), - withPortalsMixin(portals): { portals+: if std.isArray(v=portals) then portals else [portals] }, - '#withReadOnly':: d.fn(help='"ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false."', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { readOnly: readOnly }, - '#withTargetPortal':: d.fn(help='"iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260)."', args=[d.arg(name='targetPortal', type=d.T.string)]), - withTargetPortal(targetPortal): { targetPortal: targetPortal }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/keyToPath.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/keyToPath.libsonnet deleted file mode 100644 index 5eb6b89ac72..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/keyToPath.libsonnet +++ /dev/null @@ -1,12 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='keyToPath', url='', help='"Maps a string key to a path within a volume."'), - '#withKey':: d.fn(help='"The key to project."', args=[d.arg(name='key', type=d.T.string)]), - withKey(key): { key: key }, - '#withMode':: d.fn(help='"Optional: mode bits used to set permissions on this file. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set."', args=[d.arg(name='mode', type=d.T.integer)]), - withMode(mode): { mode: mode }, - '#withPath':: d.fn(help="\"The relative path of the file to map the key to. May not be an absolute path. 
May not contain the path element '..'. May not start with the string '..'.\"", args=[d.arg(name='path', type=d.T.string)]), - withPath(path): { path: path }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/localVolumeSource.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/localVolumeSource.libsonnet deleted file mode 100644 index e5a0a1ece98..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/localVolumeSource.libsonnet +++ /dev/null @@ -1,10 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='localVolumeSource', url='', help='"Local represents directly-attached storage with node affinity (Beta feature)"'), - '#withFsType':: d.fn(help='"Filesystem type to mount. It applies only when the Path is a block device. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". The default value is to auto-select a fileystem if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { fsType: fsType }, - '#withPath':: d.fn(help='"The full path to the volume on the node. It can be either a directory or block device (disk, partition, ...)."', args=[d.arg(name='path', type=d.T.string)]), - withPath(path): { path: path }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/persistentVolumeClaimStatus.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/persistentVolumeClaimStatus.libsonnet deleted file mode 100644 index 36b3fe45775..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/persistentVolumeClaimStatus.libsonnet +++ /dev/null @@ -1,20 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='persistentVolumeClaimStatus', url='', help='"PersistentVolumeClaimStatus is the current status of a persistent volume claim."'), - '#withAccessModes':: d.fn(help='"AccessModes contains the actual access modes the volume backing the PVC has. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1"', args=[d.arg(name='accessModes', type=d.T.array)]), - withAccessModes(accessModes): { accessModes: if std.isArray(v=accessModes) then accessModes else [accessModes] }, - '#withAccessModesMixin':: d.fn(help='"AccessModes contains the actual access modes the volume backing the PVC has. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='accessModes', type=d.T.array)]), - withAccessModesMixin(accessModes): { accessModes+: if std.isArray(v=accessModes) then accessModes else [accessModes] }, - '#withCapacity':: d.fn(help='"Represents the actual resources of the underlying volume."', args=[d.arg(name='capacity', type=d.T.object)]), - withCapacity(capacity): { capacity: capacity }, - '#withCapacityMixin':: d.fn(help='"Represents the actual resources of the underlying volume."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='capacity', type=d.T.object)]), - withCapacityMixin(capacity): { capacity+: capacity }, - '#withConditions':: d.fn(help="\"Current Condition of persistent volume claim. 
If underlying persistent volume is being resized then the Condition will be set to 'ResizeStarted'.\"", args=[d.arg(name='conditions', type=d.T.array)]), - withConditions(conditions): { conditions: if std.isArray(v=conditions) then conditions else [conditions] }, - '#withConditionsMixin':: d.fn(help="\"Current Condition of persistent volume claim. If underlying persistent volume is being resized then the Condition will be set to 'ResizeStarted'.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='conditions', type=d.T.array)]), - withConditionsMixin(conditions): { conditions+: if std.isArray(v=conditions) then conditions else [conditions] }, - '#withPhase':: d.fn(help='"Phase represents the current phase of PersistentVolumeClaim."', args=[d.arg(name='phase', type=d.T.string)]), - withPhase(phase): { phase: phase }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/persistentVolumeStatus.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/persistentVolumeStatus.libsonnet deleted file mode 100644 index 6e68b8c42a2..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/persistentVolumeStatus.libsonnet +++ /dev/null @@ -1,12 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='persistentVolumeStatus', url='', help='"PersistentVolumeStatus is the current status of a persistent volume."'), - '#withMessage':: d.fn(help='"A human-readable message indicating details about why the volume is in this state."', args=[d.arg(name='message', type=d.T.string)]), - withMessage(message): { message: message }, - '#withPhase':: d.fn(help='"Phase indicates if a volume is available, bound to a claim, or released by a claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#phase"', args=[d.arg(name='phase', type=d.T.string)]), - withPhase(phase): { phase: phase }, - '#withReason':: d.fn(help='"Reason is a brief CamelCase string that describes any failure and is meant for machine parsing and tidy display in the CLI."', args=[d.arg(name='reason', type=d.T.string)]), - withReason(reason): { reason: reason }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/photonPersistentDiskVolumeSource.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/photonPersistentDiskVolumeSource.libsonnet deleted file mode 100644 index b05e0d0eaec..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/photonPersistentDiskVolumeSource.libsonnet +++ /dev/null @@ -1,10 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='photonPersistentDiskVolumeSource', url='', help='"Represents a Photon Controller persistent disk resource."'), - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". 
Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { fsType: fsType }, - '#withPdID':: d.fn(help='"ID that identifies Photon Controller persistent disk"', args=[d.arg(name='pdID', type=d.T.string)]), - withPdID(pdID): { pdID: pdID }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/podIP.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/podIP.libsonnet deleted file mode 100644 index 08e93f9fc9e..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/podIP.libsonnet +++ /dev/null @@ -1,8 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='podIP', url='', help='"IP address information for entries in the (plural) PodIPs field. Each entry includes:\\n IP: An IP address allocated to the pod. Routable at least within the cluster."'), - '#withIp':: d.fn(help='"ip is an IP address (IPv4 or IPv6) assigned to the pod"', args=[d.arg(name='ip', type=d.T.string)]), - withIp(ip): { ip: ip }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/projectedVolumeSource.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/projectedVolumeSource.libsonnet deleted file mode 100644 index 4c01eb71937..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/projectedVolumeSource.libsonnet +++ /dev/null @@ -1,12 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='projectedVolumeSource', url='', help='"Represents a projected volume source"'), - '#withDefaultMode':: d.fn(help='"Mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set."', args=[d.arg(name='defaultMode', type=d.T.integer)]), - withDefaultMode(defaultMode): { defaultMode: defaultMode }, - '#withSources':: d.fn(help='"list of volume projections"', args=[d.arg(name='sources', type=d.T.array)]), - withSources(sources): { sources: if std.isArray(v=sources) then sources else [sources] }, - '#withSourcesMixin':: d.fn(help='"list of volume projections"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='sources', type=d.T.array)]), - withSourcesMixin(sources): { sources+: if std.isArray(v=sources) then sources else [sources] }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/rbdPersistentVolumeSource.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/rbdPersistentVolumeSource.libsonnet deleted file mode 100644 index 4163da234a4..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/rbdPersistentVolumeSource.libsonnet +++ /dev/null @@ -1,29 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='rbdPersistentVolumeSource', url='', help='"Represents a Rados Block Device mount that lasts the lifetime of a pod. 
RBD volumes support ownership management and SELinux relabeling."'), - '#secretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), - secretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { secretRef+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { secretRef+: { namespace: namespace } }, - }, - '#withFsType':: d.fn(help='"Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd"', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { fsType: fsType }, - '#withImage':: d.fn(help='"The rados image name. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='image', type=d.T.string)]), - withImage(image): { image: image }, - '#withKeyring':: d.fn(help='"Keyring is the path to key ring for RBDUser. Default is /etc/ceph/keyring. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='keyring', type=d.T.string)]), - withKeyring(keyring): { keyring: keyring }, - '#withMonitors':: d.fn(help='"A collection of Ceph monitors. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='monitors', type=d.T.array)]), - withMonitors(monitors): { monitors: if std.isArray(v=monitors) then monitors else [monitors] }, - '#withMonitorsMixin':: d.fn(help='"A collection of Ceph monitors. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='monitors', type=d.T.array)]), - withMonitorsMixin(monitors): { monitors+: if std.isArray(v=monitors) then monitors else [monitors] }, - '#withPool':: d.fn(help='"The rados pool name. Default is rbd. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='pool', type=d.T.string)]), - withPool(pool): { pool: pool }, - '#withReadOnly':: d.fn(help='"ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { readOnly: readOnly }, - '#withUser':: d.fn(help='"The rados user name. Default is admin. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='user', type=d.T.string)]), - withUser(user): { user: user }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/rbdVolumeSource.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/rbdVolumeSource.libsonnet deleted file mode 100644 index cba0fe8c954..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/rbdVolumeSource.libsonnet +++ /dev/null @@ -1,27 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='rbdVolumeSource', url='', help='"Represents a Rados Block Device mount that lasts the lifetime of a pod. 
RBD volumes support ownership management and SELinux relabeling."'), - '#secretRef':: d.obj(help='"LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace."'), - secretRef: { - '#withName':: d.fn(help='"Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { secretRef+: { name: name } }, - }, - '#withFsType':: d.fn(help='"Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd"', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { fsType: fsType }, - '#withImage':: d.fn(help='"The rados image name. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='image', type=d.T.string)]), - withImage(image): { image: image }, - '#withKeyring':: d.fn(help='"Keyring is the path to key ring for RBDUser. Default is /etc/ceph/keyring. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='keyring', type=d.T.string)]), - withKeyring(keyring): { keyring: keyring }, - '#withMonitors':: d.fn(help='"A collection of Ceph monitors. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='monitors', type=d.T.array)]), - withMonitors(monitors): { monitors: if std.isArray(v=monitors) then monitors else [monitors] }, - '#withMonitorsMixin':: d.fn(help='"A collection of Ceph monitors. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='monitors', type=d.T.array)]), - withMonitorsMixin(monitors): { monitors+: if std.isArray(v=monitors) then monitors else [monitors] }, - '#withPool':: d.fn(help='"The rados pool name. Default is rbd. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='pool', type=d.T.string)]), - withPool(pool): { pool: pool }, - '#withReadOnly':: d.fn(help='"ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { readOnly: readOnly }, - '#withUser':: d.fn(help='"The rados user name. Default is admin. 
More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='user', type=d.T.string)]), - withUser(user): { user: user }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/resourceFieldSelector.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/resourceFieldSelector.libsonnet deleted file mode 100644 index 460a6016386..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/resourceFieldSelector.libsonnet +++ /dev/null @@ -1,12 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='resourceFieldSelector', url='', help='"ResourceFieldSelector represents container resources (cpu, memory) and their output format"'), - '#withContainerName':: d.fn(help='"Container name: required for volumes, optional for env vars"', args=[d.arg(name='containerName', type=d.T.string)]), - withContainerName(containerName): { containerName: containerName }, - '#withDivisor':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. 
(So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='divisor', type=d.T.string)]), - withDivisor(divisor): { divisor: divisor }, - '#withResource':: d.fn(help='"Required: resource to select"', args=[d.arg(name='resource', type=d.T.string)]), - withResource(resource): { resource: resource }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/scaleIOPersistentVolumeSource.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/scaleIOPersistentVolumeSource.libsonnet deleted file mode 100644 index a3a52807f06..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/scaleIOPersistentVolumeSource.libsonnet +++ /dev/null @@ -1,31 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='scaleIOPersistentVolumeSource', url='', help='"ScaleIOPersistentVolumeSource represents a persistent ScaleIO volume"'), - '#secretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), - secretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { secretRef+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { secretRef+: { namespace: namespace } }, - }, - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Default is \\"xfs\\', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { fsType: fsType }, - '#withGateway':: d.fn(help='"The host address of the ScaleIO API Gateway."', args=[d.arg(name='gateway', type=d.T.string)]), - withGateway(gateway): { gateway: gateway }, - '#withProtectionDomain':: d.fn(help='"The name of the ScaleIO Protection Domain for the configured storage."', args=[d.arg(name='protectionDomain', type=d.T.string)]), - withProtectionDomain(protectionDomain): { protectionDomain: protectionDomain }, - '#withReadOnly':: d.fn(help='"Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { readOnly: readOnly }, - '#withSslEnabled':: d.fn(help='"Flag to enable/disable SSL communication with Gateway, default false"', args=[d.arg(name='sslEnabled', type=d.T.boolean)]), - withSslEnabled(sslEnabled): { sslEnabled: sslEnabled }, - '#withStorageMode':: d.fn(help='"Indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. 
Default is ThinProvisioned."', args=[d.arg(name='storageMode', type=d.T.string)]), - withStorageMode(storageMode): { storageMode: storageMode }, - '#withStoragePool':: d.fn(help='"The ScaleIO Storage Pool associated with the protection domain."', args=[d.arg(name='storagePool', type=d.T.string)]), - withStoragePool(storagePool): { storagePool: storagePool }, - '#withSystem':: d.fn(help='"The name of the storage system as configured in ScaleIO."', args=[d.arg(name='system', type=d.T.string)]), - withSystem(system): { system: system }, - '#withVolumeName':: d.fn(help='"The name of a volume already created in the ScaleIO system that is associated with this volume source."', args=[d.arg(name='volumeName', type=d.T.string)]), - withVolumeName(volumeName): { volumeName: volumeName }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/scaleIOVolumeSource.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/scaleIOVolumeSource.libsonnet deleted file mode 100644 index 376c45a382d..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/scaleIOVolumeSource.libsonnet +++ /dev/null @@ -1,29 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='scaleIOVolumeSource', url='', help='"ScaleIOVolumeSource represents a persistent ScaleIO volume"'), - '#secretRef':: d.obj(help='"LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace."'), - secretRef: { - '#withName':: d.fn(help='"Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { secretRef+: { name: name } }, - }, - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Default is \\"xfs\\"."', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { fsType: fsType }, - '#withGateway':: d.fn(help='"The host address of the ScaleIO API Gateway."', args=[d.arg(name='gateway', type=d.T.string)]), - withGateway(gateway): { gateway: gateway }, - '#withProtectionDomain':: d.fn(help='"The name of the ScaleIO Protection Domain for the configured storage."', args=[d.arg(name='protectionDomain', type=d.T.string)]), - withProtectionDomain(protectionDomain): { protectionDomain: protectionDomain }, - '#withReadOnly':: d.fn(help='"Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { readOnly: readOnly }, - '#withSslEnabled':: d.fn(help='"Flag to enable/disable SSL communication with Gateway, default false"', args=[d.arg(name='sslEnabled', type=d.T.boolean)]), - withSslEnabled(sslEnabled): { sslEnabled: sslEnabled }, - '#withStorageMode':: d.fn(help='"Indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. 
Default is ThinProvisioned."', args=[d.arg(name='storageMode', type=d.T.string)]), - withStorageMode(storageMode): { storageMode: storageMode }, - '#withStoragePool':: d.fn(help='"The ScaleIO Storage Pool associated with the protection domain."', args=[d.arg(name='storagePool', type=d.T.string)]), - withStoragePool(storagePool): { storagePool: storagePool }, - '#withSystem':: d.fn(help='"The name of the storage system as configured in ScaleIO."', args=[d.arg(name='system', type=d.T.string)]), - withSystem(system): { system: system }, - '#withVolumeName':: d.fn(help='"The name of a volume already created in the ScaleIO system that is associated with this volume source."', args=[d.arg(name='volumeName', type=d.T.string)]), - withVolumeName(volumeName): { volumeName: volumeName }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/secretProjection.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/secretProjection.libsonnet deleted file mode 100644 index 3750b1ee35f..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/secretProjection.libsonnet +++ /dev/null @@ -1,14 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='secretProjection', url='', help="\"Adapts a secret into a projected volume.\\n\\nThe contents of the target Secret's Data field will be presented in a projected volume as files using the keys in the Data field as the file names. Note that this is identical to a secret volume source without the default mode.\""), - '#withItems':: d.fn(help="\"If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.\"", args=[d.arg(name='items', type=d.T.array)]), - withItems(items): { items: if std.isArray(v=items) then items else [items] }, - '#withItemsMixin':: d.fn(help="\"If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='items', type=d.T.array)]), - withItemsMixin(items): { items+: if std.isArray(v=items) then items else [items] }, - '#withName':: d.fn(help='"Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { name: name }, - '#withOptional':: d.fn(help='"Specify whether the Secret or its key must be defined"', args=[d.arg(name='optional', type=d.T.boolean)]), - withOptional(optional): { optional: optional }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/secretVolumeSource.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/secretVolumeSource.libsonnet deleted file mode 100644 index 1097514d560..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/secretVolumeSource.libsonnet +++ /dev/null @@ -1,16 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='secretVolumeSource', url='', help="\"Adapts a Secret into a volume.\\n\\nThe contents of the target Secret's Data field will be presented in a volume as files using the keys in the Data field as the file names. Secret volumes support ownership management and SELinux relabeling.\""), - '#withDefaultMode':: d.fn(help='"Optional: mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set."', args=[d.arg(name='defaultMode', type=d.T.integer)]), - withDefaultMode(defaultMode): { defaultMode: defaultMode }, - '#withItems':: d.fn(help="\"If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.\"", args=[d.arg(name='items', type=d.T.array)]), - withItems(items): { items: if std.isArray(v=items) then items else [items] }, - '#withItemsMixin':: d.fn(help="\"If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='items', type=d.T.array)]), - withItemsMixin(items): { items+: if std.isArray(v=items) then items else [items] }, - '#withOptional':: d.fn(help='"Specify whether the Secret or its keys must be defined"', args=[d.arg(name='optional', type=d.T.boolean)]), - withOptional(optional): { optional: optional }, - '#withSecretName':: d.fn(help="\"Name of the secret in the pod's namespace to use. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#secret\"", args=[d.arg(name='secretName', type=d.T.string)]), - withSecretName(secretName): { secretName: secretName }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/topologySpreadConstraint.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/topologySpreadConstraint.libsonnet deleted file mode 100644 index 0b27df18641..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/topologySpreadConstraint.libsonnet +++ /dev/null @@ -1,23 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='topologySpreadConstraint', url='', help='"TopologySpreadConstraint specifies how to spread matching pods among the given topology."'), - '#labelSelector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), - labelSelector: { - '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), - withMatchExpressions(matchExpressions): { labelSelector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } }, - '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), - withMatchExpressionsMixin(matchExpressions): { labelSelector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } }, - '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), - withMatchLabels(matchLabels): { labelSelector+: { matchLabels: matchLabels } }, - '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), - withMatchLabelsMixin(matchLabels): { labelSelector+: { matchLabels+: matchLabels } }, - }, - '#withMaxSkew':: d.fn(help="\"MaxSkew describes the degree to which pods may be unevenly distributed. When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference between the number of matching pods in the target topology and the global minimum. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 1/1/0: | zone1 | zone2 | zone3 | | P | P | | - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 1/1/1; scheduling it onto zone1(zone2) would make the ActualSkew(2-0) on zone1(zone2) violate MaxSkew(1). - if MaxSkew is 2, incoming pod can be scheduled onto any zone. 
When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence to topologies that satisfy it. It's a required field. Default value is 1 and 0 is not allowed.\"", args=[d.arg(name='maxSkew', type=d.T.integer)]), - withMaxSkew(maxSkew): { maxSkew: maxSkew }, - '#withTopologyKey':: d.fn(help="\"TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each \u003ckey, value\u003e as a \\\"bucket\\\", and try to put balanced number of pods into each bucket. It's a required field.\"", args=[d.arg(name='topologyKey', type=d.T.string)]), - withTopologyKey(topologyKey): { topologyKey: topologyKey }, - '#withWhenUnsatisfiable':: d.fn(help="\"WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy the spread constraint. - DoNotSchedule (default) tells the scheduler not to schedule it. - ScheduleAnyway tells the scheduler to schedule the pod in any location,\\n but giving higher precedence to topologies that would help reduce the\\n skew.\\nA constraint is considered \\\"Unsatisfiable\\\" for an incoming pod if and only if every possible node assigment for that pod would violate \\\"MaxSkew\\\" on some topology. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 3/1/1: | zone1 | zone2 | zone3 | | P P P | P | P | If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler won't make it *more* imbalanced. It's a required field.\"", args=[d.arg(name='whenUnsatisfiable', type=d.T.string)]), - withWhenUnsatisfiable(whenUnsatisfiable): { whenUnsatisfiable: whenUnsatisfiable }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/volumeProjection.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/volumeProjection.libsonnet deleted file mode 100644 index 43fafb15bf7..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/volumeProjection.libsonnet +++ /dev/null @@ -1,44 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='volumeProjection', url='', help='"Projection that may be projected along with other supported volume types"'), - '#configMap':: d.obj(help="\"Adapts a ConfigMap into a projected volume.\\n\\nThe contents of the target ConfigMap's Data field will be presented in a projected volume as files using the keys in the Data field as the file names, unless the items element is populated with specific mappings of keys to paths. Note that this is identical to a configmap volume source without the default mode.\""), - configMap: { - '#withItems':: d.fn(help="\"If unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' 
path or start with '..'.\"", args=[d.arg(name='items', type=d.T.array)]), - withItems(items): { configMap+: { items: if std.isArray(v=items) then items else [items] } }, - '#withItemsMixin':: d.fn(help="\"If unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='items', type=d.T.array)]), - withItemsMixin(items): { configMap+: { items+: if std.isArray(v=items) then items else [items] } }, - '#withName':: d.fn(help='"Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { configMap+: { name: name } }, - '#withOptional':: d.fn(help='"Specify whether the ConfigMap or its keys must be defined"', args=[d.arg(name='optional', type=d.T.boolean)]), - withOptional(optional): { configMap+: { optional: optional } }, - }, - '#downwardAPI':: d.obj(help='"Represents downward API info for projecting into a projected volume. Note that this is identical to a downwardAPI volume source without the default mode."'), - downwardAPI: { - '#withItems':: d.fn(help='"Items is a list of DownwardAPIVolume file"', args=[d.arg(name='items', type=d.T.array)]), - withItems(items): { downwardAPI+: { items: if std.isArray(v=items) then items else [items] } }, - '#withItemsMixin':: d.fn(help='"Items is a list of DownwardAPIVolume file"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='items', type=d.T.array)]), - withItemsMixin(items): { downwardAPI+: { items+: if std.isArray(v=items) then items else [items] } }, - }, - '#secret':: d.obj(help="\"Adapts a secret into a projected volume.\\n\\nThe contents of the target Secret's Data field will be presented in a projected volume as files using the keys in the Data field as the file names. Note that this is identical to a secret volume source without the default mode.\""), - secret: { - '#withItems':: d.fn(help="\"If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.\"", args=[d.arg(name='items', type=d.T.array)]), - withItems(items): { secret+: { items: if std.isArray(v=items) then items else [items] } }, - '#withItemsMixin':: d.fn(help="\"If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' 
path or start with '..'.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='items', type=d.T.array)]), - withItemsMixin(items): { secret+: { items+: if std.isArray(v=items) then items else [items] } }, - '#withName':: d.fn(help='"Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { secret+: { name: name } }, - '#withOptional':: d.fn(help='"Specify whether the Secret or its key must be defined"', args=[d.arg(name='optional', type=d.T.boolean)]), - withOptional(optional): { secret+: { optional: optional } }, - }, - '#serviceAccountToken':: d.obj(help='"ServiceAccountTokenProjection represents a projected service account token volume. This projection can be used to insert a service account token into the pods runtime filesystem for use against APIs (Kubernetes API Server or otherwise)."'), - serviceAccountToken: { - '#withAudience':: d.fn(help='"Audience is the intended audience of the token. A recipient of a token must identify itself with an identifier specified in the audience of the token, and otherwise should reject the token. The audience defaults to the identifier of the apiserver."', args=[d.arg(name='audience', type=d.T.string)]), - withAudience(audience): { serviceAccountToken+: { audience: audience } }, - '#withExpirationSeconds':: d.fn(help='"ExpirationSeconds is the requested duration of validity of the service account token. As the token approaches expiration, the kubelet volume plugin will proactively rotate the service account token. The kubelet will start trying to rotate the token if the token is older than 80 percent of its time to live or if the token is older than 24 hours.Defaults to 1 hour and must be at least 10 minutes."', args=[d.arg(name='expirationSeconds', type=d.T.integer)]), - withExpirationSeconds(expirationSeconds): { serviceAccountToken+: { expirationSeconds: expirationSeconds } }, - '#withPath':: d.fn(help='"Path is the path relative to the mount point of the file to project the token into."', args=[d.arg(name='path', type=d.T.string)]), - withPath(path): { serviceAccountToken+: { path: path } }, - }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/vsphereVirtualDiskVolumeSource.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/vsphereVirtualDiskVolumeSource.libsonnet deleted file mode 100644 index 5e742debbfe..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/vsphereVirtualDiskVolumeSource.libsonnet +++ /dev/null @@ -1,14 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='vsphereVirtualDiskVolumeSource', url='', help='"Represents a vSphere volume resource."'), - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". 
Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { fsType: fsType }, - '#withStoragePolicyID':: d.fn(help='"Storage Policy Based Management (SPBM) profile ID associated with the StoragePolicyName."', args=[d.arg(name='storagePolicyID', type=d.T.string)]), - withStoragePolicyID(storagePolicyID): { storagePolicyID: storagePolicyID }, - '#withStoragePolicyName':: d.fn(help='"Storage Policy Based Management (SPBM) profile name."', args=[d.arg(name='storagePolicyName', type=d.T.string)]), - withStoragePolicyName(storagePolicyName): { storagePolicyName: storagePolicyName }, - '#withVolumePath':: d.fn(help='"Path that identifies vSphere volume vmdk"', args=[d.arg(name='volumePath', type=d.T.string)]), - withVolumePath(volumePath): { volumePath: volumePath }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/discovery/v1/endpointPort.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/discovery/v1/endpointPort.libsonnet deleted file mode 100644 index 28e60c00971..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/discovery/v1/endpointPort.libsonnet +++ /dev/null @@ -1,14 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='endpointPort', url='', help='"EndpointPort represents a Port used by an EndpointSlice"'), - '#withAppProtocol':: d.fn(help='"The application protocol for this port. This field follows standard Kubernetes label syntax. Un-prefixed names are reserved for IANA standard service names (as per RFC-6335 and http://www.iana.org/assignments/service-names). Non-standard protocols should use prefixed names such as mycompany.com/my-custom-protocol."', args=[d.arg(name='appProtocol', type=d.T.string)]), - withAppProtocol(appProtocol): { appProtocol: appProtocol }, - '#withName':: d.fn(help="\"The name of this port. All ports in an EndpointSlice must have a unique name. If the EndpointSlice is dervied from a Kubernetes service, this corresponds to the Service.ports[].name. Name must either be an empty string or pass DNS_LABEL validation: * must be no more than 63 characters long. * must consist of lower case alphanumeric characters or '-'. * must start and end with an alphanumeric character. Default is empty string.\"", args=[d.arg(name='name', type=d.T.string)]), - withName(name): { name: name }, - '#withPort':: d.fn(help='"The port number of the endpoint. If this is not specified, ports are not restricted and must be interpreted in the context of the specific consumer."', args=[d.arg(name='port', type=d.T.integer)]), - withPort(port): { port: port }, - '#withProtocol':: d.fn(help='"The IP protocol for this port. Must be UDP, TCP, or SCTP. 
Default is TCP."', args=[d.arg(name='protocol', type=d.T.string)]), - withProtocol(protocol): { protocol: protocol }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/discovery/v1beta1/endpoint.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/discovery/v1beta1/endpoint.libsonnet deleted file mode 100644 index 7f2ac440358..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/discovery/v1beta1/endpoint.libsonnet +++ /dev/null @@ -1,51 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='endpoint', url='', help='"Endpoint represents a single logical \\"backend\\" implementing a service."'), - '#conditions':: d.obj(help='"EndpointConditions represents the current condition of an endpoint."'), - conditions: { - '#withReady':: d.fn(help='"ready indicates that this endpoint is prepared to receive traffic, according to whatever system is managing the endpoint. A nil value indicates an unknown state. In most cases consumers should interpret this unknown state as ready. For compatibility reasons, ready should never be \\"true\\" for terminating endpoints."', args=[d.arg(name='ready', type=d.T.boolean)]), - withReady(ready): { conditions+: { ready: ready } }, - '#withServing':: d.fn(help='"serving is identical to ready except that it is set regardless of the terminating state of endpoints. This condition should be set to true for a ready endpoint that is terminating. If nil, consumers should defer to the ready condition. This field can be enabled with the EndpointSliceTerminatingCondition feature gate."', args=[d.arg(name='serving', type=d.T.boolean)]), - withServing(serving): { conditions+: { serving: serving } }, - '#withTerminating':: d.fn(help='"terminating indicates that this endpoint is terminating. A nil value indicates an unknown state. Consumers should interpret this unknown state to mean that the endpoint is not terminating. This field can be enabled with the EndpointSliceTerminatingCondition feature gate."', args=[d.arg(name='terminating', type=d.T.boolean)]), - withTerminating(terminating): { conditions+: { terminating: terminating } }, - }, - '#hints':: d.obj(help='"EndpointHints provides hints describing how an endpoint should be consumed."'), - hints: { - '#withForZones':: d.fn(help='"forZones indicates the zone(s) this endpoint should be consumed by to enable topology aware routing. May contain a maximum of 8 entries."', args=[d.arg(name='forZones', type=d.T.array)]), - withForZones(forZones): { hints+: { forZones: if std.isArray(v=forZones) then forZones else [forZones] } }, - '#withForZonesMixin':: d.fn(help='"forZones indicates the zone(s) this endpoint should be consumed by to enable topology aware routing. 
May contain a maximum of 8 entries."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='forZones', type=d.T.array)]), - withForZonesMixin(forZones): { hints+: { forZones+: if std.isArray(v=forZones) then forZones else [forZones] } }, - }, - '#targetRef':: d.obj(help='"ObjectReference contains enough information to let you inspect or modify the referred object."'), - targetRef: { - '#withApiVersion':: d.fn(help='"API version of the referent."', args=[d.arg(name='apiVersion', type=d.T.string)]), - withApiVersion(apiVersion): { targetRef+: { apiVersion: apiVersion } }, - '#withFieldPath':: d.fn(help='"If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. For example, if the object reference is to a container within a pod, this would take on a value like: \\"spec.containers{name}\\" (where \\"name\\" refers to the name of the container that triggered the event) or if no container name is specified \\"spec.containers[2]\\" (container with index 2 in this pod). This syntax is chosen only to have some well-defined way of referencing a part of an object."', args=[d.arg(name='fieldPath', type=d.T.string)]), - withFieldPath(fieldPath): { targetRef+: { fieldPath: fieldPath } }, - '#withKind':: d.fn(help='"Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds"', args=[d.arg(name='kind', type=d.T.string)]), - withKind(kind): { targetRef+: { kind: kind } }, - '#withName':: d.fn(help='"Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { targetRef+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/"', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { targetRef+: { namespace: namespace } }, - '#withResourceVersion':: d.fn(help='"Specific resourceVersion to which this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), - withResourceVersion(resourceVersion): { targetRef+: { resourceVersion: resourceVersion } }, - '#withUid':: d.fn(help='"UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids"', args=[d.arg(name='uid', type=d.T.string)]), - withUid(uid): { targetRef+: { uid: uid } }, - }, - '#withAddresses':: d.fn(help='"addresses of this endpoint. The contents of this field are interpreted according to the corresponding EndpointSlice addressType field. Consumers must handle different types of addresses in the context of their own capabilities. This must contain at least one address but no more than 100."', args=[d.arg(name='addresses', type=d.T.array)]), - withAddresses(addresses): { addresses: if std.isArray(v=addresses) then addresses else [addresses] }, - '#withAddressesMixin':: d.fn(help='"addresses of this endpoint. The contents of this field are interpreted according to the corresponding EndpointSlice addressType field. Consumers must handle different types of addresses in the context of their own capabilities. 
This must contain at least one address but no more than 100."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='addresses', type=d.T.array)]), - withAddressesMixin(addresses): { addresses+: if std.isArray(v=addresses) then addresses else [addresses] }, - '#withHostname':: d.fn(help='"hostname of this endpoint. This field may be used by consumers of endpoints to distinguish endpoints from each other (e.g. in DNS names). Multiple endpoints which use the same hostname should be considered fungible (e.g. multiple A values in DNS). Must be lowercase and pass DNS Label (RFC 1123) validation."', args=[d.arg(name='hostname', type=d.T.string)]), - withHostname(hostname): { hostname: hostname }, - '#withNodeName':: d.fn(help='"nodeName represents the name of the Node hosting this endpoint. This can be used to determine endpoints local to a Node. This field can be enabled with the EndpointSliceNodeName feature gate."', args=[d.arg(name='nodeName', type=d.T.string)]), - withNodeName(nodeName): { nodeName: nodeName }, - '#withTopology':: d.fn(help='"topology contains arbitrary topology information associated with the endpoint. These key/value pairs must conform with the label format. https://kubernetes.io/docs/concepts/overview/working-with-objects/labels Topology may include a maximum of 16 key/value pairs. This includes, but is not limited to the following well known keys: * kubernetes.io/hostname: the value indicates the hostname of the node\\n where the endpoint is located. This should match the corresponding\\n node label.\\n* topology.kubernetes.io/zone: the value indicates the zone where the\\n endpoint is located. This should match the corresponding node label.\\n* topology.kubernetes.io/region: the value indicates the region where the\\n endpoint is located. This should match the corresponding node label.\\nThis field is deprecated and will be removed in future api versions."', args=[d.arg(name='topology', type=d.T.object)]), - withTopology(topology): { topology: topology }, - '#withTopologyMixin':: d.fn(help='"topology contains arbitrary topology information associated with the endpoint. These key/value pairs must conform with the label format. https://kubernetes.io/docs/concepts/overview/working-with-objects/labels Topology may include a maximum of 16 key/value pairs. This includes, but is not limited to the following well known keys: * kubernetes.io/hostname: the value indicates the hostname of the node\\n where the endpoint is located. This should match the corresponding\\n node label.\\n* topology.kubernetes.io/zone: the value indicates the zone where the\\n endpoint is located. This should match the corresponding node label.\\n* topology.kubernetes.io/region: the value indicates the region where the\\n endpoint is located. 
This should match the corresponding node label.\\nThis field is deprecated and will be removed in future api versions."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='topology', type=d.T.object)]), - withTopologyMixin(topology): { topology+: topology }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/discovery/v1beta1/endpointHints.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/discovery/v1beta1/endpointHints.libsonnet deleted file mode 100644 index 5a08f1eba3e..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/discovery/v1beta1/endpointHints.libsonnet +++ /dev/null @@ -1,10 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='endpointHints', url='', help='"EndpointHints provides hints describing how an endpoint should be consumed."'), - '#withForZones':: d.fn(help='"forZones indicates the zone(s) this endpoint should be consumed by to enable topology aware routing. May contain a maximum of 8 entries."', args=[d.arg(name='forZones', type=d.T.array)]), - withForZones(forZones): { forZones: if std.isArray(v=forZones) then forZones else [forZones] }, - '#withForZonesMixin':: d.fn(help='"forZones indicates the zone(s) this endpoint should be consumed by to enable topology aware routing. May contain a maximum of 8 entries."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='forZones', type=d.T.array)]), - withForZonesMixin(forZones): { forZones+: if std.isArray(v=forZones) then forZones else [forZones] }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/discovery/v1beta1/endpointPort.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/discovery/v1beta1/endpointPort.libsonnet deleted file mode 100644 index 28e60c00971..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/discovery/v1beta1/endpointPort.libsonnet +++ /dev/null @@ -1,14 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='endpointPort', url='', help='"EndpointPort represents a Port used by an EndpointSlice"'), - '#withAppProtocol':: d.fn(help='"The application protocol for this port. This field follows standard Kubernetes label syntax. Un-prefixed names are reserved for IANA standard service names (as per RFC-6335 and http://www.iana.org/assignments/service-names). Non-standard protocols should use prefixed names such as mycompany.com/my-custom-protocol."', args=[d.arg(name='appProtocol', type=d.T.string)]), - withAppProtocol(appProtocol): { appProtocol: appProtocol }, - '#withName':: d.fn(help="\"The name of this port. All ports in an EndpointSlice must have a unique name. If the EndpointSlice is dervied from a Kubernetes service, this corresponds to the Service.ports[].name. Name must either be an empty string or pass DNS_LABEL validation: * must be no more than 63 characters long. * must consist of lower case alphanumeric characters or '-'. * must start and end with an alphanumeric character. Default is empty string.\"", args=[d.arg(name='name', type=d.T.string)]), - withName(name): { name: name }, - '#withPort':: d.fn(help='"The port number of the endpoint. 
If this is not specified, ports are not restricted and must be interpreted in the context of the specific consumer."', args=[d.arg(name='port', type=d.T.integer)]), - withPort(port): { port: port }, - '#withProtocol':: d.fn(help='"The IP protocol for this port. Must be UDP, TCP, or SCTP. Default is TCP."', args=[d.arg(name='protocol', type=d.T.string)]), - withProtocol(protocol): { protocol: protocol }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/discovery/v1beta1/endpointSlice.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/discovery/v1beta1/endpointSlice.libsonnet deleted file mode 100644 index 5e5020c85ef..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/discovery/v1beta1/endpointSlice.libsonnet +++ /dev/null @@ -1,66 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='endpointSlice', url='', help='"EndpointSlice represents a subset of the endpoints that implement a service. For a given service there may be multiple EndpointSlice objects, selected by labels, which must be joined to produce the full set of endpoints."'), - '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), - metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), - withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), - withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, - '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), - withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, - '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. 
Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), - withDeletionGracePeriodSeconds(deletionGracePeriodSeconds): { metadata+: { deletionGracePeriodSeconds: deletionGracePeriodSeconds } }, - '#withDeletionTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='deletionTimestamp', type=d.T.string)]), - withDeletionTimestamp(deletionTimestamp): { metadata+: { deletionTimestamp: deletionTimestamp } }, - '#withFinalizers':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. 
The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), - withGenerateName(generateName): { metadata+: { generateName: generateName } }, - '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), - withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), - withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), - withLabelsMixin(labels): { metadata+: { labels+: labels } }, - '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. 
More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { metadata+: { namespace: namespace } }, - '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, - '#withOwnerReferencesMixin':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, - '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), - withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), - withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. 
More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), - withUid(uid): { metadata+: { uid: uid } }, - }, - '#new':: d.fn(help='new returns an instance of EndpointSlice', args=[d.arg(name='name', type=d.T.string)]), - new(name): { - apiVersion: 'discovery.k8s.io/v1beta1', - kind: 'EndpointSlice', - } + self.metadata.withName(name=name), - '#withAddressType':: d.fn(help='"addressType specifies the type of address carried by this EndpointSlice. All addresses in this slice must be the same type. This field is immutable after creation. The following address types are currently supported: * IPv4: Represents an IPv4 Address. * IPv6: Represents an IPv6 Address. * FQDN: Represents a Fully Qualified Domain Name."', args=[d.arg(name='addressType', type=d.T.string)]), - withAddressType(addressType): { addressType: addressType }, - '#withEndpoints':: d.fn(help='"endpoints is a list of unique endpoints in this slice. Each slice may include a maximum of 1000 endpoints."', args=[d.arg(name='endpoints', type=d.T.array)]), - withEndpoints(endpoints): { endpoints: if std.isArray(v=endpoints) then endpoints else [endpoints] }, - '#withEndpointsMixin':: d.fn(help='"endpoints is a list of unique endpoints in this slice. Each slice may include a maximum of 1000 endpoints."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='endpoints', type=d.T.array)]), - withEndpointsMixin(endpoints): { endpoints+: if std.isArray(v=endpoints) then endpoints else [endpoints] }, - '#withPorts':: d.fn(help='"ports specifies the list of network ports exposed by each endpoint in this slice. Each port must have a unique name. When ports is empty, it indicates that there are no defined ports. When a port is defined with a nil port value, it indicates \\"all ports\\". Each slice may include a maximum of 100 ports."', args=[d.arg(name='ports', type=d.T.array)]), - withPorts(ports): { ports: if std.isArray(v=ports) then ports else [ports] }, - '#withPortsMixin':: d.fn(help='"ports specifies the list of network ports exposed by each endpoint in this slice. Each port must have a unique name. When ports is empty, it indicates that there are no defined ports. When a port is defined with a nil port value, it indicates \\"all ports\\". 
Each slice may include a maximum of 100 ports."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ports', type=d.T.array)]), - withPortsMixin(ports): { ports+: if std.isArray(v=ports) then ports else [ports] }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/discovery/v1beta1/main.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/discovery/v1beta1/main.libsonnet deleted file mode 100644 index ee824bf0c7c..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/discovery/v1beta1/main.libsonnet +++ /dev/null @@ -1,10 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='v1beta1', url='', help=''), - endpoint: (import 'endpoint.libsonnet'), - endpointConditions: (import 'endpointConditions.libsonnet'), - endpointHints: (import 'endpointHints.libsonnet'), - endpointPort: (import 'endpointPort.libsonnet'), - endpointSlice: (import 'endpointSlice.libsonnet'), - forZone: (import 'forZone.libsonnet'), -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/events/v1beta1/event.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/events/v1beta1/event.libsonnet deleted file mode 100644 index e94f5caacdc..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/events/v1beta1/event.libsonnet +++ /dev/null @@ -1,124 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='event', url='', help='"Event is a report of an event somewhere in the cluster. It generally denotes some state change in the system. Events have a limited retention time and triggers and messages may evolve with time. Event consumers should not rely on the timing of an event with a given Reason reflecting a consistent underlying trigger, or the continued existence of events with that Reason. Events should be treated as informative, best-effort, supplemental data."'), - '#deprecatedSource':: d.obj(help='"EventSource contains information for an event."'), - deprecatedSource: { - '#withComponent':: d.fn(help='"Component from which the event is generated."', args=[d.arg(name='component', type=d.T.string)]), - withComponent(component): { deprecatedSource+: { component: component } }, - '#withHost':: d.fn(help='"Node name on which the event is generated."', args=[d.arg(name='host', type=d.T.string)]), - withHost(host): { deprecatedSource+: { host: host } }, - }, - '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), - metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), - withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. 
More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), - withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, - '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), - withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, - '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), - withDeletionGracePeriodSeconds(deletionGracePeriodSeconds): { metadata+: { deletionGracePeriodSeconds: deletionGracePeriodSeconds } }, - '#withDeletionTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='deletionTimestamp', type=d.T.string)]), - withDeletionTimestamp(deletionTimestamp): { metadata+: { deletionTimestamp: deletionTimestamp } }, - '#withFinalizers':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. 
If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), - withGenerateName(generateName): { metadata+: { generateName: generateName } }, - '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), - withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), - withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), - withLabelsMixin(labels): { metadata+: { labels+: labels } }, - '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". 
The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { metadata+: { namespace: namespace } }, - '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, - '#withOwnerReferencesMixin':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, - '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. 
May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), - withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), - withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), - withUid(uid): { metadata+: { uid: uid } }, - }, - '#new':: d.fn(help='new returns an instance of Event', args=[d.arg(name='name', type=d.T.string)]), - new(name): { - apiVersion: 'events.k8s.io/v1beta1', - kind: 'Event', - } + self.metadata.withName(name=name), - '#regarding':: d.obj(help='"ObjectReference contains enough information to let you inspect or modify the referred object."'), - regarding: { - '#withApiVersion':: d.fn(help='"API version of the referent."', args=[d.arg(name='apiVersion', type=d.T.string)]), - withApiVersion(apiVersion): { regarding+: { apiVersion: apiVersion } }, - '#withFieldPath':: d.fn(help='"If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. For example, if the object reference is to a container within a pod, this would take on a value like: \\"spec.containers{name}\\" (where \\"name\\" refers to the name of the container that triggered the event) or if no container name is specified \\"spec.containers[2]\\" (container with index 2 in this pod). This syntax is chosen only to have some well-defined way of referencing a part of an object."', args=[d.arg(name='fieldPath', type=d.T.string)]), - withFieldPath(fieldPath): { regarding+: { fieldPath: fieldPath } }, - '#withKind':: d.fn(help='"Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds"', args=[d.arg(name='kind', type=d.T.string)]), - withKind(kind): { regarding+: { kind: kind } }, - '#withName':: d.fn(help='"Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { regarding+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/"', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { regarding+: { namespace: namespace } }, - '#withResourceVersion':: d.fn(help='"Specific resourceVersion to which this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), - withResourceVersion(resourceVersion): { regarding+: { resourceVersion: resourceVersion } }, - '#withUid':: d.fn(help='"UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids"', args=[d.arg(name='uid', type=d.T.string)]), - withUid(uid): { regarding+: { uid: uid } }, - }, - '#related':: d.obj(help='"ObjectReference contains enough information to let you inspect or modify the referred object."'), - related: { - '#withApiVersion':: d.fn(help='"API version of the referent."', args=[d.arg(name='apiVersion', type=d.T.string)]), - withApiVersion(apiVersion): { related+: { apiVersion: apiVersion } }, - '#withFieldPath':: d.fn(help='"If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. For example, if the object reference is to a container within a pod, this would take on a value like: \\"spec.containers{name}\\" (where \\"name\\" refers to the name of the container that triggered the event) or if no container name is specified \\"spec.containers[2]\\" (container with index 2 in this pod). This syntax is chosen only to have some well-defined way of referencing a part of an object."', args=[d.arg(name='fieldPath', type=d.T.string)]), - withFieldPath(fieldPath): { related+: { fieldPath: fieldPath } }, - '#withKind':: d.fn(help='"Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds"', args=[d.arg(name='kind', type=d.T.string)]), - withKind(kind): { related+: { kind: kind } }, - '#withName':: d.fn(help='"Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { related+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/"', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { related+: { namespace: namespace } }, - '#withResourceVersion':: d.fn(help='"Specific resourceVersion to which this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), - withResourceVersion(resourceVersion): { related+: { resourceVersion: resourceVersion } }, - '#withUid':: d.fn(help='"UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids"', args=[d.arg(name='uid', type=d.T.string)]), - withUid(uid): { related+: { uid: uid } }, - }, - '#series':: d.obj(help='"EventSeries contain information on series of events, i.e. 
thing that was/is happening continuously for some time."'), - series: { - '#withCount':: d.fn(help='"count is the number of occurrences in this series up to the last heartbeat time."', args=[d.arg(name='count', type=d.T.integer)]), - withCount(count): { series+: { count: count } }, - '#withLastObservedTime':: d.fn(help='"MicroTime is version of Time with microsecond level precision."', args=[d.arg(name='lastObservedTime', type=d.T.string)]), - withLastObservedTime(lastObservedTime): { series+: { lastObservedTime: lastObservedTime } }, - }, - '#withAction':: d.fn(help='"action is what action was taken/failed regarding to the regarding object. It is machine-readable. This field can have at most 128 characters."', args=[d.arg(name='action', type=d.T.string)]), - withAction(action): { action: action }, - '#withDeprecatedCount':: d.fn(help='"deprecatedCount is the deprecated field assuring backward compatibility with core.v1 Event type."', args=[d.arg(name='deprecatedCount', type=d.T.integer)]), - withDeprecatedCount(deprecatedCount): { deprecatedCount: deprecatedCount }, - '#withDeprecatedFirstTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='deprecatedFirstTimestamp', type=d.T.string)]), - withDeprecatedFirstTimestamp(deprecatedFirstTimestamp): { deprecatedFirstTimestamp: deprecatedFirstTimestamp }, - '#withDeprecatedLastTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='deprecatedLastTimestamp', type=d.T.string)]), - withDeprecatedLastTimestamp(deprecatedLastTimestamp): { deprecatedLastTimestamp: deprecatedLastTimestamp }, - '#withEventTime':: d.fn(help='"MicroTime is version of Time with microsecond level precision."', args=[d.arg(name='eventTime', type=d.T.string)]), - withEventTime(eventTime): { eventTime: eventTime }, - '#withNote':: d.fn(help='"note is a human-readable description of the status of this operation. Maximal length of the note is 1kB, but libraries should be prepared to handle values up to 64kB."', args=[d.arg(name='note', type=d.T.string)]), - withNote(note): { note: note }, - '#withReason':: d.fn(help='"reason is why the action was taken. It is human-readable. This field can have at most 128 characters."', args=[d.arg(name='reason', type=d.T.string)]), - withReason(reason): { reason: reason }, - '#withReportingController':: d.fn(help='"reportingController is the name of the controller that emitted this Event, e.g. `kubernetes.io/kubelet`. This field cannot be empty for new Events."', args=[d.arg(name='reportingController', type=d.T.string)]), - withReportingController(reportingController): { reportingController: reportingController }, - '#withReportingInstance':: d.fn(help='"reportingInstance is the ID of the controller instance, e.g. `kubelet-xyzf`. This field cannot be empty for new Events and it can have at most 128 characters."', args=[d.arg(name='reportingInstance', type=d.T.string)]), - withReportingInstance(reportingInstance): { reportingInstance: reportingInstance }, - '#withType':: d.fn(help='"type is the type of this event (Normal, Warning), new types could be added in the future. 
It is machine-readable."', args=[d.arg(name='type', type=d.T.string)]), - withType(type): { type: type }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/events/v1beta1/eventSeries.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/events/v1beta1/eventSeries.libsonnet deleted file mode 100644 index 9cdc01a276f..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/events/v1beta1/eventSeries.libsonnet +++ /dev/null @@ -1,10 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='eventSeries', url='', help='"EventSeries contain information on series of events, i.e. thing that was/is happening continuously for some time."'), - '#withCount':: d.fn(help='"count is the number of occurrences in this series up to the last heartbeat time."', args=[d.arg(name='count', type=d.T.integer)]), - withCount(count): { count: count }, - '#withLastObservedTime':: d.fn(help='"MicroTime is version of Time with microsecond level precision."', args=[d.arg(name='lastObservedTime', type=d.T.string)]), - withLastObservedTime(lastObservedTime): { lastObservedTime: lastObservedTime }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/events/v1beta1/main.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/events/v1beta1/main.libsonnet deleted file mode 100644 index 34c50897a1c..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/events/v1beta1/main.libsonnet +++ /dev/null @@ -1,6 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='v1beta1', url='', help=''), - event: (import 'event.libsonnet'), - eventSeries: (import 'eventSeries.libsonnet'), -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/extensions/main.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/extensions/main.libsonnet deleted file mode 100644 index d67bd0cf44e..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/extensions/main.libsonnet +++ /dev/null @@ -1,5 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='extensions', url='', help=''), - v1beta1: (import 'v1beta1/main.libsonnet'), -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/extensions/v1beta1/httpIngressPath.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/extensions/v1beta1/httpIngressPath.libsonnet deleted file mode 100644 index 2801551ab04..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/extensions/v1beta1/httpIngressPath.libsonnet +++ /dev/null @@ -1,26 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='httpIngressPath', url='', help='"HTTPIngressPath associates a path with a backend. Incoming urls matching the path are forwarded to the backend."'), - '#backend':: d.obj(help='"IngressBackend describes all endpoints for a given service and port."'), - backend: { - '#resource':: d.obj(help='"TypedLocalObjectReference contains enough information to let you locate the typed referenced object inside the same namespace."'), - resource: { - '#withApiGroup':: d.fn(help='"APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. 
For any other third-party types, APIGroup is required."', args=[d.arg(name='apiGroup', type=d.T.string)]), - withApiGroup(apiGroup): { backend+: { resource+: { apiGroup: apiGroup } } }, - '#withKind':: d.fn(help='"Kind is the type of resource being referenced"', args=[d.arg(name='kind', type=d.T.string)]), - withKind(kind): { backend+: { resource+: { kind: kind } } }, - '#withName':: d.fn(help='"Name is the name of resource being referenced"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { backend+: { resource+: { name: name } } }, - }, - '#withServiceName':: d.fn(help='"Specifies the name of the referenced service."', args=[d.arg(name='serviceName', type=d.T.string)]), - withServiceName(serviceName): { backend+: { serviceName: serviceName } }, - '#withServicePort':: d.fn(help='"IntOrString is a type that can hold an int32 or a string. When used in JSON or YAML marshalling and unmarshalling, it produces or consumes the inner type. This allows you to have, for example, a JSON field that can accept a name or number."', args=[d.arg(name='servicePort', type=d.T.string)]), - withServicePort(servicePort): { backend+: { servicePort: servicePort } }, - }, - '#withPath':: d.fn(help="\"Path is matched against the path of an incoming request. Currently it can contain characters disallowed from the conventional \\\"path\\\" part of a URL as defined by RFC 3986. Paths must begin with a '/'. When unspecified, all paths from incoming requests are matched.\"", args=[d.arg(name='path', type=d.T.string)]), - withPath(path): { path: path }, - '#withPathType':: d.fn(help="\"PathType determines the interpretation of the Path matching. PathType can be one of the following values: * Exact: Matches the URL path exactly. * Prefix: Matches based on a URL path prefix split by '/'. Matching is\\n done on a path element by element basis. A path element refers is the\\n list of labels in the path split by the '/' separator. A request is a\\n match for path p if every p is an element-wise prefix of p of the\\n request path. Note that if the last element of the path is a substring\\n of the last element in request path, it is not a match (e.g. /foo/bar\\n matches /foo/bar/baz, but does not match /foo/barbaz).\\n* ImplementationSpecific: Interpretation of the Path matching is up to\\n the IngressClass. Implementations can treat this as a separate PathType\\n or treat it identically to Prefix or Exact path types.\\nImplementations are required to support all path types. Defaults to ImplementationSpecific.\"", args=[d.arg(name='pathType', type=d.T.string)]), - withPathType(pathType): { pathType: pathType }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/extensions/v1beta1/ingress.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/extensions/v1beta1/ingress.libsonnet deleted file mode 100644 index 572a5508f32..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/extensions/v1beta1/ingress.libsonnet +++ /dev/null @@ -1,85 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='ingress', url='', help='"Ingress is a collection of rules that allow inbound connections to reach the endpoints defined by a backend. An Ingress can be configured to give services externally-reachable urls, load balance traffic, terminate SSL, offer name based virtual hosting etc. DEPRECATED - This group version of Ingress is deprecated by networking.k8s.io/v1beta1 Ingress. 
See the release notes for more information."'), - '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), - metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), - withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), - withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, - '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), - withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, - '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), - withDeletionGracePeriodSeconds(deletionGracePeriodSeconds): { metadata+: { deletionGracePeriodSeconds: deletionGracePeriodSeconds } }, - '#withDeletionTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='deletionTimestamp', type=d.T.string)]), - withDeletionTimestamp(deletionTimestamp): { metadata+: { deletionTimestamp: deletionTimestamp } }, - '#withFinalizers':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. 
If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), - withGenerateName(generateName): { metadata+: { generateName: generateName } }, - '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), - withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. 
More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), - withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), - withLabelsMixin(labels): { metadata+: { labels+: labels } }, - '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { metadata+: { namespace: namespace } }, - '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. 
There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, - '#withOwnerReferencesMixin':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, - '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), - withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), - withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), - withUid(uid): { metadata+: { uid: uid } }, - }, - '#new':: d.fn(help='new returns an instance of Ingress', args=[d.arg(name='name', type=d.T.string)]), - new(name): { - apiVersion: 'extensions/v1beta1', - kind: 'Ingress', - } + self.metadata.withName(name=name), - '#spec':: d.obj(help='"IngressSpec describes the Ingress the user wishes to exist."'), - spec: { - '#backend':: d.obj(help='"IngressBackend describes all endpoints for a given service and port."'), - backend: { - '#resource':: d.obj(help='"TypedLocalObjectReference contains enough information to let you locate the typed referenced object inside the same namespace."'), - resource: { - '#withApiGroup':: d.fn(help='"APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. 
For any other third-party types, APIGroup is required."', args=[d.arg(name='apiGroup', type=d.T.string)]), - withApiGroup(apiGroup): { spec+: { backend+: { resource+: { apiGroup: apiGroup } } } }, - '#withKind':: d.fn(help='"Kind is the type of resource being referenced"', args=[d.arg(name='kind', type=d.T.string)]), - withKind(kind): { spec+: { backend+: { resource+: { kind: kind } } } }, - '#withName':: d.fn(help='"Name is the name of resource being referenced"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { spec+: { backend+: { resource+: { name: name } } } }, - }, - '#withServiceName':: d.fn(help='"Specifies the name of the referenced service."', args=[d.arg(name='serviceName', type=d.T.string)]), - withServiceName(serviceName): { spec+: { backend+: { serviceName: serviceName } } }, - '#withServicePort':: d.fn(help='"IntOrString is a type that can hold an int32 or a string. When used in JSON or YAML marshalling and unmarshalling, it produces or consumes the inner type. This allows you to have, for example, a JSON field that can accept a name or number."', args=[d.arg(name='servicePort', type=d.T.string)]), - withServicePort(servicePort): { spec+: { backend+: { servicePort: servicePort } } }, - }, - '#withIngressClassName':: d.fn(help='"IngressClassName is the name of the IngressClass cluster resource. The associated IngressClass defines which controller will implement the resource. This replaces the deprecated `kubernetes.io/ingress.class` annotation. For backwards compatibility, when that annotation is set, it must be given precedence over this field. The controller may emit a warning if the field and annotation have different values. Implementations of this API should ignore Ingresses without a class specified. An IngressClass resource may be marked as default, which can be used to set a default value for this field. For more information, refer to the IngressClass documentation."', args=[d.arg(name='ingressClassName', type=d.T.string)]), - withIngressClassName(ingressClassName): { spec+: { ingressClassName: ingressClassName } }, - '#withRules':: d.fn(help='"A list of host rules used to configure the Ingress. If unspecified, or no rule matches, all traffic is sent to the default backend."', args=[d.arg(name='rules', type=d.T.array)]), - withRules(rules): { spec+: { rules: if std.isArray(v=rules) then rules else [rules] } }, - '#withRulesMixin':: d.fn(help='"A list of host rules used to configure the Ingress. If unspecified, or no rule matches, all traffic is sent to the default backend."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='rules', type=d.T.array)]), - withRulesMixin(rules): { spec+: { rules+: if std.isArray(v=rules) then rules else [rules] } }, - '#withTls':: d.fn(help='"TLS configuration. Currently the Ingress only supports a single TLS port, 443. If multiple members of this list specify different hosts, they will be multiplexed on the same port according to the hostname specified through the SNI TLS extension, if the ingress controller fulfilling the ingress supports SNI."', args=[d.arg(name='tls', type=d.T.array)]), - withTls(tls): { spec+: { tls: if std.isArray(v=tls) then tls else [tls] } }, - '#withTlsMixin':: d.fn(help='"TLS configuration. Currently the Ingress only supports a single TLS port, 443. 
If multiple members of this list specify different hosts, they will be multiplexed on the same port according to the hostname specified through the SNI TLS extension, if the ingress controller fulfilling the ingress supports SNI."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='tls', type=d.T.array)]), - withTlsMixin(tls): { spec+: { tls+: if std.isArray(v=tls) then tls else [tls] } }, - }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/extensions/v1beta1/ingressBackend.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/extensions/v1beta1/ingressBackend.libsonnet deleted file mode 100644 index ee01f37e01d..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/extensions/v1beta1/ingressBackend.libsonnet +++ /dev/null @@ -1,19 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='ingressBackend', url='', help='"IngressBackend describes all endpoints for a given service and port."'), - '#resource':: d.obj(help='"TypedLocalObjectReference contains enough information to let you locate the typed referenced object inside the same namespace."'), - resource: { - '#withApiGroup':: d.fn(help='"APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required."', args=[d.arg(name='apiGroup', type=d.T.string)]), - withApiGroup(apiGroup): { resource+: { apiGroup: apiGroup } }, - '#withKind':: d.fn(help='"Kind is the type of resource being referenced"', args=[d.arg(name='kind', type=d.T.string)]), - withKind(kind): { resource+: { kind: kind } }, - '#withName':: d.fn(help='"Name is the name of resource being referenced"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { resource+: { name: name } }, - }, - '#withServiceName':: d.fn(help='"Specifies the name of the referenced service."', args=[d.arg(name='serviceName', type=d.T.string)]), - withServiceName(serviceName): { serviceName: serviceName }, - '#withServicePort':: d.fn(help='"IntOrString is a type that can hold an int32 or a string. When used in JSON or YAML marshalling and unmarshalling, it produces or consumes the inner type. This allows you to have, for example, a JSON field that can accept a name or number."', args=[d.arg(name='servicePort', type=d.T.string)]), - withServicePort(servicePort): { servicePort: servicePort }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/extensions/v1beta1/ingressSpec.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/extensions/v1beta1/ingressSpec.libsonnet deleted file mode 100644 index 35156d74036..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/extensions/v1beta1/ingressSpec.libsonnet +++ /dev/null @@ -1,32 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='ingressSpec', url='', help='"IngressSpec describes the Ingress the user wishes to exist."'), - '#backend':: d.obj(help='"IngressBackend describes all endpoints for a given service and port."'), - backend: { - '#resource':: d.obj(help='"TypedLocalObjectReference contains enough information to let you locate the typed referenced object inside the same namespace."'), - resource: { - '#withApiGroup':: d.fn(help='"APIGroup is the group for the resource being referenced. 
If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required."', args=[d.arg(name='apiGroup', type=d.T.string)]), - withApiGroup(apiGroup): { backend+: { resource+: { apiGroup: apiGroup } } }, - '#withKind':: d.fn(help='"Kind is the type of resource being referenced"', args=[d.arg(name='kind', type=d.T.string)]), - withKind(kind): { backend+: { resource+: { kind: kind } } }, - '#withName':: d.fn(help='"Name is the name of resource being referenced"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { backend+: { resource+: { name: name } } }, - }, - '#withServiceName':: d.fn(help='"Specifies the name of the referenced service."', args=[d.arg(name='serviceName', type=d.T.string)]), - withServiceName(serviceName): { backend+: { serviceName: serviceName } }, - '#withServicePort':: d.fn(help='"IntOrString is a type that can hold an int32 or a string. When used in JSON or YAML marshalling and unmarshalling, it produces or consumes the inner type. This allows you to have, for example, a JSON field that can accept a name or number."', args=[d.arg(name='servicePort', type=d.T.string)]), - withServicePort(servicePort): { backend+: { servicePort: servicePort } }, - }, - '#withIngressClassName':: d.fn(help='"IngressClassName is the name of the IngressClass cluster resource. The associated IngressClass defines which controller will implement the resource. This replaces the deprecated `kubernetes.io/ingress.class` annotation. For backwards compatibility, when that annotation is set, it must be given precedence over this field. The controller may emit a warning if the field and annotation have different values. Implementations of this API should ignore Ingresses without a class specified. An IngressClass resource may be marked as default, which can be used to set a default value for this field. For more information, refer to the IngressClass documentation."', args=[d.arg(name='ingressClassName', type=d.T.string)]), - withIngressClassName(ingressClassName): { ingressClassName: ingressClassName }, - '#withRules':: d.fn(help='"A list of host rules used to configure the Ingress. If unspecified, or no rule matches, all traffic is sent to the default backend."', args=[d.arg(name='rules', type=d.T.array)]), - withRules(rules): { rules: if std.isArray(v=rules) then rules else [rules] }, - '#withRulesMixin':: d.fn(help='"A list of host rules used to configure the Ingress. If unspecified, or no rule matches, all traffic is sent to the default backend."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='rules', type=d.T.array)]), - withRulesMixin(rules): { rules+: if std.isArray(v=rules) then rules else [rules] }, - '#withTls':: d.fn(help='"TLS configuration. Currently the Ingress only supports a single TLS port, 443. If multiple members of this list specify different hosts, they will be multiplexed on the same port according to the hostname specified through the SNI TLS extension, if the ingress controller fulfilling the ingress supports SNI."', args=[d.arg(name='tls', type=d.T.array)]), - withTls(tls): { tls: if std.isArray(v=tls) then tls else [tls] }, - '#withTlsMixin':: d.fn(help='"TLS configuration. Currently the Ingress only supports a single TLS port, 443. 
If multiple members of this list specify different hosts, they will be multiplexed on the same port according to the hostname specified through the SNI TLS extension, if the ingress controller fulfilling the ingress supports SNI."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='tls', type=d.T.array)]), - withTlsMixin(tls): { tls+: if std.isArray(v=tls) then tls else [tls] }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/extensions/v1beta1/ingressStatus.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/extensions/v1beta1/ingressStatus.libsonnet deleted file mode 100644 index 7bae060053d..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/extensions/v1beta1/ingressStatus.libsonnet +++ /dev/null @@ -1,13 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='ingressStatus', url='', help='"IngressStatus describe the current state of the Ingress."'), - '#loadBalancer':: d.obj(help='"LoadBalancerStatus represents the status of a load-balancer."'), - loadBalancer: { - '#withIngress':: d.fn(help='"Ingress is a list containing ingress points for the load-balancer. Traffic intended for the service should be sent to these ingress points."', args=[d.arg(name='ingress', type=d.T.array)]), - withIngress(ingress): { loadBalancer+: { ingress: if std.isArray(v=ingress) then ingress else [ingress] } }, - '#withIngressMixin':: d.fn(help='"Ingress is a list containing ingress points for the load-balancer. Traffic intended for the service should be sent to these ingress points."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ingress', type=d.T.array)]), - withIngressMixin(ingress): { loadBalancer+: { ingress+: if std.isArray(v=ingress) then ingress else [ingress] } }, - }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/extensions/v1beta1/ingressTLS.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/extensions/v1beta1/ingressTLS.libsonnet deleted file mode 100644 index 21adb5950ec..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/extensions/v1beta1/ingressTLS.libsonnet +++ /dev/null @@ -1,12 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='ingressTLS', url='', help='"IngressTLS describes the transport layer security associated with an Ingress."'), - '#withHosts':: d.fn(help='"Hosts are a list of hosts included in the TLS certificate. The values in this list must match the name/s used in the tlsSecret. Defaults to the wildcard host setting for the loadbalancer controller fulfilling this Ingress, if left unspecified."', args=[d.arg(name='hosts', type=d.T.array)]), - withHosts(hosts): { hosts: if std.isArray(v=hosts) then hosts else [hosts] }, - '#withHostsMixin':: d.fn(help='"Hosts are a list of hosts included in the TLS certificate. The values in this list must match the name/s used in the tlsSecret. Defaults to the wildcard host setting for the loadbalancer controller fulfilling this Ingress, if left unspecified."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='hosts', type=d.T.array)]), - withHostsMixin(hosts): { hosts+: if std.isArray(v=hosts) then hosts else [hosts] }, - '#withSecretName':: d.fn(help='"SecretName is the name of the secret used to terminate SSL traffic on 443. 
Field is left optional to allow SSL routing based on SNI hostname alone. If the SNI host in a listener conflicts with the \\"Host\\" header field used by an IngressRule, the SNI host is used for termination and value of the Host header is used for routing."', args=[d.arg(name='secretName', type=d.T.string)]), - withSecretName(secretName): { secretName: secretName }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/extensions/v1beta1/main.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/extensions/v1beta1/main.libsonnet deleted file mode 100644 index c12f3f9af7c..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/extensions/v1beta1/main.libsonnet +++ /dev/null @@ -1,12 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='v1beta1', url='', help=''), - httpIngressPath: (import 'httpIngressPath.libsonnet'), - httpIngressRuleValue: (import 'httpIngressRuleValue.libsonnet'), - ingress: (import 'ingress.libsonnet'), - ingressBackend: (import 'ingressBackend.libsonnet'), - ingressRule: (import 'ingressRule.libsonnet'), - ingressSpec: (import 'ingressSpec.libsonnet'), - ingressStatus: (import 'ingressStatus.libsonnet'), - ingressTLS: (import 'ingressTLS.libsonnet'), -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/flowcontrol/v1beta1/priorityLevelConfigurationSpec.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/flowcontrol/v1beta1/priorityLevelConfigurationSpec.libsonnet deleted file mode 100644 index 7b00ffa7b5d..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/flowcontrol/v1beta1/priorityLevelConfigurationSpec.libsonnet +++ /dev/null @@ -1,27 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='priorityLevelConfigurationSpec', url='', help='"PriorityLevelConfigurationSpec specifies the configuration of a priority level."'), - '#limited':: d.obj(help='"LimitedPriorityLevelConfiguration specifies how to handle requests that are subject to limits. It addresses two issues:\\n * How are requests for this priority level limited?\\n * What should be done with requests that exceed the limit?"'), - limited: { - '#limitResponse':: d.obj(help='"LimitResponse defines how to handle requests that can not be executed right now."'), - limitResponse: { - '#queuing':: d.obj(help='"QueuingConfiguration holds the configuration parameters for queuing"'), - queuing: { - '#withHandSize':: d.fn(help="\"`handSize` is a small positive number that configures the shuffle sharding of requests into queues. When enqueuing a request at this priority level the request's flow identifier (a string pair) is hashed and the hash value is used to shuffle the list of queues and deal a hand of the size specified here. The request is put into one of the shortest queues in that hand. `handSize` must be no larger than `queues`, and should be significantly smaller (so that a few heavy flows do not saturate most of the queues). See the user-facing documentation for more extensive guidance on setting this field. 
This field has a default value of 8.\"", args=[d.arg(name='handSize', type=d.T.integer)]), - withHandSize(handSize): { limited+: { limitResponse+: { queuing+: { handSize: handSize } } } }, - '#withQueueLengthLimit':: d.fn(help='"`queueLengthLimit` is the maximum number of requests allowed to be waiting in a given queue of this priority level at a time; excess requests are rejected. This value must be positive. If not specified, it will be defaulted to 50."', args=[d.arg(name='queueLengthLimit', type=d.T.integer)]), - withQueueLengthLimit(queueLengthLimit): { limited+: { limitResponse+: { queuing+: { queueLengthLimit: queueLengthLimit } } } }, - '#withQueues':: d.fn(help='"`queues` is the number of queues for this priority level. The queues exist independently at each apiserver. The value must be positive. Setting it to 1 effectively precludes shufflesharding and thus makes the distinguisher method of associated flow schemas irrelevant. This field has a default value of 64."', args=[d.arg(name='queues', type=d.T.integer)]), - withQueues(queues): { limited+: { limitResponse+: { queuing+: { queues: queues } } } }, - }, - '#withType':: d.fn(help='"`type` is \\"Queue\\" or \\"Reject\\". \\"Queue\\" means that requests that can not be executed upon arrival are held in a queue until they can be executed or a queuing limit is reached. \\"Reject\\" means that requests that can not be executed upon arrival are rejected. Required."', args=[d.arg(name='type', type=d.T.string)]), - withType(type): { limited+: { limitResponse+: { type: type } } }, - }, - '#withAssuredConcurrencyShares':: d.fn(help="\"`assuredConcurrencyShares` (ACS) configures the execution limit, which is a limit on the number of requests of this priority level that may be exeucting at a given time. ACS must be a positive number. The server's concurrency limit (SCL) is divided among the concurrency-controlled priority levels in proportion to their assured concurrency shares. This produces the assured concurrency value (ACV) --- the number of requests that may be executing at a time --- for each such priority level:\\n\\n ACV(l) = ceil( SCL * ACS(l) / ( sum[priority levels k] ACS(k) ) )\\n\\nbigger numbers of ACS mean more reserved concurrent requests (at the expense of every other PL). This field has a default value of 30.\"", args=[d.arg(name='assuredConcurrencyShares', type=d.T.integer)]), - withAssuredConcurrencyShares(assuredConcurrencyShares): { limited+: { assuredConcurrencyShares: assuredConcurrencyShares } }, - }, - '#withType':: d.fn(help="\"`type` indicates whether this priority level is subject to limitation on request execution. A value of `\\\"Exempt\\\"` means that requests of this priority level are not subject to a limit (and thus are never queued) and do not detract from the capacity made available to other priority levels. A value of `\\\"Limited\\\"` means that (a) requests of this priority level _are_ subject to limits and (b) some of the server's limited capacity is made available exclusively to this priority level. 
Required.\"", args=[d.arg(name='type', type=d.T.string)]), - withType(type): { type: type }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1/httpIngressRuleValue.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1/httpIngressRuleValue.libsonnet deleted file mode 100644 index a54d695b4fc..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1/httpIngressRuleValue.libsonnet +++ /dev/null @@ -1,10 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='httpIngressRuleValue', url='', help="\"HTTPIngressRuleValue is a list of http selectors pointing to backends. In the example: http://\u003chost\u003e/\u003cpath\u003e?\u003csearchpart\u003e -\u003e backend where where parts of the url correspond to RFC 3986, this resource will be used to match against everything after the last '/' and before the first '?' or '#'.\""), - '#withPaths':: d.fn(help='"A collection of paths that map requests to backends."', args=[d.arg(name='paths', type=d.T.array)]), - withPaths(paths): { paths: if std.isArray(v=paths) then paths else [paths] }, - '#withPathsMixin':: d.fn(help='"A collection of paths that map requests to backends."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='paths', type=d.T.array)]), - withPathsMixin(paths): { paths+: if std.isArray(v=paths) then paths else [paths] }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1/ingressStatus.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1/ingressStatus.libsonnet deleted file mode 100644 index 7bae060053d..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1/ingressStatus.libsonnet +++ /dev/null @@ -1,13 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='ingressStatus', url='', help='"IngressStatus describe the current state of the Ingress."'), - '#loadBalancer':: d.obj(help='"LoadBalancerStatus represents the status of a load-balancer."'), - loadBalancer: { - '#withIngress':: d.fn(help='"Ingress is a list containing ingress points for the load-balancer. Traffic intended for the service should be sent to these ingress points."', args=[d.arg(name='ingress', type=d.T.array)]), - withIngress(ingress): { loadBalancer+: { ingress: if std.isArray(v=ingress) then ingress else [ingress] } }, - '#withIngressMixin':: d.fn(help='"Ingress is a list containing ingress points for the load-balancer. 
Traffic intended for the service should be sent to these ingress points."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ingress', type=d.T.array)]), - withIngressMixin(ingress): { loadBalancer+: { ingress+: if std.isArray(v=ingress) then ingress else [ingress] } }, - }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1/ingressTLS.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1/ingressTLS.libsonnet deleted file mode 100644 index 95d59bcef55..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1/ingressTLS.libsonnet +++ /dev/null @@ -1,12 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='ingressTLS', url='', help='"IngressTLS describes the transport layer security associated with an Ingress."'), - '#withHosts':: d.fn(help='"Hosts are a list of hosts included in the TLS certificate. The values in this list must match the name/s used in the tlsSecret. Defaults to the wildcard host setting for the loadbalancer controller fulfilling this Ingress, if left unspecified."', args=[d.arg(name='hosts', type=d.T.array)]), - withHosts(hosts): { hosts: if std.isArray(v=hosts) then hosts else [hosts] }, - '#withHostsMixin':: d.fn(help='"Hosts are a list of hosts included in the TLS certificate. The values in this list must match the name/s used in the tlsSecret. Defaults to the wildcard host setting for the loadbalancer controller fulfilling this Ingress, if left unspecified."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='hosts', type=d.T.array)]), - withHostsMixin(hosts): { hosts+: if std.isArray(v=hosts) then hosts else [hosts] }, - '#withSecretName':: d.fn(help='"SecretName is the name of the secret used to terminate TLS traffic on port 443. Field is left optional to allow TLS routing based on SNI hostname alone. If the SNI host in a listener conflicts with the \\"Host\\" header field used by an IngressRule, the SNI host is used for termination and value of the Host header is used for routing."', args=[d.arg(name='secretName', type=d.T.string)]), - withSecretName(secretName): { secretName: secretName }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1/ipBlock.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1/ipBlock.libsonnet deleted file mode 100644 index dc95536195d..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1/ipBlock.libsonnet +++ /dev/null @@ -1,12 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='ipBlock', url='', help="\"IPBlock describes a particular CIDR (Ex. \\\"192.168.1.1/24\\\",\\\"2001:db9::/64\\\") that is allowed to the pods matched by a NetworkPolicySpec's podSelector. 
The except entry describes CIDRs that should not be included within this rule.\""), - '#withCidr':: d.fn(help='"CIDR is a string representing the IP Block Valid examples are \\"192.168.1.1/24\\" or \\"2001:db9::/64\\', args=[d.arg(name='cidr', type=d.T.string)]), - withCidr(cidr): { cidr: cidr }, - '#withExcept':: d.fn(help='"Except is a slice of CIDRs that should not be included within an IP Block Valid examples are \\"192.168.1.1/24\\" or \\"2001:db9::/64\\" Except values will be rejected if they are outside the CIDR range"', args=[d.arg(name='except', type=d.T.array)]), - withExcept(except): { except: if std.isArray(v=except) then except else [except] }, - '#withExceptMixin':: d.fn(help='"Except is a slice of CIDRs that should not be included within an IP Block Valid examples are \\"192.168.1.1/24\\" or \\"2001:db9::/64\\" Except values will be rejected if they are outside the CIDR range"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='except', type=d.T.array)]), - withExceptMixin(except): { except+: if std.isArray(v=except) then except else [except] }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1/networkPolicyEgressRule.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1/networkPolicyEgressRule.libsonnet deleted file mode 100644 index 155450ad6c3..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1/networkPolicyEgressRule.libsonnet +++ /dev/null @@ -1,14 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='networkPolicyEgressRule', url='', help="\"NetworkPolicyEgressRule describes a particular set of traffic that is allowed out of pods matched by a NetworkPolicySpec's podSelector. The traffic must match both ports and to. This type is beta-level in 1.8\""), - '#withPorts':: d.fn(help='"List of destination ports for outgoing traffic. Each item in this list is combined using a logical OR. If this field is empty or missing, this rule matches all ports (traffic not restricted by port). If this field is present and contains at least one item, then this rule allows traffic only if the traffic matches at least one port in the list."', args=[d.arg(name='ports', type=d.T.array)]), - withPorts(ports): { ports: if std.isArray(v=ports) then ports else [ports] }, - '#withPortsMixin':: d.fn(help='"List of destination ports for outgoing traffic. Each item in this list is combined using a logical OR. If this field is empty or missing, this rule matches all ports (traffic not restricted by port). If this field is present and contains at least one item, then this rule allows traffic only if the traffic matches at least one port in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ports', type=d.T.array)]), - withPortsMixin(ports): { ports+: if std.isArray(v=ports) then ports else [ports] }, - '#withTo':: d.fn(help='"List of destinations for outgoing traffic of pods selected for this rule. Items in this list are combined using a logical OR operation. If this field is empty or missing, this rule matches all destinations (traffic not restricted by destination). 
If this field is present and contains at least one item, this rule allows traffic only if the traffic matches at least one item in the to list."', args=[d.arg(name='to', type=d.T.array)]), - withTo(to): { to: if std.isArray(v=to) then to else [to] }, - '#withToMixin':: d.fn(help='"List of destinations for outgoing traffic of pods selected for this rule. Items in this list are combined using a logical OR operation. If this field is empty or missing, this rule matches all destinations (traffic not restricted by destination). If this field is present and contains at least one item, this rule allows traffic only if the traffic matches at least one item in the to list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='to', type=d.T.array)]), - withToMixin(to): { to+: if std.isArray(v=to) then to else [to] }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1/networkPolicyIngressRule.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1/networkPolicyIngressRule.libsonnet deleted file mode 100644 index 9d244ee6819..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1/networkPolicyIngressRule.libsonnet +++ /dev/null @@ -1,14 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='networkPolicyIngressRule', url='', help="\"NetworkPolicyIngressRule describes a particular set of traffic that is allowed to the pods matched by a NetworkPolicySpec's podSelector. The traffic must match both ports and from.\""), - '#withFrom':: d.fn(help='"List of sources which should be able to access the pods selected for this rule. Items in this list are combined using a logical OR operation. If this field is empty or missing, this rule matches all sources (traffic not restricted by source). If this field is present and contains at least one item, this rule allows traffic only if the traffic matches at least one item in the from list."', args=[d.arg(name='from', type=d.T.array)]), - withFrom(from): { from: if std.isArray(v=from) then from else [from] }, - '#withFromMixin':: d.fn(help='"List of sources which should be able to access the pods selected for this rule. Items in this list are combined using a logical OR operation. If this field is empty or missing, this rule matches all sources (traffic not restricted by source). If this field is present and contains at least one item, this rule allows traffic only if the traffic matches at least one item in the from list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='from', type=d.T.array)]), - withFromMixin(from): { from+: if std.isArray(v=from) then from else [from] }, - '#withPorts':: d.fn(help='"List of ports which should be made accessible on the pods selected for this rule. Each item in this list is combined using a logical OR. If this field is empty or missing, this rule matches all ports (traffic not restricted by port). If this field is present and contains at least one item, then this rule allows traffic only if the traffic matches at least one port in the list."', args=[d.arg(name='ports', type=d.T.array)]), - withPorts(ports): { ports: if std.isArray(v=ports) then ports else [ports] }, - '#withPortsMixin':: d.fn(help='"List of ports which should be made accessible on the pods selected for this rule. Each item in this list is combined using a logical OR. 
If this field is empty or missing, this rule matches all ports (traffic not restricted by port). If this field is present and contains at least one item, then this rule allows traffic only if the traffic matches at least one port in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ports', type=d.T.array)]), - withPortsMixin(ports): { ports+: if std.isArray(v=ports) then ports else [ports] }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1/networkPolicyPort.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1/networkPolicyPort.libsonnet deleted file mode 100644 index 4a23de3ac0c..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1/networkPolicyPort.libsonnet +++ /dev/null @@ -1,12 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='networkPolicyPort', url='', help='"NetworkPolicyPort describes a port to allow traffic on"'), - '#withEndPort':: d.fn(help='"If set, indicates that the range of ports from port to endPort, inclusive, should be allowed by the policy. This field cannot be defined if the port field is not defined or if the port field is defined as a named (string) port. The endPort must be equal or greater than port. This feature is in Alpha state and should be enabled using the Feature Gate \\"NetworkPolicyEndPort\\"."', args=[d.arg(name='endPort', type=d.T.integer)]), - withEndPort(endPort): { endPort: endPort }, - '#withPort':: d.fn(help='"IntOrString is a type that can hold an int32 or a string. When used in JSON or YAML marshalling and unmarshalling, it produces or consumes the inner type. This allows you to have, for example, a JSON field that can accept a name or number."', args=[d.arg(name='port', type=d.T.string)]), - withPort(port): { port: port }, - '#withProtocol':: d.fn(help='"The protocol (TCP, UDP, or SCTP) which traffic must match. If not specified, this field defaults to TCP."', args=[d.arg(name='protocol', type=d.T.string)]), - withProtocol(protocol): { protocol: protocol }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1/networkPolicySpec.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1/networkPolicySpec.libsonnet deleted file mode 100644 index cdfadf08597..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1/networkPolicySpec.libsonnet +++ /dev/null @@ -1,29 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='networkPolicySpec', url='', help='"NetworkPolicySpec provides the specification of a NetworkPolicy"'), - '#podSelector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), - podSelector: { - '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), - withMatchExpressions(matchExpressions): { podSelector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } }, - '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. 
The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), - withMatchExpressionsMixin(matchExpressions): { podSelector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } }, - '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), - withMatchLabels(matchLabels): { podSelector+: { matchLabels: matchLabels } }, - '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), - withMatchLabelsMixin(matchLabels): { podSelector+: { matchLabels+: matchLabels } }, - }, - '#withEgress':: d.fn(help='"List of egress rules to be applied to the selected pods. Outgoing traffic is allowed if there are no NetworkPolicies selecting the pod (and cluster policy otherwise allows the traffic), OR if the traffic matches at least one egress rule across all of the NetworkPolicy objects whose podSelector matches the pod. If this field is empty then this NetworkPolicy limits all outgoing traffic (and serves solely to ensure that the pods it selects are isolated by default). This field is beta-level in 1.8"', args=[d.arg(name='egress', type=d.T.array)]), - withEgress(egress): { egress: if std.isArray(v=egress) then egress else [egress] }, - '#withEgressMixin':: d.fn(help='"List of egress rules to be applied to the selected pods. Outgoing traffic is allowed if there are no NetworkPolicies selecting the pod (and cluster policy otherwise allows the traffic), OR if the traffic matches at least one egress rule across all of the NetworkPolicy objects whose podSelector matches the pod. If this field is empty then this NetworkPolicy limits all outgoing traffic (and serves solely to ensure that the pods it selects are isolated by default). This field is beta-level in 1.8"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='egress', type=d.T.array)]), - withEgressMixin(egress): { egress+: if std.isArray(v=egress) then egress else [egress] }, - '#withIngress':: d.fn(help="\"List of ingress rules to be applied to the selected pods. Traffic is allowed to a pod if there are no NetworkPolicies selecting the pod (and cluster policy otherwise allows the traffic), OR if the traffic source is the pod's local node, OR if the traffic matches at least one ingress rule across all of the NetworkPolicy objects whose podSelector matches the pod. If this field is empty then this NetworkPolicy does not allow any traffic (and serves solely to ensure that the pods it selects are isolated by default)\"", args=[d.arg(name='ingress', type=d.T.array)]), - withIngress(ingress): { ingress: if std.isArray(v=ingress) then ingress else [ingress] }, - '#withIngressMixin':: d.fn(help="\"List of ingress rules to be applied to the selected pods. 
Traffic is allowed to a pod if there are no NetworkPolicies selecting the pod (and cluster policy otherwise allows the traffic), OR if the traffic source is the pod's local node, OR if the traffic matches at least one ingress rule across all of the NetworkPolicy objects whose podSelector matches the pod. If this field is empty then this NetworkPolicy does not allow any traffic (and serves solely to ensure that the pods it selects are isolated by default)\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='ingress', type=d.T.array)]), - withIngressMixin(ingress): { ingress+: if std.isArray(v=ingress) then ingress else [ingress] }, - '#withPolicyTypes':: d.fn(help='"List of rule types that the NetworkPolicy relates to. Valid options are [\\"Ingress\\"], [\\"Egress\\"], or [\\"Ingress\\", \\"Egress\\"]. If this field is not specified, it will default based on the existence of Ingress or Egress rules; policies that contain an Egress section are assumed to affect Egress, and all policies (whether or not they contain an Ingress section) are assumed to affect Ingress. If you want to write an egress-only policy, you must explicitly specify policyTypes [ \\"Egress\\" ]. Likewise, if you want to write a policy that specifies that no egress is allowed, you must specify a policyTypes value that include \\"Egress\\" (since such a policy would not include an Egress section and would otherwise default to just [ \\"Ingress\\" ]). This field is beta-level in 1.8"', args=[d.arg(name='policyTypes', type=d.T.array)]), - withPolicyTypes(policyTypes): { policyTypes: if std.isArray(v=policyTypes) then policyTypes else [policyTypes] }, - '#withPolicyTypesMixin':: d.fn(help='"List of rule types that the NetworkPolicy relates to. Valid options are [\\"Ingress\\"], [\\"Egress\\"], or [\\"Ingress\\", \\"Egress\\"]. If this field is not specified, it will default based on the existence of Ingress or Egress rules; policies that contain an Egress section are assumed to affect Egress, and all policies (whether or not they contain an Ingress section) are assumed to affect Ingress. If you want to write an egress-only policy, you must explicitly specify policyTypes [ \\"Egress\\" ]. Likewise, if you want to write a policy that specifies that no egress is allowed, you must specify a policyTypes value that include \\"Egress\\" (since such a policy would not include an Egress section and would otherwise default to just [ \\"Ingress\\" ]). This field is beta-level in 1.8"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='policyTypes', type=d.T.array)]), - withPolicyTypesMixin(policyTypes): { policyTypes+: if std.isArray(v=policyTypes) then policyTypes else [policyTypes] }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1beta1/httpIngressPath.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1beta1/httpIngressPath.libsonnet deleted file mode 100644 index 2801551ab04..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1beta1/httpIngressPath.libsonnet +++ /dev/null @@ -1,26 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='httpIngressPath', url='', help='"HTTPIngressPath associates a path with a backend. 
Incoming urls matching the path are forwarded to the backend."'), - '#backend':: d.obj(help='"IngressBackend describes all endpoints for a given service and port."'), - backend: { - '#resource':: d.obj(help='"TypedLocalObjectReference contains enough information to let you locate the typed referenced object inside the same namespace."'), - resource: { - '#withApiGroup':: d.fn(help='"APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required."', args=[d.arg(name='apiGroup', type=d.T.string)]), - withApiGroup(apiGroup): { backend+: { resource+: { apiGroup: apiGroup } } }, - '#withKind':: d.fn(help='"Kind is the type of resource being referenced"', args=[d.arg(name='kind', type=d.T.string)]), - withKind(kind): { backend+: { resource+: { kind: kind } } }, - '#withName':: d.fn(help='"Name is the name of resource being referenced"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { backend+: { resource+: { name: name } } }, - }, - '#withServiceName':: d.fn(help='"Specifies the name of the referenced service."', args=[d.arg(name='serviceName', type=d.T.string)]), - withServiceName(serviceName): { backend+: { serviceName: serviceName } }, - '#withServicePort':: d.fn(help='"IntOrString is a type that can hold an int32 or a string. When used in JSON or YAML marshalling and unmarshalling, it produces or consumes the inner type. This allows you to have, for example, a JSON field that can accept a name or number."', args=[d.arg(name='servicePort', type=d.T.string)]), - withServicePort(servicePort): { backend+: { servicePort: servicePort } }, - }, - '#withPath':: d.fn(help="\"Path is matched against the path of an incoming request. Currently it can contain characters disallowed from the conventional \\\"path\\\" part of a URL as defined by RFC 3986. Paths must begin with a '/'. When unspecified, all paths from incoming requests are matched.\"", args=[d.arg(name='path', type=d.T.string)]), - withPath(path): { path: path }, - '#withPathType':: d.fn(help="\"PathType determines the interpretation of the Path matching. PathType can be one of the following values: * Exact: Matches the URL path exactly. * Prefix: Matches based on a URL path prefix split by '/'. Matching is\\n done on a path element by element basis. A path element refers is the\\n list of labels in the path split by the '/' separator. A request is a\\n match for path p if every p is an element-wise prefix of p of the\\n request path. Note that if the last element of the path is a substring\\n of the last element in request path, it is not a match (e.g. /foo/bar\\n matches /foo/bar/baz, but does not match /foo/barbaz).\\n* ImplementationSpecific: Interpretation of the Path matching is up to\\n the IngressClass. Implementations can treat this as a separate PathType\\n or treat it identically to Prefix or Exact path types.\\nImplementations are required to support all path types. 
Defaults to ImplementationSpecific.\"", args=[d.arg(name='pathType', type=d.T.string)]), - withPathType(pathType): { pathType: pathType }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1beta1/httpIngressRuleValue.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1beta1/httpIngressRuleValue.libsonnet deleted file mode 100644 index a54d695b4fc..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1beta1/httpIngressRuleValue.libsonnet +++ /dev/null @@ -1,10 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='httpIngressRuleValue', url='', help="\"HTTPIngressRuleValue is a list of http selectors pointing to backends. In the example: http://\u003chost\u003e/\u003cpath\u003e?\u003csearchpart\u003e -\u003e backend where where parts of the url correspond to RFC 3986, this resource will be used to match against everything after the last '/' and before the first '?' or '#'.\""), - '#withPaths':: d.fn(help='"A collection of paths that map requests to backends."', args=[d.arg(name='paths', type=d.T.array)]), - withPaths(paths): { paths: if std.isArray(v=paths) then paths else [paths] }, - '#withPathsMixin':: d.fn(help='"A collection of paths that map requests to backends."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='paths', type=d.T.array)]), - withPathsMixin(paths): { paths+: if std.isArray(v=paths) then paths else [paths] }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1beta1/ingress.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1beta1/ingress.libsonnet deleted file mode 100644 index bd621b40231..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1beta1/ingress.libsonnet +++ /dev/null @@ -1,85 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='ingress', url='', help='"Ingress is a collection of rules that allow inbound connections to reach the endpoints defined by a backend. An Ingress can be configured to give services externally-reachable urls, load balance traffic, terminate SSL, offer name based virtual hosting etc."'), - '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), - metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), - withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. 
More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), - withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, - '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), - withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, - '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), - withDeletionGracePeriodSeconds(deletionGracePeriodSeconds): { metadata+: { deletionGracePeriodSeconds: deletionGracePeriodSeconds } }, - '#withDeletionTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='deletionTimestamp', type=d.T.string)]), - withDeletionTimestamp(deletionTimestamp): { metadata+: { deletionTimestamp: deletionTimestamp } }, - '#withFinalizers':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. 
If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), - withGenerateName(generateName): { metadata+: { generateName: generateName } }, - '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), - withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), - withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), - withLabelsMixin(labels): { metadata+: { labels+: labels } }, - '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". 
The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { metadata+: { namespace: namespace } }, - '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, - '#withOwnerReferencesMixin':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, - '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. 
May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), - withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), - withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), - withUid(uid): { metadata+: { uid: uid } }, - }, - '#new':: d.fn(help='new returns an instance of Ingress', args=[d.arg(name='name', type=d.T.string)]), - new(name): { - apiVersion: 'networking.k8s.io/v1beta1', - kind: 'Ingress', - } + self.metadata.withName(name=name), - '#spec':: d.obj(help='"IngressSpec describes the Ingress the user wishes to exist."'), - spec: { - '#backend':: d.obj(help='"IngressBackend describes all endpoints for a given service and port."'), - backend: { - '#resource':: d.obj(help='"TypedLocalObjectReference contains enough information to let you locate the typed referenced object inside the same namespace."'), - resource: { - '#withApiGroup':: d.fn(help='"APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required."', args=[d.arg(name='apiGroup', type=d.T.string)]), - withApiGroup(apiGroup): { spec+: { backend+: { resource+: { apiGroup: apiGroup } } } }, - '#withKind':: d.fn(help='"Kind is the type of resource being referenced"', args=[d.arg(name='kind', type=d.T.string)]), - withKind(kind): { spec+: { backend+: { resource+: { kind: kind } } } }, - '#withName':: d.fn(help='"Name is the name of resource being referenced"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { spec+: { backend+: { resource+: { name: name } } } }, - }, - '#withServiceName':: d.fn(help='"Specifies the name of the referenced service."', args=[d.arg(name='serviceName', type=d.T.string)]), - withServiceName(serviceName): { spec+: { backend+: { serviceName: serviceName } } }, - '#withServicePort':: d.fn(help='"IntOrString is a type that can hold an int32 or a string. When used in JSON or YAML marshalling and unmarshalling, it produces or consumes the inner type. This allows you to have, for example, a JSON field that can accept a name or number."', args=[d.arg(name='servicePort', type=d.T.string)]), - withServicePort(servicePort): { spec+: { backend+: { servicePort: servicePort } } }, - }, - '#withIngressClassName':: d.fn(help='"IngressClassName is the name of the IngressClass cluster resource. 
The associated IngressClass defines which controller will implement the resource. This replaces the deprecated `kubernetes.io/ingress.class` annotation. For backwards compatibility, when that annotation is set, it must be given precedence over this field. The controller may emit a warning if the field and annotation have different values. Implementations of this API should ignore Ingresses without a class specified. An IngressClass resource may be marked as default, which can be used to set a default value for this field. For more information, refer to the IngressClass documentation."', args=[d.arg(name='ingressClassName', type=d.T.string)]), - withIngressClassName(ingressClassName): { spec+: { ingressClassName: ingressClassName } }, - '#withRules':: d.fn(help='"A list of host rules used to configure the Ingress. If unspecified, or no rule matches, all traffic is sent to the default backend."', args=[d.arg(name='rules', type=d.T.array)]), - withRules(rules): { spec+: { rules: if std.isArray(v=rules) then rules else [rules] } }, - '#withRulesMixin':: d.fn(help='"A list of host rules used to configure the Ingress. If unspecified, or no rule matches, all traffic is sent to the default backend."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='rules', type=d.T.array)]), - withRulesMixin(rules): { spec+: { rules+: if std.isArray(v=rules) then rules else [rules] } }, - '#withTls':: d.fn(help='"TLS configuration. Currently the Ingress only supports a single TLS port, 443. If multiple members of this list specify different hosts, they will be multiplexed on the same port according to the hostname specified through the SNI TLS extension, if the ingress controller fulfilling the ingress supports SNI."', args=[d.arg(name='tls', type=d.T.array)]), - withTls(tls): { spec+: { tls: if std.isArray(v=tls) then tls else [tls] } }, - '#withTlsMixin':: d.fn(help='"TLS configuration. Currently the Ingress only supports a single TLS port, 443. If multiple members of this list specify different hosts, they will be multiplexed on the same port according to the hostname specified through the SNI TLS extension, if the ingress controller fulfilling the ingress supports SNI."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='tls', type=d.T.array)]), - withTlsMixin(tls): { spec+: { tls+: if std.isArray(v=tls) then tls else [tls] } }, - }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1beta1/ingressBackend.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1beta1/ingressBackend.libsonnet deleted file mode 100644 index ee01f37e01d..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1beta1/ingressBackend.libsonnet +++ /dev/null @@ -1,19 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='ingressBackend', url='', help='"IngressBackend describes all endpoints for a given service and port."'), - '#resource':: d.obj(help='"TypedLocalObjectReference contains enough information to let you locate the typed referenced object inside the same namespace."'), - resource: { - '#withApiGroup':: d.fn(help='"APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. 
For any other third-party types, APIGroup is required."', args=[d.arg(name='apiGroup', type=d.T.string)]), - withApiGroup(apiGroup): { resource+: { apiGroup: apiGroup } }, - '#withKind':: d.fn(help='"Kind is the type of resource being referenced"', args=[d.arg(name='kind', type=d.T.string)]), - withKind(kind): { resource+: { kind: kind } }, - '#withName':: d.fn(help='"Name is the name of resource being referenced"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { resource+: { name: name } }, - }, - '#withServiceName':: d.fn(help='"Specifies the name of the referenced service."', args=[d.arg(name='serviceName', type=d.T.string)]), - withServiceName(serviceName): { serviceName: serviceName }, - '#withServicePort':: d.fn(help='"IntOrString is a type that can hold an int32 or a string. When used in JSON or YAML marshalling and unmarshalling, it produces or consumes the inner type. This allows you to have, for example, a JSON field that can accept a name or number."', args=[d.arg(name='servicePort', type=d.T.string)]), - withServicePort(servicePort): { servicePort: servicePort }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1beta1/ingressClass.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1beta1/ingressClass.libsonnet deleted file mode 100644 index 2ade9e9ee52..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1beta1/ingressClass.libsonnet +++ /dev/null @@ -1,74 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='ingressClass', url='', help='"IngressClass represents the class of the Ingress, referenced by the Ingress Spec. The `ingressclass.kubernetes.io/is-default-class` annotation can be used to indicate that an IngressClass should be considered default. When a single IngressClass resource has this annotation set to true, new Ingress resources without a class specified will be assigned this default class."'), - '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), - metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), - withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), - withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. 
This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, - '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), - withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, - '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), - withDeletionGracePeriodSeconds(deletionGracePeriodSeconds): { metadata+: { deletionGracePeriodSeconds: deletionGracePeriodSeconds } }, - '#withDeletionTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='deletionTimestamp', type=d.T.string)]), - withDeletionTimestamp(deletionTimestamp): { metadata+: { deletionTimestamp: deletionTimestamp } }, - '#withFinalizers':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. 
Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), - withGenerateName(generateName): { metadata+: { generateName: generateName } }, - '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), - withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), - withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), - withLabelsMixin(labels): { metadata+: { labels+: labels } }, - '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. 
A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { metadata+: { namespace: namespace } }, - '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, - '#withOwnerReferencesMixin':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, - '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), - withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), - withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), - withUid(uid): { metadata+: { uid: uid } }, - }, - '#new':: d.fn(help='new returns an instance of IngressClass', args=[d.arg(name='name', type=d.T.string)]), - new(name): { - apiVersion: 'networking.k8s.io/v1beta1', - kind: 'IngressClass', - } + self.metadata.withName(name=name), - '#spec':: d.obj(help='"IngressClassSpec provides information about the class of an Ingress."'), - spec: { - '#parameters':: d.obj(help='"IngressClassParametersReference identifies an API object. This can be used to specify a cluster or namespace-scoped resource."'), - parameters: { - '#withApiGroup':: d.fn(help='"APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required."', args=[d.arg(name='apiGroup', type=d.T.string)]), - withApiGroup(apiGroup): { spec+: { parameters+: { apiGroup: apiGroup } } }, - '#withKind':: d.fn(help='"Kind is the type of resource being referenced."', args=[d.arg(name='kind', type=d.T.string)]), - withKind(kind): { spec+: { parameters+: { kind: kind } } }, - '#withName':: d.fn(help='"Name is the name of resource being referenced."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { spec+: { parameters+: { name: name } } }, - '#withNamespace':: d.fn(help='"Namespace is the namespace of the resource being referenced. This field is required when scope is set to \\"Namespace\\" and must be unset when scope is set to \\"Cluster\\"."', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { spec+: { parameters+: { namespace: namespace } } }, - '#withScope':: d.fn(help='"Scope represents if this refers to a cluster or namespace scoped resource. This may be set to \\"Cluster\\" (default) or \\"Namespace\\". Field can be enabled with IngressClassNamespacedParams feature gate."', args=[d.arg(name='scope', type=d.T.string)]), - withScope(scope): { spec+: { parameters+: { scope: scope } } }, - }, - '#withController':: d.fn(help='"Controller refers to the name of the controller that should handle this class. This allows for different \\"flavors\\" that are controlled by the same controller. For example, you may have different Parameters for the same implementing controller. This should be specified as a domain-prefixed path no more than 250 characters in length, e.g. \\"acme.io/ingress-controller\\". 
This field is immutable."', args=[d.arg(name='controller', type=d.T.string)]), - withController(controller): { spec+: { controller: controller } }, - }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1beta1/ingressRule.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1beta1/ingressRule.libsonnet deleted file mode 100644 index ebba87ea6bf..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1beta1/ingressRule.libsonnet +++ /dev/null @@ -1,15 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='ingressRule', url='', help='"IngressRule represents the rules mapping the paths under a specified host to the related backend services. Incoming requests are first evaluated for a host match, then routed to the backend associated with the matching IngressRuleValue."'), - '#http':: d.obj(help="\"HTTPIngressRuleValue is a list of http selectors pointing to backends. In the example: http://\u003chost\u003e/\u003cpath\u003e?\u003csearchpart\u003e -\u003e backend where where parts of the url correspond to RFC 3986, this resource will be used to match against everything after the last '/' and before the first '?' or '#'.\""), - http: { - '#withPaths':: d.fn(help='"A collection of paths that map requests to backends."', args=[d.arg(name='paths', type=d.T.array)]), - withPaths(paths): { http+: { paths: if std.isArray(v=paths) then paths else [paths] } }, - '#withPathsMixin':: d.fn(help='"A collection of paths that map requests to backends."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='paths', type=d.T.array)]), - withPathsMixin(paths): { http+: { paths+: if std.isArray(v=paths) then paths else [paths] } }, - }, - '#withHost':: d.fn(help="\"Host is the fully qualified domain name of a network host, as defined by RFC 3986. Note the following deviations from the \\\"host\\\" part of the URI as defined in RFC 3986: 1. IPs are not allowed. Currently an IngressRuleValue can only apply to\\n the IP in the Spec of the parent Ingress.\\n2. The `:` delimiter is not respected because ports are not allowed.\\n\\t Currently the port of an Ingress is implicitly :80 for http and\\n\\t :443 for https.\\nBoth these may change in the future. Incoming requests are matched against the host before the IngressRuleValue. If the host is unspecified, the Ingress routes all traffic based on the specified IngressRuleValue.\\n\\nHost can be \\\"precise\\\" which is a domain name without the terminating dot of a network host (e.g. \\\"foo.bar.com\\\") or \\\"wildcard\\\", which is a domain name prefixed with a single wildcard label (e.g. \\\"*.foo.com\\\"). The wildcard character '*' must appear by itself as the first DNS label and matches only a single label. You cannot have a wildcard label by itself (e.g. Host == \\\"*\\\"). Requests will be matched against the Host field in the following way: 1. If Host is precise, the request matches this rule if the http host header is equal to Host. 2. 
If Host is a wildcard, then the request matches this rule if the http host header is to equal to the suffix (removing the first label) of the wildcard rule.\"", args=[d.arg(name='host', type=d.T.string)]), - withHost(host): { host: host }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1beta1/ingressSpec.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1beta1/ingressSpec.libsonnet deleted file mode 100644 index 35156d74036..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1beta1/ingressSpec.libsonnet +++ /dev/null @@ -1,32 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='ingressSpec', url='', help='"IngressSpec describes the Ingress the user wishes to exist."'), - '#backend':: d.obj(help='"IngressBackend describes all endpoints for a given service and port."'), - backend: { - '#resource':: d.obj(help='"TypedLocalObjectReference contains enough information to let you locate the typed referenced object inside the same namespace."'), - resource: { - '#withApiGroup':: d.fn(help='"APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required."', args=[d.arg(name='apiGroup', type=d.T.string)]), - withApiGroup(apiGroup): { backend+: { resource+: { apiGroup: apiGroup } } }, - '#withKind':: d.fn(help='"Kind is the type of resource being referenced"', args=[d.arg(name='kind', type=d.T.string)]), - withKind(kind): { backend+: { resource+: { kind: kind } } }, - '#withName':: d.fn(help='"Name is the name of resource being referenced"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { backend+: { resource+: { name: name } } }, - }, - '#withServiceName':: d.fn(help='"Specifies the name of the referenced service."', args=[d.arg(name='serviceName', type=d.T.string)]), - withServiceName(serviceName): { backend+: { serviceName: serviceName } }, - '#withServicePort':: d.fn(help='"IntOrString is a type that can hold an int32 or a string. When used in JSON or YAML marshalling and unmarshalling, it produces or consumes the inner type. This allows you to have, for example, a JSON field that can accept a name or number."', args=[d.arg(name='servicePort', type=d.T.string)]), - withServicePort(servicePort): { backend+: { servicePort: servicePort } }, - }, - '#withIngressClassName':: d.fn(help='"IngressClassName is the name of the IngressClass cluster resource. The associated IngressClass defines which controller will implement the resource. This replaces the deprecated `kubernetes.io/ingress.class` annotation. For backwards compatibility, when that annotation is set, it must be given precedence over this field. The controller may emit a warning if the field and annotation have different values. Implementations of this API should ignore Ingresses without a class specified. An IngressClass resource may be marked as default, which can be used to set a default value for this field. For more information, refer to the IngressClass documentation."', args=[d.arg(name='ingressClassName', type=d.T.string)]), - withIngressClassName(ingressClassName): { ingressClassName: ingressClassName }, - '#withRules':: d.fn(help='"A list of host rules used to configure the Ingress. 
If unspecified, or no rule matches, all traffic is sent to the default backend."', args=[d.arg(name='rules', type=d.T.array)]), - withRules(rules): { rules: if std.isArray(v=rules) then rules else [rules] }, - '#withRulesMixin':: d.fn(help='"A list of host rules used to configure the Ingress. If unspecified, or no rule matches, all traffic is sent to the default backend."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='rules', type=d.T.array)]), - withRulesMixin(rules): { rules+: if std.isArray(v=rules) then rules else [rules] }, - '#withTls':: d.fn(help='"TLS configuration. Currently the Ingress only supports a single TLS port, 443. If multiple members of this list specify different hosts, they will be multiplexed on the same port according to the hostname specified through the SNI TLS extension, if the ingress controller fulfilling the ingress supports SNI."', args=[d.arg(name='tls', type=d.T.array)]), - withTls(tls): { tls: if std.isArray(v=tls) then tls else [tls] }, - '#withTlsMixin':: d.fn(help='"TLS configuration. Currently the Ingress only supports a single TLS port, 443. If multiple members of this list specify different hosts, they will be multiplexed on the same port according to the hostname specified through the SNI TLS extension, if the ingress controller fulfilling the ingress supports SNI."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='tls', type=d.T.array)]), - withTlsMixin(tls): { tls+: if std.isArray(v=tls) then tls else [tls] }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1beta1/ingressStatus.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1beta1/ingressStatus.libsonnet deleted file mode 100644 index 7bae060053d..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1beta1/ingressStatus.libsonnet +++ /dev/null @@ -1,13 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='ingressStatus', url='', help='"IngressStatus describe the current state of the Ingress."'), - '#loadBalancer':: d.obj(help='"LoadBalancerStatus represents the status of a load-balancer."'), - loadBalancer: { - '#withIngress':: d.fn(help='"Ingress is a list containing ingress points for the load-balancer. Traffic intended for the service should be sent to these ingress points."', args=[d.arg(name='ingress', type=d.T.array)]), - withIngress(ingress): { loadBalancer+: { ingress: if std.isArray(v=ingress) then ingress else [ingress] } }, - '#withIngressMixin':: d.fn(help='"Ingress is a list containing ingress points for the load-balancer. 
Traffic intended for the service should be sent to these ingress points."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ingress', type=d.T.array)]), - withIngressMixin(ingress): { loadBalancer+: { ingress+: if std.isArray(v=ingress) then ingress else [ingress] } }, - }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1beta1/ingressTLS.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1beta1/ingressTLS.libsonnet deleted file mode 100644 index 95d59bcef55..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1beta1/ingressTLS.libsonnet +++ /dev/null @@ -1,12 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='ingressTLS', url='', help='"IngressTLS describes the transport layer security associated with an Ingress."'), - '#withHosts':: d.fn(help='"Hosts are a list of hosts included in the TLS certificate. The values in this list must match the name/s used in the tlsSecret. Defaults to the wildcard host setting for the loadbalancer controller fulfilling this Ingress, if left unspecified."', args=[d.arg(name='hosts', type=d.T.array)]), - withHosts(hosts): { hosts: if std.isArray(v=hosts) then hosts else [hosts] }, - '#withHostsMixin':: d.fn(help='"Hosts are a list of hosts included in the TLS certificate. The values in this list must match the name/s used in the tlsSecret. Defaults to the wildcard host setting for the loadbalancer controller fulfilling this Ingress, if left unspecified."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='hosts', type=d.T.array)]), - withHostsMixin(hosts): { hosts+: if std.isArray(v=hosts) then hosts else [hosts] }, - '#withSecretName':: d.fn(help='"SecretName is the name of the secret used to terminate TLS traffic on port 443. Field is left optional to allow TLS routing based on SNI hostname alone. 
If the SNI host in a listener conflicts with the \\"Host\\" header field used by an IngressRule, the SNI host is used for termination and value of the Host header is used for routing."', args=[d.arg(name='secretName', type=d.T.string)]), - withSecretName(secretName): { secretName: secretName }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1beta1/main.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1beta1/main.libsonnet deleted file mode 100644 index 798e438d625..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1beta1/main.libsonnet +++ /dev/null @@ -1,15 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='v1beta1', url='', help=''), - httpIngressPath: (import 'httpIngressPath.libsonnet'), - httpIngressRuleValue: (import 'httpIngressRuleValue.libsonnet'), - ingress: (import 'ingress.libsonnet'), - ingressBackend: (import 'ingressBackend.libsonnet'), - ingressClass: (import 'ingressClass.libsonnet'), - ingressClassParametersReference: (import 'ingressClassParametersReference.libsonnet'), - ingressClassSpec: (import 'ingressClassSpec.libsonnet'), - ingressRule: (import 'ingressRule.libsonnet'), - ingressSpec: (import 'ingressSpec.libsonnet'), - ingressStatus: (import 'ingressStatus.libsonnet'), - ingressTLS: (import 'ingressTLS.libsonnet'), -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/node/v1alpha1/main.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/node/v1alpha1/main.libsonnet deleted file mode 100644 index fc1ded9abfd..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/node/v1alpha1/main.libsonnet +++ /dev/null @@ -1,8 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='v1alpha1', url='', help=''), - overhead: (import 'overhead.libsonnet'), - runtimeClass: (import 'runtimeClass.libsonnet'), - runtimeClassSpec: (import 'runtimeClassSpec.libsonnet'), - scheduling: (import 'scheduling.libsonnet'), -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/node/v1alpha1/runtimeClass.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/node/v1alpha1/runtimeClass.libsonnet deleted file mode 100644 index 30794b543e2..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/node/v1alpha1/runtimeClass.libsonnet +++ /dev/null @@ -1,79 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='runtimeClass', url='', help='"RuntimeClass defines a class of container runtime supported in the cluster. The RuntimeClass is used to determine which container runtime is used to run all containers in a pod. RuntimeClasses are (currently) manually defined by a user or cluster provisioner, and referenced in the PodSpec. The Kubelet is responsible for resolving the RuntimeClassName reference before running the pod. For more details, see https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md"'), - '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), - metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. 
They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), - withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), - withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, - '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), - withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, - '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), - withDeletionGracePeriodSeconds(deletionGracePeriodSeconds): { metadata+: { deletionGracePeriodSeconds: deletionGracePeriodSeconds } }, - '#withDeletionTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='deletionTimestamp', type=d.T.string)]), - withDeletionTimestamp(deletionTimestamp): { metadata+: { deletionTimestamp: deletionTimestamp } }, - '#withFinalizers':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. 
Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), - withGenerateName(generateName): { metadata+: { generateName: generateName } }, - '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), - withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), - withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. 
More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), - withLabelsMixin(labels): { metadata+: { labels+: labels } }, - '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { metadata+: { namespace: namespace } }, - '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, - '#withOwnerReferencesMixin':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. 
There cannot be more than one managing controller."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, - '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), - withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), - withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), - withUid(uid): { metadata+: { uid: uid } }, - }, - '#new':: d.fn(help='new returns an instance of RuntimeClass', args=[d.arg(name='name', type=d.T.string)]), - new(name): { - apiVersion: 'node.k8s.io/v1alpha1', - kind: 'RuntimeClass', - } + self.metadata.withName(name=name), - '#spec':: d.obj(help='"RuntimeClassSpec is a specification of a RuntimeClass. It contains parameters that are required to describe the RuntimeClass to the Container Runtime Interface (CRI) implementation, as well as any other components that need to understand how the pod will be run. The RuntimeClassSpec is immutable."'), - spec: { - '#overhead':: d.obj(help='"Overhead structure represents the resource overhead associated with running a pod."'), - overhead: { - '#withPodFixed':: d.fn(help='"PodFixed represents the fixed resource overhead associated with running a pod."', args=[d.arg(name='podFixed', type=d.T.object)]), - withPodFixed(podFixed): { spec+: { overhead+: { podFixed: podFixed } } }, - '#withPodFixedMixin':: d.fn(help='"PodFixed represents the fixed resource overhead associated with running a pod."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='podFixed', type=d.T.object)]), - withPodFixedMixin(podFixed): { spec+: { overhead+: { podFixed+: podFixed } } }, - }, - '#scheduling':: d.obj(help='"Scheduling specifies the scheduling constraints for nodes supporting a RuntimeClass."'), - scheduling: { - '#withNodeSelector':: d.fn(help="\"nodeSelector lists labels that must be present on nodes that support this RuntimeClass. Pods using this RuntimeClass can only be scheduled to a node matched by this selector. 
The RuntimeClass nodeSelector is merged with a pod's existing nodeSelector. Any conflicts will cause the pod to be rejected in admission.\"", args=[d.arg(name='nodeSelector', type=d.T.object)]), - withNodeSelector(nodeSelector): { spec+: { scheduling+: { nodeSelector: nodeSelector } } }, - '#withNodeSelectorMixin':: d.fn(help="\"nodeSelector lists labels that must be present on nodes that support this RuntimeClass. Pods using this RuntimeClass can only be scheduled to a node matched by this selector. The RuntimeClass nodeSelector is merged with a pod's existing nodeSelector. Any conflicts will cause the pod to be rejected in admission.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='nodeSelector', type=d.T.object)]), - withNodeSelectorMixin(nodeSelector): { spec+: { scheduling+: { nodeSelector+: nodeSelector } } }, - '#withTolerations':: d.fn(help='"tolerations are appended (excluding duplicates) to pods running with this RuntimeClass during admission, effectively unioning the set of nodes tolerated by the pod and the RuntimeClass."', args=[d.arg(name='tolerations', type=d.T.array)]), - withTolerations(tolerations): { spec+: { scheduling+: { tolerations: if std.isArray(v=tolerations) then tolerations else [tolerations] } } }, - '#withTolerationsMixin':: d.fn(help='"tolerations are appended (excluding duplicates) to pods running with this RuntimeClass during admission, effectively unioning the set of nodes tolerated by the pod and the RuntimeClass."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='tolerations', type=d.T.array)]), - withTolerationsMixin(tolerations): { spec+: { scheduling+: { tolerations+: if std.isArray(v=tolerations) then tolerations else [tolerations] } } }, - }, - '#withRuntimeHandler':: d.fn(help='"RuntimeHandler specifies the underlying runtime and configuration that the CRI implementation will use to handle pods of this class. The possible values are specific to the node & CRI configuration. It is assumed that all handlers are available on every node, and handlers of the same name are equivalent on every node. For example, a handler called \\"runc\\" might specify that the runc OCI runtime (using native Linux containers) will be used to run the containers in a pod. The RuntimeHandler must be lowercase, conform to the DNS Label (RFC 1123) requirements, and is immutable."', args=[d.arg(name='runtimeHandler', type=d.T.string)]), - withRuntimeHandler(runtimeHandler): { spec+: { runtimeHandler: runtimeHandler } }, - }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/node/v1alpha1/runtimeClassSpec.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/node/v1alpha1/runtimeClassSpec.libsonnet deleted file mode 100644 index 98b1026db37..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/node/v1alpha1/runtimeClassSpec.libsonnet +++ /dev/null @@ -1,26 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='runtimeClassSpec', url='', help='"RuntimeClassSpec is a specification of a RuntimeClass. It contains parameters that are required to describe the RuntimeClass to the Container Runtime Interface (CRI) implementation, as well as any other components that need to understand how the pod will be run. 
The RuntimeClassSpec is immutable."'), - '#overhead':: d.obj(help='"Overhead structure represents the resource overhead associated with running a pod."'), - overhead: { - '#withPodFixed':: d.fn(help='"PodFixed represents the fixed resource overhead associated with running a pod."', args=[d.arg(name='podFixed', type=d.T.object)]), - withPodFixed(podFixed): { overhead+: { podFixed: podFixed } }, - '#withPodFixedMixin':: d.fn(help='"PodFixed represents the fixed resource overhead associated with running a pod."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='podFixed', type=d.T.object)]), - withPodFixedMixin(podFixed): { overhead+: { podFixed+: podFixed } }, - }, - '#scheduling':: d.obj(help='"Scheduling specifies the scheduling constraints for nodes supporting a RuntimeClass."'), - scheduling: { - '#withNodeSelector':: d.fn(help="\"nodeSelector lists labels that must be present on nodes that support this RuntimeClass. Pods using this RuntimeClass can only be scheduled to a node matched by this selector. The RuntimeClass nodeSelector is merged with a pod's existing nodeSelector. Any conflicts will cause the pod to be rejected in admission.\"", args=[d.arg(name='nodeSelector', type=d.T.object)]), - withNodeSelector(nodeSelector): { scheduling+: { nodeSelector: nodeSelector } }, - '#withNodeSelectorMixin':: d.fn(help="\"nodeSelector lists labels that must be present on nodes that support this RuntimeClass. Pods using this RuntimeClass can only be scheduled to a node matched by this selector. The RuntimeClass nodeSelector is merged with a pod's existing nodeSelector. Any conflicts will cause the pod to be rejected in admission.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='nodeSelector', type=d.T.object)]), - withNodeSelectorMixin(nodeSelector): { scheduling+: { nodeSelector+: nodeSelector } }, - '#withTolerations':: d.fn(help='"tolerations are appended (excluding duplicates) to pods running with this RuntimeClass during admission, effectively unioning the set of nodes tolerated by the pod and the RuntimeClass."', args=[d.arg(name='tolerations', type=d.T.array)]), - withTolerations(tolerations): { scheduling+: { tolerations: if std.isArray(v=tolerations) then tolerations else [tolerations] } }, - '#withTolerationsMixin':: d.fn(help='"tolerations are appended (excluding duplicates) to pods running with this RuntimeClass during admission, effectively unioning the set of nodes tolerated by the pod and the RuntimeClass."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='tolerations', type=d.T.array)]), - withTolerationsMixin(tolerations): { scheduling+: { tolerations+: if std.isArray(v=tolerations) then tolerations else [tolerations] } }, - }, - '#withRuntimeHandler':: d.fn(help='"RuntimeHandler specifies the underlying runtime and configuration that the CRI implementation will use to handle pods of this class. The possible values are specific to the node & CRI configuration. It is assumed that all handlers are available on every node, and handlers of the same name are equivalent on every node. For example, a handler called \\"runc\\" might specify that the runc OCI runtime (using native Linux containers) will be used to run the containers in a pod. 
The RuntimeHandler must be lowercase, conform to the DNS Label (RFC 1123) requirements, and is immutable."', args=[d.arg(name='runtimeHandler', type=d.T.string)]), - withRuntimeHandler(runtimeHandler): { runtimeHandler: runtimeHandler }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/node/v1beta1/main.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/node/v1beta1/main.libsonnet deleted file mode 100644 index cbc6521678c..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/node/v1beta1/main.libsonnet +++ /dev/null @@ -1,7 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='v1beta1', url='', help=''), - overhead: (import 'overhead.libsonnet'), - runtimeClass: (import 'runtimeClass.libsonnet'), - scheduling: (import 'scheduling.libsonnet'), -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/node/v1beta1/overhead.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/node/v1beta1/overhead.libsonnet deleted file mode 100644 index fa33b3e39f3..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/node/v1beta1/overhead.libsonnet +++ /dev/null @@ -1,10 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='overhead', url='', help='"Overhead structure represents the resource overhead associated with running a pod."'), - '#withPodFixed':: d.fn(help='"PodFixed represents the fixed resource overhead associated with running a pod."', args=[d.arg(name='podFixed', type=d.T.object)]), - withPodFixed(podFixed): { podFixed: podFixed }, - '#withPodFixedMixin':: d.fn(help='"PodFixed represents the fixed resource overhead associated with running a pod."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='podFixed', type=d.T.object)]), - withPodFixedMixin(podFixed): { podFixed+: podFixed }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/node/v1beta1/runtimeClass.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/node/v1beta1/runtimeClass.libsonnet deleted file mode 100644 index ca5211d7580..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/node/v1beta1/runtimeClass.libsonnet +++ /dev/null @@ -1,76 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='runtimeClass', url='', help='"RuntimeClass defines a class of container runtime supported in the cluster. The RuntimeClass is used to determine which container runtime is used to run all containers in a pod. RuntimeClasses are (currently) manually defined by a user or cluster provisioner, and referenced in the PodSpec. The Kubelet is responsible for resolving the RuntimeClassName reference before running the pod. For more details, see https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md"'), - '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), - metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. 
More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), - withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), - withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, - '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), - withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, - '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), - withDeletionGracePeriodSeconds(deletionGracePeriodSeconds): { metadata+: { deletionGracePeriodSeconds: deletionGracePeriodSeconds } }, - '#withDeletionTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='deletionTimestamp', type=d.T.string)]), - withDeletionTimestamp(deletionTimestamp): { metadata+: { deletionTimestamp: deletionTimestamp } }, - '#withFinalizers':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. 
Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), - withGenerateName(generateName): { metadata+: { generateName: generateName } }, - '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), - withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), - withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), - withLabelsMixin(labels): { metadata+: { labels+: labels } }, - '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. 
A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { metadata+: { namespace: namespace } }, - '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, - '#withOwnerReferencesMixin':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. 
There cannot be more than one managing controller."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, - '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), - withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), - withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), - withUid(uid): { metadata+: { uid: uid } }, - }, - '#new':: d.fn(help='new returns an instance of RuntimeClass', args=[d.arg(name='name', type=d.T.string)]), - new(name): { - apiVersion: 'node.k8s.io/v1beta1', - kind: 'RuntimeClass', - } + self.metadata.withName(name=name), - '#overhead':: d.obj(help='"Overhead structure represents the resource overhead associated with running a pod."'), - overhead: { - '#withPodFixed':: d.fn(help='"PodFixed represents the fixed resource overhead associated with running a pod."', args=[d.arg(name='podFixed', type=d.T.object)]), - withPodFixed(podFixed): { overhead+: { podFixed: podFixed } }, - '#withPodFixedMixin':: d.fn(help='"PodFixed represents the fixed resource overhead associated with running a pod."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='podFixed', type=d.T.object)]), - withPodFixedMixin(podFixed): { overhead+: { podFixed+: podFixed } }, - }, - '#scheduling':: d.obj(help='"Scheduling specifies the scheduling constraints for nodes supporting a RuntimeClass."'), - scheduling: { - '#withNodeSelector':: d.fn(help="\"nodeSelector lists labels that must be present on nodes that support this RuntimeClass. Pods using this RuntimeClass can only be scheduled to a node matched by this selector. The RuntimeClass nodeSelector is merged with a pod's existing nodeSelector. 
Any conflicts will cause the pod to be rejected in admission.\"", args=[d.arg(name='nodeSelector', type=d.T.object)]), - withNodeSelector(nodeSelector): { scheduling+: { nodeSelector: nodeSelector } }, - '#withNodeSelectorMixin':: d.fn(help="\"nodeSelector lists labels that must be present on nodes that support this RuntimeClass. Pods using this RuntimeClass can only be scheduled to a node matched by this selector. The RuntimeClass nodeSelector is merged with a pod's existing nodeSelector. Any conflicts will cause the pod to be rejected in admission.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='nodeSelector', type=d.T.object)]), - withNodeSelectorMixin(nodeSelector): { scheduling+: { nodeSelector+: nodeSelector } }, - '#withTolerations':: d.fn(help='"tolerations are appended (excluding duplicates) to pods running with this RuntimeClass during admission, effectively unioning the set of nodes tolerated by the pod and the RuntimeClass."', args=[d.arg(name='tolerations', type=d.T.array)]), - withTolerations(tolerations): { scheduling+: { tolerations: if std.isArray(v=tolerations) then tolerations else [tolerations] } }, - '#withTolerationsMixin':: d.fn(help='"tolerations are appended (excluding duplicates) to pods running with this RuntimeClass during admission, effectively unioning the set of nodes tolerated by the pod and the RuntimeClass."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='tolerations', type=d.T.array)]), - withTolerationsMixin(tolerations): { scheduling+: { tolerations+: if std.isArray(v=tolerations) then tolerations else [tolerations] } }, - }, - '#withHandler':: d.fn(help='"Handler specifies the underlying runtime and configuration that the CRI implementation will use to handle pods of this class. The possible values are specific to the node & CRI configuration. It is assumed that all handlers are available on every node, and handlers of the same name are equivalent on every node. For example, a handler called \\"runc\\" might specify that the runc OCI runtime (using native Linux containers) will be used to run the containers in a pod. The Handler must be lowercase, conform to the DNS Label (RFC 1123) requirements, and is immutable."', args=[d.arg(name='handler', type=d.T.string)]), - withHandler(handler): { handler: handler }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/node/v1beta1/scheduling.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/node/v1beta1/scheduling.libsonnet deleted file mode 100644 index 066bfbc80b2..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/node/v1beta1/scheduling.libsonnet +++ /dev/null @@ -1,14 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='scheduling', url='', help='"Scheduling specifies the scheduling constraints for nodes supporting a RuntimeClass."'), - '#withNodeSelector':: d.fn(help="\"nodeSelector lists labels that must be present on nodes that support this RuntimeClass. Pods using this RuntimeClass can only be scheduled to a node matched by this selector. The RuntimeClass nodeSelector is merged with a pod's existing nodeSelector. 
Any conflicts will cause the pod to be rejected in admission.\"", args=[d.arg(name='nodeSelector', type=d.T.object)]), - withNodeSelector(nodeSelector): { nodeSelector: nodeSelector }, - '#withNodeSelectorMixin':: d.fn(help="\"nodeSelector lists labels that must be present on nodes that support this RuntimeClass. Pods using this RuntimeClass can only be scheduled to a node matched by this selector. The RuntimeClass nodeSelector is merged with a pod's existing nodeSelector. Any conflicts will cause the pod to be rejected in admission.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='nodeSelector', type=d.T.object)]), - withNodeSelectorMixin(nodeSelector): { nodeSelector+: nodeSelector }, - '#withTolerations':: d.fn(help='"tolerations are appended (excluding duplicates) to pods running with this RuntimeClass during admission, effectively unioning the set of nodes tolerated by the pod and the RuntimeClass."', args=[d.arg(name='tolerations', type=d.T.array)]), - withTolerations(tolerations): { tolerations: if std.isArray(v=tolerations) then tolerations else [tolerations] }, - '#withTolerationsMixin':: d.fn(help='"tolerations are appended (excluding duplicates) to pods running with this RuntimeClass during admission, effectively unioning the set of nodes tolerated by the pod and the RuntimeClass."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='tolerations', type=d.T.array)]), - withTolerationsMixin(tolerations): { tolerations+: if std.isArray(v=tolerations) then tolerations else [tolerations] }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1beta1/allowedCSIDriver.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1beta1/allowedCSIDriver.libsonnet deleted file mode 100644 index 70703dbba0a..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1beta1/allowedCSIDriver.libsonnet +++ /dev/null @@ -1,8 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='allowedCSIDriver', url='', help='"AllowedCSIDriver represents a single inline CSI Driver that is allowed to be used."'), - '#withName':: d.fn(help='"Name is the registered name of the CSI driver"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { name: name }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1beta1/allowedFlexVolume.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1beta1/allowedFlexVolume.libsonnet deleted file mode 100644 index ed4e7b9f827..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1beta1/allowedFlexVolume.libsonnet +++ /dev/null @@ -1,8 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='allowedFlexVolume', url='', help='"AllowedFlexVolume represents a single Flexvolume that is allowed to be used."'), - '#withDriver':: d.fn(help='"driver is the name of the Flexvolume driver."', args=[d.arg(name='driver', type=d.T.string)]), - withDriver(driver): { driver: driver }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1beta1/allowedHostPath.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1beta1/allowedHostPath.libsonnet deleted file mode 100644 index 
921e10cf0e7..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1beta1/allowedHostPath.libsonnet +++ /dev/null @@ -1,10 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='allowedHostPath', url='', help='"AllowedHostPath defines the host volume conditions that will be enabled by a policy for pods to use. It requires the path prefix to be defined."'), - '#withPathPrefix':: d.fn(help='"pathPrefix is the path prefix that the host volume must match. It does not support `*`. Trailing slashes are trimmed when validating the path prefix with a host path.\\n\\nExamples: `/foo` would allow `/foo`, `/foo/` and `/foo/bar` `/foo` would not allow `/food` or `/etc/foo`"', args=[d.arg(name='pathPrefix', type=d.T.string)]), - withPathPrefix(pathPrefix): { pathPrefix: pathPrefix }, - '#withReadOnly':: d.fn(help='"when set to true, will allow host volumes matching the pathPrefix only if all volume mounts are readOnly."', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { readOnly: readOnly }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1beta1/fsGroupStrategyOptions.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1beta1/fsGroupStrategyOptions.libsonnet deleted file mode 100644 index 151bae0196c..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1beta1/fsGroupStrategyOptions.libsonnet +++ /dev/null @@ -1,12 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='fsGroupStrategyOptions', url='', help='"FSGroupStrategyOptions defines the strategy type and options used to create the strategy."'), - '#withRanges':: d.fn(help='"ranges are the allowed ranges of fs groups. If you would like to force a single fs group then supply a single range with the same start and end. Required for MustRunAs."', args=[d.arg(name='ranges', type=d.T.array)]), - withRanges(ranges): { ranges: if std.isArray(v=ranges) then ranges else [ranges] }, - '#withRangesMixin':: d.fn(help='"ranges are the allowed ranges of fs groups. If you would like to force a single fs group then supply a single range with the same start and end. Required for MustRunAs."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ranges', type=d.T.array)]), - withRangesMixin(ranges): { ranges+: if std.isArray(v=ranges) then ranges else [ranges] }, - '#withRule':: d.fn(help='"rule is the strategy that will dictate what FSGroup is used in the SecurityContext."', args=[d.arg(name='rule', type=d.T.string)]), - withRule(rule): { rule: rule }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1beta1/hostPortRange.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1beta1/hostPortRange.libsonnet deleted file mode 100644 index a804f6faff0..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1beta1/hostPortRange.libsonnet +++ /dev/null @@ -1,10 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='hostPortRange', url='', help='"HostPortRange defines a range of host ports that will be enabled by a policy for pods to use. 
It requires both the start and end to be defined."'), - '#withMax':: d.fn(help='"max is the end of the range, inclusive."', args=[d.arg(name='max', type=d.T.integer)]), - withMax(max): { max: max }, - '#withMin':: d.fn(help='"min is the start of the range, inclusive."', args=[d.arg(name='min', type=d.T.integer)]), - withMin(min): { min: min }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1beta1/idRange.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1beta1/idRange.libsonnet deleted file mode 100644 index 902c4bb6395..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1beta1/idRange.libsonnet +++ /dev/null @@ -1,10 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='idRange', url='', help='"IDRange provides a min/max of an allowed range of IDs."'), - '#withMax':: d.fn(help='"max is the end of the range, inclusive."', args=[d.arg(name='max', type=d.T.integer)]), - withMax(max): { max: max }, - '#withMin':: d.fn(help='"min is the start of the range, inclusive."', args=[d.arg(name='min', type=d.T.integer)]), - withMin(min): { min: min }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1beta1/main.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1beta1/main.libsonnet deleted file mode 100644 index 3b0bb525f0f..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1beta1/main.libsonnet +++ /dev/null @@ -1,21 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='v1beta1', url='', help=''), - allowedCSIDriver: (import 'allowedCSIDriver.libsonnet'), - allowedFlexVolume: (import 'allowedFlexVolume.libsonnet'), - allowedHostPath: (import 'allowedHostPath.libsonnet'), - eviction: (import 'eviction.libsonnet'), - fsGroupStrategyOptions: (import 'fsGroupStrategyOptions.libsonnet'), - hostPortRange: (import 'hostPortRange.libsonnet'), - idRange: (import 'idRange.libsonnet'), - podDisruptionBudget: (import 'podDisruptionBudget.libsonnet'), - podDisruptionBudgetSpec: (import 'podDisruptionBudgetSpec.libsonnet'), - podDisruptionBudgetStatus: (import 'podDisruptionBudgetStatus.libsonnet'), - podSecurityPolicy: (import 'podSecurityPolicy.libsonnet'), - podSecurityPolicySpec: (import 'podSecurityPolicySpec.libsonnet'), - runAsGroupStrategyOptions: (import 'runAsGroupStrategyOptions.libsonnet'), - runAsUserStrategyOptions: (import 'runAsUserStrategyOptions.libsonnet'), - runtimeClassStrategyOptions: (import 'runtimeClassStrategyOptions.libsonnet'), - seLinuxStrategyOptions: (import 'seLinuxStrategyOptions.libsonnet'), - supplementalGroupsStrategyOptions: (import 'supplementalGroupsStrategyOptions.libsonnet'), -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1beta1/podDisruptionBudget.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1beta1/podDisruptionBudget.libsonnet deleted file mode 100644 index af495f14f7f..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1beta1/podDisruptionBudget.libsonnet +++ /dev/null @@ -1,74 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='podDisruptionBudget', url='', help='"PodDisruptionBudget is an object to define the max disruption that can be caused to a collection of 
pods"'), - '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), - metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), - withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), - withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, - '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), - withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, - '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), - withDeletionGracePeriodSeconds(deletionGracePeriodSeconds): { metadata+: { deletionGracePeriodSeconds: deletionGracePeriodSeconds } }, - '#withDeletionTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='deletionTimestamp', type=d.T.string)]), - withDeletionTimestamp(deletionTimestamp): { metadata+: { deletionTimestamp: deletionTimestamp } }, - '#withFinalizers':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. 
Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), - withGenerateName(generateName): { metadata+: { generateName: generateName } }, - '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), - withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), - withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. 
More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), - withLabelsMixin(labels): { metadata+: { labels+: labels } }, - '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { metadata+: { namespace: namespace } }, - '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, - '#withOwnerReferencesMixin':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. 
There cannot be more than one managing controller."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, - '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), - withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), - withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), - withUid(uid): { metadata+: { uid: uid } }, - }, - '#new':: d.fn(help='new returns an instance of PodDisruptionBudget', args=[d.arg(name='name', type=d.T.string)]), - new(name): { - apiVersion: 'policy/v1beta1', - kind: 'PodDisruptionBudget', - } + self.metadata.withName(name=name), - '#spec':: d.obj(help='"PodDisruptionBudgetSpec is a description of a PodDisruptionBudget."'), - spec: { - '#selector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), - selector: { - '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), - withMatchExpressions(matchExpressions): { spec+: { selector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } }, - '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), - withMatchExpressionsMixin(matchExpressions): { spec+: { selector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } }, - '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), - withMatchLabels(matchLabels): { spec+: { selector+: { matchLabels: matchLabels } } }, - '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), - withMatchLabelsMixin(matchLabels): { spec+: { selector+: { matchLabels+: matchLabels } } }, - }, - '#withMaxUnavailable':: d.fn(help='"IntOrString is a type that can hold an int32 or a string. When used in JSON or YAML marshalling and unmarshalling, it produces or consumes the inner type. This allows you to have, for example, a JSON field that can accept a name or number."', args=[d.arg(name='maxUnavailable', type=d.T.string)]), - withMaxUnavailable(maxUnavailable): { spec+: { maxUnavailable: maxUnavailable } }, - '#withMinAvailable':: d.fn(help='"IntOrString is a type that can hold an int32 or a string. When used in JSON or YAML marshalling and unmarshalling, it produces or consumes the inner type. This allows you to have, for example, a JSON field that can accept a name or number."', args=[d.arg(name='minAvailable', type=d.T.string)]), - withMinAvailable(minAvailable): { spec+: { minAvailable: minAvailable } }, - }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1beta1/podSecurityPolicy.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1beta1/podSecurityPolicy.libsonnet deleted file mode 100644 index 6147e227838..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1beta1/podSecurityPolicy.libsonnet +++ /dev/null @@ -1,178 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='podSecurityPolicy', url='', help='"PodSecurityPolicy governs the ability to make requests that affect the Security Context that will be applied to a pod and container. Deprecated in 1.21."'), - '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), - metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), - withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. 
More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), - withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, - '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), - withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, - '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), - withDeletionGracePeriodSeconds(deletionGracePeriodSeconds): { metadata+: { deletionGracePeriodSeconds: deletionGracePeriodSeconds } }, - '#withDeletionTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='deletionTimestamp', type=d.T.string)]), - withDeletionTimestamp(deletionTimestamp): { metadata+: { deletionTimestamp: deletionTimestamp } }, - '#withFinalizers':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. 
If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), - withGenerateName(generateName): { metadata+: { generateName: generateName } }, - '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), - withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), - withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), - withLabelsMixin(labels): { metadata+: { labels+: labels } }, - '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". 
The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { metadata+: { namespace: namespace } }, - '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, - '#withOwnerReferencesMixin':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, - '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. 
May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), - withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), - withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), - withUid(uid): { metadata+: { uid: uid } }, - }, - '#new':: d.fn(help='new returns an instance of PodSecurityPolicy', args=[d.arg(name='name', type=d.T.string)]), - new(name): { - apiVersion: 'policy/v1beta1', - kind: 'PodSecurityPolicy', - } + self.metadata.withName(name=name), - '#spec':: d.obj(help='"PodSecurityPolicySpec defines the policy enforced."'), - spec: { - '#fsGroup':: d.obj(help='"FSGroupStrategyOptions defines the strategy type and options used to create the strategy."'), - fsGroup: { - '#withRanges':: d.fn(help='"ranges are the allowed ranges of fs groups. If you would like to force a single fs group then supply a single range with the same start and end. Required for MustRunAs."', args=[d.arg(name='ranges', type=d.T.array)]), - withRanges(ranges): { spec+: { fsGroup+: { ranges: if std.isArray(v=ranges) then ranges else [ranges] } } }, - '#withRangesMixin':: d.fn(help='"ranges are the allowed ranges of fs groups. If you would like to force a single fs group then supply a single range with the same start and end. Required for MustRunAs."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ranges', type=d.T.array)]), - withRangesMixin(ranges): { spec+: { fsGroup+: { ranges+: if std.isArray(v=ranges) then ranges else [ranges] } } }, - '#withRule':: d.fn(help='"rule is the strategy that will dictate what FSGroup is used in the SecurityContext."', args=[d.arg(name='rule', type=d.T.string)]), - withRule(rule): { spec+: { fsGroup+: { rule: rule } } }, - }, - '#runAsGroup':: d.obj(help='"RunAsGroupStrategyOptions defines the strategy type and any options used to create the strategy."'), - runAsGroup: { - '#withRanges':: d.fn(help='"ranges are the allowed ranges of gids that may be used. If you would like to force a single gid then supply a single range with the same start and end. Required for MustRunAs."', args=[d.arg(name='ranges', type=d.T.array)]), - withRanges(ranges): { spec+: { runAsGroup+: { ranges: if std.isArray(v=ranges) then ranges else [ranges] } } }, - '#withRangesMixin':: d.fn(help='"ranges are the allowed ranges of gids that may be used. 
If you would like to force a single gid then supply a single range with the same start and end. Required for MustRunAs."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ranges', type=d.T.array)]), - withRangesMixin(ranges): { spec+: { runAsGroup+: { ranges+: if std.isArray(v=ranges) then ranges else [ranges] } } }, - '#withRule':: d.fn(help='"rule is the strategy that will dictate the allowable RunAsGroup values that may be set."', args=[d.arg(name='rule', type=d.T.string)]), - withRule(rule): { spec+: { runAsGroup+: { rule: rule } } }, - }, - '#runAsUser':: d.obj(help='"RunAsUserStrategyOptions defines the strategy type and any options used to create the strategy."'), - runAsUser: { - '#withRanges':: d.fn(help='"ranges are the allowed ranges of uids that may be used. If you would like to force a single uid then supply a single range with the same start and end. Required for MustRunAs."', args=[d.arg(name='ranges', type=d.T.array)]), - withRanges(ranges): { spec+: { runAsUser+: { ranges: if std.isArray(v=ranges) then ranges else [ranges] } } }, - '#withRangesMixin':: d.fn(help='"ranges are the allowed ranges of uids that may be used. If you would like to force a single uid then supply a single range with the same start and end. Required for MustRunAs."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ranges', type=d.T.array)]), - withRangesMixin(ranges): { spec+: { runAsUser+: { ranges+: if std.isArray(v=ranges) then ranges else [ranges] } } }, - '#withRule':: d.fn(help='"rule is the strategy that will dictate the allowable RunAsUser values that may be set."', args=[d.arg(name='rule', type=d.T.string)]), - withRule(rule): { spec+: { runAsUser+: { rule: rule } } }, - }, - '#runtimeClass':: d.obj(help='"RuntimeClassStrategyOptions define the strategy that will dictate the allowable RuntimeClasses for a pod."'), - runtimeClass: { - '#withAllowedRuntimeClassNames':: d.fn(help='"allowedRuntimeClassNames is an allowlist of RuntimeClass names that may be specified on a pod. A value of \\"*\\" means that any RuntimeClass name is allowed, and must be the only item in the list. An empty list requires the RuntimeClassName field to be unset."', args=[d.arg(name='allowedRuntimeClassNames', type=d.T.array)]), - withAllowedRuntimeClassNames(allowedRuntimeClassNames): { spec+: { runtimeClass+: { allowedRuntimeClassNames: if std.isArray(v=allowedRuntimeClassNames) then allowedRuntimeClassNames else [allowedRuntimeClassNames] } } }, - '#withAllowedRuntimeClassNamesMixin':: d.fn(help='"allowedRuntimeClassNames is an allowlist of RuntimeClass names that may be specified on a pod. A value of \\"*\\" means that any RuntimeClass name is allowed, and must be the only item in the list. An empty list requires the RuntimeClassName field to be unset."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='allowedRuntimeClassNames', type=d.T.array)]), - withAllowedRuntimeClassNamesMixin(allowedRuntimeClassNames): { spec+: { runtimeClass+: { allowedRuntimeClassNames+: if std.isArray(v=allowedRuntimeClassNames) then allowedRuntimeClassNames else [allowedRuntimeClassNames] } } }, - '#withDefaultRuntimeClassName':: d.fn(help='"defaultRuntimeClassName is the default RuntimeClassName to set on the pod. The default MUST be allowed by the allowedRuntimeClassNames list. 
A value of nil does not mutate the Pod."', args=[d.arg(name='defaultRuntimeClassName', type=d.T.string)]), - withDefaultRuntimeClassName(defaultRuntimeClassName): { spec+: { runtimeClass+: { defaultRuntimeClassName: defaultRuntimeClassName } } }, - }, - '#seLinux':: d.obj(help='"SELinuxStrategyOptions defines the strategy type and any options used to create the strategy."'), - seLinux: { - '#seLinuxOptions':: d.obj(help='"SELinuxOptions are the labels to be applied to the container"'), - seLinuxOptions: { - '#withLevel':: d.fn(help='"Level is SELinux level label that applies to the container."', args=[d.arg(name='level', type=d.T.string)]), - withLevel(level): { spec+: { seLinux+: { seLinuxOptions+: { level: level } } } }, - '#withRole':: d.fn(help='"Role is a SELinux role label that applies to the container."', args=[d.arg(name='role', type=d.T.string)]), - withRole(role): { spec+: { seLinux+: { seLinuxOptions+: { role: role } } } }, - '#withType':: d.fn(help='"Type is a SELinux type label that applies to the container."', args=[d.arg(name='type', type=d.T.string)]), - withType(type): { spec+: { seLinux+: { seLinuxOptions+: { type: type } } } }, - '#withUser':: d.fn(help='"User is a SELinux user label that applies to the container."', args=[d.arg(name='user', type=d.T.string)]), - withUser(user): { spec+: { seLinux+: { seLinuxOptions+: { user: user } } } }, - }, - '#withRule':: d.fn(help='"rule is the strategy that will dictate the allowable labels that may be set."', args=[d.arg(name='rule', type=d.T.string)]), - withRule(rule): { spec+: { seLinux+: { rule: rule } } }, - }, - '#supplementalGroups':: d.obj(help='"SupplementalGroupsStrategyOptions defines the strategy type and options used to create the strategy."'), - supplementalGroups: { - '#withRanges':: d.fn(help='"ranges are the allowed ranges of supplemental groups. If you would like to force a single supplemental group then supply a single range with the same start and end. Required for MustRunAs."', args=[d.arg(name='ranges', type=d.T.array)]), - withRanges(ranges): { spec+: { supplementalGroups+: { ranges: if std.isArray(v=ranges) then ranges else [ranges] } } }, - '#withRangesMixin':: d.fn(help='"ranges are the allowed ranges of supplemental groups. If you would like to force a single supplemental group then supply a single range with the same start and end. Required for MustRunAs."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ranges', type=d.T.array)]), - withRangesMixin(ranges): { spec+: { supplementalGroups+: { ranges+: if std.isArray(v=ranges) then ranges else [ranges] } } }, - '#withRule':: d.fn(help='"rule is the strategy that will dictate what supplemental groups is used in the SecurityContext."', args=[d.arg(name='rule', type=d.T.string)]), - withRule(rule): { spec+: { supplementalGroups+: { rule: rule } } }, - }, - '#withAllowPrivilegeEscalation':: d.fn(help='"allowPrivilegeEscalation determines if a pod can request to allow privilege escalation. If unspecified, defaults to true."', args=[d.arg(name='allowPrivilegeEscalation', type=d.T.boolean)]), - withAllowPrivilegeEscalation(allowPrivilegeEscalation): { spec+: { allowPrivilegeEscalation: allowPrivilegeEscalation } }, - '#withAllowedCSIDrivers':: d.fn(help='"AllowedCSIDrivers is an allowlist of inline CSI drivers that must be explicitly set to be embedded within a pod spec. An empty value indicates that any CSI driver can be used for inline ephemeral volumes. 
This is a beta field, and is only honored if the API server enables the CSIInlineVolume feature gate."', args=[d.arg(name='allowedCSIDrivers', type=d.T.array)]), - withAllowedCSIDrivers(allowedCSIDrivers): { spec+: { allowedCSIDrivers: if std.isArray(v=allowedCSIDrivers) then allowedCSIDrivers else [allowedCSIDrivers] } }, - '#withAllowedCSIDriversMixin':: d.fn(help='"AllowedCSIDrivers is an allowlist of inline CSI drivers that must be explicitly set to be embedded within a pod spec. An empty value indicates that any CSI driver can be used for inline ephemeral volumes. This is a beta field, and is only honored if the API server enables the CSIInlineVolume feature gate."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='allowedCSIDrivers', type=d.T.array)]), - withAllowedCSIDriversMixin(allowedCSIDrivers): { spec+: { allowedCSIDrivers+: if std.isArray(v=allowedCSIDrivers) then allowedCSIDrivers else [allowedCSIDrivers] } }, - '#withAllowedCapabilities':: d.fn(help="\"allowedCapabilities is a list of capabilities that can be requested to add to the container. Capabilities in this field may be added at the pod author's discretion. You must not list a capability in both allowedCapabilities and requiredDropCapabilities.\"", args=[d.arg(name='allowedCapabilities', type=d.T.array)]), - withAllowedCapabilities(allowedCapabilities): { spec+: { allowedCapabilities: if std.isArray(v=allowedCapabilities) then allowedCapabilities else [allowedCapabilities] } }, - '#withAllowedCapabilitiesMixin':: d.fn(help="\"allowedCapabilities is a list of capabilities that can be requested to add to the container. Capabilities in this field may be added at the pod author's discretion. You must not list a capability in both allowedCapabilities and requiredDropCapabilities.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='allowedCapabilities', type=d.T.array)]), - withAllowedCapabilitiesMixin(allowedCapabilities): { spec+: { allowedCapabilities+: if std.isArray(v=allowedCapabilities) then allowedCapabilities else [allowedCapabilities] } }, - '#withAllowedFlexVolumes':: d.fn(help='"allowedFlexVolumes is an allowlist of Flexvolumes. Empty or nil indicates that all Flexvolumes may be used. This parameter is effective only when the usage of the Flexvolumes is allowed in the \\"volumes\\" field."', args=[d.arg(name='allowedFlexVolumes', type=d.T.array)]), - withAllowedFlexVolumes(allowedFlexVolumes): { spec+: { allowedFlexVolumes: if std.isArray(v=allowedFlexVolumes) then allowedFlexVolumes else [allowedFlexVolumes] } }, - '#withAllowedFlexVolumesMixin':: d.fn(help='"allowedFlexVolumes is an allowlist of Flexvolumes. Empty or nil indicates that all Flexvolumes may be used. This parameter is effective only when the usage of the Flexvolumes is allowed in the \\"volumes\\" field."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='allowedFlexVolumes', type=d.T.array)]), - withAllowedFlexVolumesMixin(allowedFlexVolumes): { spec+: { allowedFlexVolumes+: if std.isArray(v=allowedFlexVolumes) then allowedFlexVolumes else [allowedFlexVolumes] } }, - '#withAllowedHostPaths':: d.fn(help='"allowedHostPaths is an allowlist of host paths. 
Empty indicates that all host paths may be used."', args=[d.arg(name='allowedHostPaths', type=d.T.array)]), - withAllowedHostPaths(allowedHostPaths): { spec+: { allowedHostPaths: if std.isArray(v=allowedHostPaths) then allowedHostPaths else [allowedHostPaths] } }, - '#withAllowedHostPathsMixin':: d.fn(help='"allowedHostPaths is an allowlist of host paths. Empty indicates that all host paths may be used."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='allowedHostPaths', type=d.T.array)]), - withAllowedHostPathsMixin(allowedHostPaths): { spec+: { allowedHostPaths+: if std.isArray(v=allowedHostPaths) then allowedHostPaths else [allowedHostPaths] } }, - '#withAllowedProcMountTypes':: d.fn(help='"AllowedProcMountTypes is an allowlist of allowed ProcMountTypes. Empty or nil indicates that only the DefaultProcMountType may be used. This requires the ProcMountType feature flag to be enabled."', args=[d.arg(name='allowedProcMountTypes', type=d.T.array)]), - withAllowedProcMountTypes(allowedProcMountTypes): { spec+: { allowedProcMountTypes: if std.isArray(v=allowedProcMountTypes) then allowedProcMountTypes else [allowedProcMountTypes] } }, - '#withAllowedProcMountTypesMixin':: d.fn(help='"AllowedProcMountTypes is an allowlist of allowed ProcMountTypes. Empty or nil indicates that only the DefaultProcMountType may be used. This requires the ProcMountType feature flag to be enabled."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='allowedProcMountTypes', type=d.T.array)]), - withAllowedProcMountTypesMixin(allowedProcMountTypes): { spec+: { allowedProcMountTypes+: if std.isArray(v=allowedProcMountTypes) then allowedProcMountTypes else [allowedProcMountTypes] } }, - '#withAllowedUnsafeSysctls':: d.fn(help='"allowedUnsafeSysctls is a list of explicitly allowed unsafe sysctls, defaults to none. Each entry is either a plain sysctl name or ends in \\"*\\" in which case it is considered as a prefix of allowed sysctls. Single * means all unsafe sysctls are allowed. Kubelet has to allowlist all allowed unsafe sysctls explicitly to avoid rejection.\\n\\nExamples: e.g. \\"foo/*\\" allows \\"foo/bar\\", \\"foo/baz\\", etc. e.g. \\"foo.*\\" allows \\"foo.bar\\", \\"foo.baz\\", etc."', args=[d.arg(name='allowedUnsafeSysctls', type=d.T.array)]), - withAllowedUnsafeSysctls(allowedUnsafeSysctls): { spec+: { allowedUnsafeSysctls: if std.isArray(v=allowedUnsafeSysctls) then allowedUnsafeSysctls else [allowedUnsafeSysctls] } }, - '#withAllowedUnsafeSysctlsMixin':: d.fn(help='"allowedUnsafeSysctls is a list of explicitly allowed unsafe sysctls, defaults to none. Each entry is either a plain sysctl name or ends in \\"*\\" in which case it is considered as a prefix of allowed sysctls. Single * means all unsafe sysctls are allowed. Kubelet has to allowlist all allowed unsafe sysctls explicitly to avoid rejection.\\n\\nExamples: e.g. \\"foo/*\\" allows \\"foo/bar\\", \\"foo/baz\\", etc. e.g. \\"foo.*\\" allows \\"foo.bar\\", \\"foo.baz\\", etc."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='allowedUnsafeSysctls', type=d.T.array)]), - withAllowedUnsafeSysctlsMixin(allowedUnsafeSysctls): { spec+: { allowedUnsafeSysctls+: if std.isArray(v=allowedUnsafeSysctls) then allowedUnsafeSysctls else [allowedUnsafeSysctls] } }, - '#withDefaultAddCapabilities':: d.fn(help='"defaultAddCapabilities is the default set of capabilities that will be added to the container unless the pod spec specifically drops the capability. 
You may not list a capability in both defaultAddCapabilities and requiredDropCapabilities. Capabilities added here are implicitly allowed, and need not be included in the allowedCapabilities list."', args=[d.arg(name='defaultAddCapabilities', type=d.T.array)]), - withDefaultAddCapabilities(defaultAddCapabilities): { spec+: { defaultAddCapabilities: if std.isArray(v=defaultAddCapabilities) then defaultAddCapabilities else [defaultAddCapabilities] } }, - '#withDefaultAddCapabilitiesMixin':: d.fn(help='"defaultAddCapabilities is the default set of capabilities that will be added to the container unless the pod spec specifically drops the capability. You may not list a capability in both defaultAddCapabilities and requiredDropCapabilities. Capabilities added here are implicitly allowed, and need not be included in the allowedCapabilities list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='defaultAddCapabilities', type=d.T.array)]), - withDefaultAddCapabilitiesMixin(defaultAddCapabilities): { spec+: { defaultAddCapabilities+: if std.isArray(v=defaultAddCapabilities) then defaultAddCapabilities else [defaultAddCapabilities] } }, - '#withDefaultAllowPrivilegeEscalation':: d.fn(help='"defaultAllowPrivilegeEscalation controls the default setting for whether a process can gain more privileges than its parent process."', args=[d.arg(name='defaultAllowPrivilegeEscalation', type=d.T.boolean)]), - withDefaultAllowPrivilegeEscalation(defaultAllowPrivilegeEscalation): { spec+: { defaultAllowPrivilegeEscalation: defaultAllowPrivilegeEscalation } }, - '#withForbiddenSysctls':: d.fn(help='"forbiddenSysctls is a list of explicitly forbidden sysctls, defaults to none. Each entry is either a plain sysctl name or ends in \\"*\\" in which case it is considered as a prefix of forbidden sysctls. Single * means all sysctls are forbidden.\\n\\nExamples: e.g. \\"foo/*\\" forbids \\"foo/bar\\", \\"foo/baz\\", etc. e.g. \\"foo.*\\" forbids \\"foo.bar\\", \\"foo.baz\\", etc."', args=[d.arg(name='forbiddenSysctls', type=d.T.array)]), - withForbiddenSysctls(forbiddenSysctls): { spec+: { forbiddenSysctls: if std.isArray(v=forbiddenSysctls) then forbiddenSysctls else [forbiddenSysctls] } }, - '#withForbiddenSysctlsMixin':: d.fn(help='"forbiddenSysctls is a list of explicitly forbidden sysctls, defaults to none. Each entry is either a plain sysctl name or ends in \\"*\\" in which case it is considered as a prefix of forbidden sysctls. Single * means all sysctls are forbidden.\\n\\nExamples: e.g. \\"foo/*\\" forbids \\"foo/bar\\", \\"foo/baz\\", etc. e.g. 
\\"foo.*\\" forbids \\"foo.bar\\", \\"foo.baz\\", etc."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='forbiddenSysctls', type=d.T.array)]), - withForbiddenSysctlsMixin(forbiddenSysctls): { spec+: { forbiddenSysctls+: if std.isArray(v=forbiddenSysctls) then forbiddenSysctls else [forbiddenSysctls] } }, - '#withHostIPC':: d.fn(help='"hostIPC determines if the policy allows the use of HostIPC in the pod spec."', args=[d.arg(name='hostIPC', type=d.T.boolean)]), - withHostIPC(hostIPC): { spec+: { hostIPC: hostIPC } }, - '#withHostNetwork':: d.fn(help='"hostNetwork determines if the policy allows the use of HostNetwork in the pod spec."', args=[d.arg(name='hostNetwork', type=d.T.boolean)]), - withHostNetwork(hostNetwork): { spec+: { hostNetwork: hostNetwork } }, - '#withHostPID':: d.fn(help='"hostPID determines if the policy allows the use of HostPID in the pod spec."', args=[d.arg(name='hostPID', type=d.T.boolean)]), - withHostPID(hostPID): { spec+: { hostPID: hostPID } }, - '#withHostPorts':: d.fn(help='"hostPorts determines which host port ranges are allowed to be exposed."', args=[d.arg(name='hostPorts', type=d.T.array)]), - withHostPorts(hostPorts): { spec+: { hostPorts: if std.isArray(v=hostPorts) then hostPorts else [hostPorts] } }, - '#withHostPortsMixin':: d.fn(help='"hostPorts determines which host port ranges are allowed to be exposed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='hostPorts', type=d.T.array)]), - withHostPortsMixin(hostPorts): { spec+: { hostPorts+: if std.isArray(v=hostPorts) then hostPorts else [hostPorts] } }, - '#withPrivileged':: d.fn(help='"privileged determines if a pod can request to be run as privileged."', args=[d.arg(name='privileged', type=d.T.boolean)]), - withPrivileged(privileged): { spec+: { privileged: privileged } }, - '#withReadOnlyRootFilesystem':: d.fn(help='"readOnlyRootFilesystem when set to true will force containers to run with a read only root file system. If the container specifically requests to run with a non-read only root file system the PSP should deny the pod. If set to false the container may run with a read only root file system if it wishes but it will not be forced to."', args=[d.arg(name='readOnlyRootFilesystem', type=d.T.boolean)]), - withReadOnlyRootFilesystem(readOnlyRootFilesystem): { spec+: { readOnlyRootFilesystem: readOnlyRootFilesystem } }, - '#withRequiredDropCapabilities':: d.fn(help='"requiredDropCapabilities are the capabilities that will be dropped from the container. These are required to be dropped and cannot be added."', args=[d.arg(name='requiredDropCapabilities', type=d.T.array)]), - withRequiredDropCapabilities(requiredDropCapabilities): { spec+: { requiredDropCapabilities: if std.isArray(v=requiredDropCapabilities) then requiredDropCapabilities else [requiredDropCapabilities] } }, - '#withRequiredDropCapabilitiesMixin':: d.fn(help='"requiredDropCapabilities are the capabilities that will be dropped from the container. These are required to be dropped and cannot be added."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='requiredDropCapabilities', type=d.T.array)]), - withRequiredDropCapabilitiesMixin(requiredDropCapabilities): { spec+: { requiredDropCapabilities+: if std.isArray(v=requiredDropCapabilities) then requiredDropCapabilities else [requiredDropCapabilities] } }, - '#withVolumes':: d.fn(help="\"volumes is an allowlist of volume plugins. Empty indicates that no volumes may be used. 
To allow all volumes you may use '*'.\"", args=[d.arg(name='volumes', type=d.T.array)]), - withVolumes(volumes): { spec+: { volumes: if std.isArray(v=volumes) then volumes else [volumes] } }, - '#withVolumesMixin':: d.fn(help="\"volumes is an allowlist of volume plugins. Empty indicates that no volumes may be used. To allow all volumes you may use '*'.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='volumes', type=d.T.array)]), - withVolumesMixin(volumes): { spec+: { volumes+: if std.isArray(v=volumes) then volumes else [volumes] } }, - }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1beta1/podSecurityPolicySpec.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1beta1/podSecurityPolicySpec.libsonnet deleted file mode 100644 index 49130ef886a..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1beta1/podSecurityPolicySpec.libsonnet +++ /dev/null @@ -1,125 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='podSecurityPolicySpec', url='', help='"PodSecurityPolicySpec defines the policy enforced."'), - '#fsGroup':: d.obj(help='"FSGroupStrategyOptions defines the strategy type and options used to create the strategy."'), - fsGroup: { - '#withRanges':: d.fn(help='"ranges are the allowed ranges of fs groups. If you would like to force a single fs group then supply a single range with the same start and end. Required for MustRunAs."', args=[d.arg(name='ranges', type=d.T.array)]), - withRanges(ranges): { fsGroup+: { ranges: if std.isArray(v=ranges) then ranges else [ranges] } }, - '#withRangesMixin':: d.fn(help='"ranges are the allowed ranges of fs groups. If you would like to force a single fs group then supply a single range with the same start and end. Required for MustRunAs."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ranges', type=d.T.array)]), - withRangesMixin(ranges): { fsGroup+: { ranges+: if std.isArray(v=ranges) then ranges else [ranges] } }, - '#withRule':: d.fn(help='"rule is the strategy that will dictate what FSGroup is used in the SecurityContext."', args=[d.arg(name='rule', type=d.T.string)]), - withRule(rule): { fsGroup+: { rule: rule } }, - }, - '#runAsGroup':: d.obj(help='"RunAsGroupStrategyOptions defines the strategy type and any options used to create the strategy."'), - runAsGroup: { - '#withRanges':: d.fn(help='"ranges are the allowed ranges of gids that may be used. If you would like to force a single gid then supply a single range with the same start and end. Required for MustRunAs."', args=[d.arg(name='ranges', type=d.T.array)]), - withRanges(ranges): { runAsGroup+: { ranges: if std.isArray(v=ranges) then ranges else [ranges] } }, - '#withRangesMixin':: d.fn(help='"ranges are the allowed ranges of gids that may be used. If you would like to force a single gid then supply a single range with the same start and end. 
Required for MustRunAs."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ranges', type=d.T.array)]), - withRangesMixin(ranges): { runAsGroup+: { ranges+: if std.isArray(v=ranges) then ranges else [ranges] } }, - '#withRule':: d.fn(help='"rule is the strategy that will dictate the allowable RunAsGroup values that may be set."', args=[d.arg(name='rule', type=d.T.string)]), - withRule(rule): { runAsGroup+: { rule: rule } }, - }, - '#runAsUser':: d.obj(help='"RunAsUserStrategyOptions defines the strategy type and any options used to create the strategy."'), - runAsUser: { - '#withRanges':: d.fn(help='"ranges are the allowed ranges of uids that may be used. If you would like to force a single uid then supply a single range with the same start and end. Required for MustRunAs."', args=[d.arg(name='ranges', type=d.T.array)]), - withRanges(ranges): { runAsUser+: { ranges: if std.isArray(v=ranges) then ranges else [ranges] } }, - '#withRangesMixin':: d.fn(help='"ranges are the allowed ranges of uids that may be used. If you would like to force a single uid then supply a single range with the same start and end. Required for MustRunAs."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ranges', type=d.T.array)]), - withRangesMixin(ranges): { runAsUser+: { ranges+: if std.isArray(v=ranges) then ranges else [ranges] } }, - '#withRule':: d.fn(help='"rule is the strategy that will dictate the allowable RunAsUser values that may be set."', args=[d.arg(name='rule', type=d.T.string)]), - withRule(rule): { runAsUser+: { rule: rule } }, - }, - '#runtimeClass':: d.obj(help='"RuntimeClassStrategyOptions define the strategy that will dictate the allowable RuntimeClasses for a pod."'), - runtimeClass: { - '#withAllowedRuntimeClassNames':: d.fn(help='"allowedRuntimeClassNames is an allowlist of RuntimeClass names that may be specified on a pod. A value of \\"*\\" means that any RuntimeClass name is allowed, and must be the only item in the list. An empty list requires the RuntimeClassName field to be unset."', args=[d.arg(name='allowedRuntimeClassNames', type=d.T.array)]), - withAllowedRuntimeClassNames(allowedRuntimeClassNames): { runtimeClass+: { allowedRuntimeClassNames: if std.isArray(v=allowedRuntimeClassNames) then allowedRuntimeClassNames else [allowedRuntimeClassNames] } }, - '#withAllowedRuntimeClassNamesMixin':: d.fn(help='"allowedRuntimeClassNames is an allowlist of RuntimeClass names that may be specified on a pod. A value of \\"*\\" means that any RuntimeClass name is allowed, and must be the only item in the list. An empty list requires the RuntimeClassName field to be unset."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='allowedRuntimeClassNames', type=d.T.array)]), - withAllowedRuntimeClassNamesMixin(allowedRuntimeClassNames): { runtimeClass+: { allowedRuntimeClassNames+: if std.isArray(v=allowedRuntimeClassNames) then allowedRuntimeClassNames else [allowedRuntimeClassNames] } }, - '#withDefaultRuntimeClassName':: d.fn(help='"defaultRuntimeClassName is the default RuntimeClassName to set on the pod. The default MUST be allowed by the allowedRuntimeClassNames list. 
A value of nil does not mutate the Pod."', args=[d.arg(name='defaultRuntimeClassName', type=d.T.string)]), - withDefaultRuntimeClassName(defaultRuntimeClassName): { runtimeClass+: { defaultRuntimeClassName: defaultRuntimeClassName } }, - }, - '#seLinux':: d.obj(help='"SELinuxStrategyOptions defines the strategy type and any options used to create the strategy."'), - seLinux: { - '#seLinuxOptions':: d.obj(help='"SELinuxOptions are the labels to be applied to the container"'), - seLinuxOptions: { - '#withLevel':: d.fn(help='"Level is SELinux level label that applies to the container."', args=[d.arg(name='level', type=d.T.string)]), - withLevel(level): { seLinux+: { seLinuxOptions+: { level: level } } }, - '#withRole':: d.fn(help='"Role is a SELinux role label that applies to the container."', args=[d.arg(name='role', type=d.T.string)]), - withRole(role): { seLinux+: { seLinuxOptions+: { role: role } } }, - '#withType':: d.fn(help='"Type is a SELinux type label that applies to the container."', args=[d.arg(name='type', type=d.T.string)]), - withType(type): { seLinux+: { seLinuxOptions+: { type: type } } }, - '#withUser':: d.fn(help='"User is a SELinux user label that applies to the container."', args=[d.arg(name='user', type=d.T.string)]), - withUser(user): { seLinux+: { seLinuxOptions+: { user: user } } }, - }, - '#withRule':: d.fn(help='"rule is the strategy that will dictate the allowable labels that may be set."', args=[d.arg(name='rule', type=d.T.string)]), - withRule(rule): { seLinux+: { rule: rule } }, - }, - '#supplementalGroups':: d.obj(help='"SupplementalGroupsStrategyOptions defines the strategy type and options used to create the strategy."'), - supplementalGroups: { - '#withRanges':: d.fn(help='"ranges are the allowed ranges of supplemental groups. If you would like to force a single supplemental group then supply a single range with the same start and end. Required for MustRunAs."', args=[d.arg(name='ranges', type=d.T.array)]), - withRanges(ranges): { supplementalGroups+: { ranges: if std.isArray(v=ranges) then ranges else [ranges] } }, - '#withRangesMixin':: d.fn(help='"ranges are the allowed ranges of supplemental groups. If you would like to force a single supplemental group then supply a single range with the same start and end. Required for MustRunAs."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ranges', type=d.T.array)]), - withRangesMixin(ranges): { supplementalGroups+: { ranges+: if std.isArray(v=ranges) then ranges else [ranges] } }, - '#withRule':: d.fn(help='"rule is the strategy that will dictate what supplemental groups is used in the SecurityContext."', args=[d.arg(name='rule', type=d.T.string)]), - withRule(rule): { supplementalGroups+: { rule: rule } }, - }, - '#withAllowPrivilegeEscalation':: d.fn(help='"allowPrivilegeEscalation determines if a pod can request to allow privilege escalation. If unspecified, defaults to true."', args=[d.arg(name='allowPrivilegeEscalation', type=d.T.boolean)]), - withAllowPrivilegeEscalation(allowPrivilegeEscalation): { allowPrivilegeEscalation: allowPrivilegeEscalation }, - '#withAllowedCSIDrivers':: d.fn(help='"AllowedCSIDrivers is an allowlist of inline CSI drivers that must be explicitly set to be embedded within a pod spec. An empty value indicates that any CSI driver can be used for inline ephemeral volumes. 
This is a beta field, and is only honored if the API server enables the CSIInlineVolume feature gate."', args=[d.arg(name='allowedCSIDrivers', type=d.T.array)]), - withAllowedCSIDrivers(allowedCSIDrivers): { allowedCSIDrivers: if std.isArray(v=allowedCSIDrivers) then allowedCSIDrivers else [allowedCSIDrivers] }, - '#withAllowedCSIDriversMixin':: d.fn(help='"AllowedCSIDrivers is an allowlist of inline CSI drivers that must be explicitly set to be embedded within a pod spec. An empty value indicates that any CSI driver can be used for inline ephemeral volumes. This is a beta field, and is only honored if the API server enables the CSIInlineVolume feature gate."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='allowedCSIDrivers', type=d.T.array)]), - withAllowedCSIDriversMixin(allowedCSIDrivers): { allowedCSIDrivers+: if std.isArray(v=allowedCSIDrivers) then allowedCSIDrivers else [allowedCSIDrivers] }, - '#withAllowedCapabilities':: d.fn(help="\"allowedCapabilities is a list of capabilities that can be requested to add to the container. Capabilities in this field may be added at the pod author's discretion. You must not list a capability in both allowedCapabilities and requiredDropCapabilities.\"", args=[d.arg(name='allowedCapabilities', type=d.T.array)]), - withAllowedCapabilities(allowedCapabilities): { allowedCapabilities: if std.isArray(v=allowedCapabilities) then allowedCapabilities else [allowedCapabilities] }, - '#withAllowedCapabilitiesMixin':: d.fn(help="\"allowedCapabilities is a list of capabilities that can be requested to add to the container. Capabilities in this field may be added at the pod author's discretion. You must not list a capability in both allowedCapabilities and requiredDropCapabilities.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='allowedCapabilities', type=d.T.array)]), - withAllowedCapabilitiesMixin(allowedCapabilities): { allowedCapabilities+: if std.isArray(v=allowedCapabilities) then allowedCapabilities else [allowedCapabilities] }, - '#withAllowedFlexVolumes':: d.fn(help='"allowedFlexVolumes is an allowlist of Flexvolumes. Empty or nil indicates that all Flexvolumes may be used. This parameter is effective only when the usage of the Flexvolumes is allowed in the \\"volumes\\" field."', args=[d.arg(name='allowedFlexVolumes', type=d.T.array)]), - withAllowedFlexVolumes(allowedFlexVolumes): { allowedFlexVolumes: if std.isArray(v=allowedFlexVolumes) then allowedFlexVolumes else [allowedFlexVolumes] }, - '#withAllowedFlexVolumesMixin':: d.fn(help='"allowedFlexVolumes is an allowlist of Flexvolumes. Empty or nil indicates that all Flexvolumes may be used. This parameter is effective only when the usage of the Flexvolumes is allowed in the \\"volumes\\" field."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='allowedFlexVolumes', type=d.T.array)]), - withAllowedFlexVolumesMixin(allowedFlexVolumes): { allowedFlexVolumes+: if std.isArray(v=allowedFlexVolumes) then allowedFlexVolumes else [allowedFlexVolumes] }, - '#withAllowedHostPaths':: d.fn(help='"allowedHostPaths is an allowlist of host paths. Empty indicates that all host paths may be used."', args=[d.arg(name='allowedHostPaths', type=d.T.array)]), - withAllowedHostPaths(allowedHostPaths): { allowedHostPaths: if std.isArray(v=allowedHostPaths) then allowedHostPaths else [allowedHostPaths] }, - '#withAllowedHostPathsMixin':: d.fn(help='"allowedHostPaths is an allowlist of host paths. 
Empty indicates that all host paths may be used."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='allowedHostPaths', type=d.T.array)]), - withAllowedHostPathsMixin(allowedHostPaths): { allowedHostPaths+: if std.isArray(v=allowedHostPaths) then allowedHostPaths else [allowedHostPaths] }, - '#withAllowedProcMountTypes':: d.fn(help='"AllowedProcMountTypes is an allowlist of allowed ProcMountTypes. Empty or nil indicates that only the DefaultProcMountType may be used. This requires the ProcMountType feature flag to be enabled."', args=[d.arg(name='allowedProcMountTypes', type=d.T.array)]), - withAllowedProcMountTypes(allowedProcMountTypes): { allowedProcMountTypes: if std.isArray(v=allowedProcMountTypes) then allowedProcMountTypes else [allowedProcMountTypes] }, - '#withAllowedProcMountTypesMixin':: d.fn(help='"AllowedProcMountTypes is an allowlist of allowed ProcMountTypes. Empty or nil indicates that only the DefaultProcMountType may be used. This requires the ProcMountType feature flag to be enabled."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='allowedProcMountTypes', type=d.T.array)]), - withAllowedProcMountTypesMixin(allowedProcMountTypes): { allowedProcMountTypes+: if std.isArray(v=allowedProcMountTypes) then allowedProcMountTypes else [allowedProcMountTypes] }, - '#withAllowedUnsafeSysctls':: d.fn(help='"allowedUnsafeSysctls is a list of explicitly allowed unsafe sysctls, defaults to none. Each entry is either a plain sysctl name or ends in \\"*\\" in which case it is considered as a prefix of allowed sysctls. Single * means all unsafe sysctls are allowed. Kubelet has to allowlist all allowed unsafe sysctls explicitly to avoid rejection.\\n\\nExamples: e.g. \\"foo/*\\" allows \\"foo/bar\\", \\"foo/baz\\", etc. e.g. \\"foo.*\\" allows \\"foo.bar\\", \\"foo.baz\\", etc."', args=[d.arg(name='allowedUnsafeSysctls', type=d.T.array)]), - withAllowedUnsafeSysctls(allowedUnsafeSysctls): { allowedUnsafeSysctls: if std.isArray(v=allowedUnsafeSysctls) then allowedUnsafeSysctls else [allowedUnsafeSysctls] }, - '#withAllowedUnsafeSysctlsMixin':: d.fn(help='"allowedUnsafeSysctls is a list of explicitly allowed unsafe sysctls, defaults to none. Each entry is either a plain sysctl name or ends in \\"*\\" in which case it is considered as a prefix of allowed sysctls. Single * means all unsafe sysctls are allowed. Kubelet has to allowlist all allowed unsafe sysctls explicitly to avoid rejection.\\n\\nExamples: e.g. \\"foo/*\\" allows \\"foo/bar\\", \\"foo/baz\\", etc. e.g. \\"foo.*\\" allows \\"foo.bar\\", \\"foo.baz\\", etc."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='allowedUnsafeSysctls', type=d.T.array)]), - withAllowedUnsafeSysctlsMixin(allowedUnsafeSysctls): { allowedUnsafeSysctls+: if std.isArray(v=allowedUnsafeSysctls) then allowedUnsafeSysctls else [allowedUnsafeSysctls] }, - '#withDefaultAddCapabilities':: d.fn(help='"defaultAddCapabilities is the default set of capabilities that will be added to the container unless the pod spec specifically drops the capability. You may not list a capability in both defaultAddCapabilities and requiredDropCapabilities. 
Capabilities added here are implicitly allowed, and need not be included in the allowedCapabilities list."', args=[d.arg(name='defaultAddCapabilities', type=d.T.array)]), - withDefaultAddCapabilities(defaultAddCapabilities): { defaultAddCapabilities: if std.isArray(v=defaultAddCapabilities) then defaultAddCapabilities else [defaultAddCapabilities] }, - '#withDefaultAddCapabilitiesMixin':: d.fn(help='"defaultAddCapabilities is the default set of capabilities that will be added to the container unless the pod spec specifically drops the capability. You may not list a capability in both defaultAddCapabilities and requiredDropCapabilities. Capabilities added here are implicitly allowed, and need not be included in the allowedCapabilities list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='defaultAddCapabilities', type=d.T.array)]), - withDefaultAddCapabilitiesMixin(defaultAddCapabilities): { defaultAddCapabilities+: if std.isArray(v=defaultAddCapabilities) then defaultAddCapabilities else [defaultAddCapabilities] }, - '#withDefaultAllowPrivilegeEscalation':: d.fn(help='"defaultAllowPrivilegeEscalation controls the default setting for whether a process can gain more privileges than its parent process."', args=[d.arg(name='defaultAllowPrivilegeEscalation', type=d.T.boolean)]), - withDefaultAllowPrivilegeEscalation(defaultAllowPrivilegeEscalation): { defaultAllowPrivilegeEscalation: defaultAllowPrivilegeEscalation }, - '#withForbiddenSysctls':: d.fn(help='"forbiddenSysctls is a list of explicitly forbidden sysctls, defaults to none. Each entry is either a plain sysctl name or ends in \\"*\\" in which case it is considered as a prefix of forbidden sysctls. Single * means all sysctls are forbidden.\\n\\nExamples: e.g. \\"foo/*\\" forbids \\"foo/bar\\", \\"foo/baz\\", etc. e.g. \\"foo.*\\" forbids \\"foo.bar\\", \\"foo.baz\\", etc."', args=[d.arg(name='forbiddenSysctls', type=d.T.array)]), - withForbiddenSysctls(forbiddenSysctls): { forbiddenSysctls: if std.isArray(v=forbiddenSysctls) then forbiddenSysctls else [forbiddenSysctls] }, - '#withForbiddenSysctlsMixin':: d.fn(help='"forbiddenSysctls is a list of explicitly forbidden sysctls, defaults to none. Each entry is either a plain sysctl name or ends in \\"*\\" in which case it is considered as a prefix of forbidden sysctls. Single * means all sysctls are forbidden.\\n\\nExamples: e.g. \\"foo/*\\" forbids \\"foo/bar\\", \\"foo/baz\\", etc. e.g. 
\\"foo.*\\" forbids \\"foo.bar\\", \\"foo.baz\\", etc."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='forbiddenSysctls', type=d.T.array)]), - withForbiddenSysctlsMixin(forbiddenSysctls): { forbiddenSysctls+: if std.isArray(v=forbiddenSysctls) then forbiddenSysctls else [forbiddenSysctls] }, - '#withHostIPC':: d.fn(help='"hostIPC determines if the policy allows the use of HostIPC in the pod spec."', args=[d.arg(name='hostIPC', type=d.T.boolean)]), - withHostIPC(hostIPC): { hostIPC: hostIPC }, - '#withHostNetwork':: d.fn(help='"hostNetwork determines if the policy allows the use of HostNetwork in the pod spec."', args=[d.arg(name='hostNetwork', type=d.T.boolean)]), - withHostNetwork(hostNetwork): { hostNetwork: hostNetwork }, - '#withHostPID':: d.fn(help='"hostPID determines if the policy allows the use of HostPID in the pod spec."', args=[d.arg(name='hostPID', type=d.T.boolean)]), - withHostPID(hostPID): { hostPID: hostPID }, - '#withHostPorts':: d.fn(help='"hostPorts determines which host port ranges are allowed to be exposed."', args=[d.arg(name='hostPorts', type=d.T.array)]), - withHostPorts(hostPorts): { hostPorts: if std.isArray(v=hostPorts) then hostPorts else [hostPorts] }, - '#withHostPortsMixin':: d.fn(help='"hostPorts determines which host port ranges are allowed to be exposed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='hostPorts', type=d.T.array)]), - withHostPortsMixin(hostPorts): { hostPorts+: if std.isArray(v=hostPorts) then hostPorts else [hostPorts] }, - '#withPrivileged':: d.fn(help='"privileged determines if a pod can request to be run as privileged."', args=[d.arg(name='privileged', type=d.T.boolean)]), - withPrivileged(privileged): { privileged: privileged }, - '#withReadOnlyRootFilesystem':: d.fn(help='"readOnlyRootFilesystem when set to true will force containers to run with a read only root file system. If the container specifically requests to run with a non-read only root file system the PSP should deny the pod. If set to false the container may run with a read only root file system if it wishes but it will not be forced to."', args=[d.arg(name='readOnlyRootFilesystem', type=d.T.boolean)]), - withReadOnlyRootFilesystem(readOnlyRootFilesystem): { readOnlyRootFilesystem: readOnlyRootFilesystem }, - '#withRequiredDropCapabilities':: d.fn(help='"requiredDropCapabilities are the capabilities that will be dropped from the container. These are required to be dropped and cannot be added."', args=[d.arg(name='requiredDropCapabilities', type=d.T.array)]), - withRequiredDropCapabilities(requiredDropCapabilities): { requiredDropCapabilities: if std.isArray(v=requiredDropCapabilities) then requiredDropCapabilities else [requiredDropCapabilities] }, - '#withRequiredDropCapabilitiesMixin':: d.fn(help='"requiredDropCapabilities are the capabilities that will be dropped from the container. These are required to be dropped and cannot be added."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='requiredDropCapabilities', type=d.T.array)]), - withRequiredDropCapabilitiesMixin(requiredDropCapabilities): { requiredDropCapabilities+: if std.isArray(v=requiredDropCapabilities) then requiredDropCapabilities else [requiredDropCapabilities] }, - '#withVolumes':: d.fn(help="\"volumes is an allowlist of volume plugins. Empty indicates that no volumes may be used. 
To allow all volumes you may use '*'.\"", args=[d.arg(name='volumes', type=d.T.array)]), - withVolumes(volumes): { volumes: if std.isArray(v=volumes) then volumes else [volumes] }, - '#withVolumesMixin':: d.fn(help="\"volumes is an allowlist of volume plugins. Empty indicates that no volumes may be used. To allow all volumes you may use '*'.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='volumes', type=d.T.array)]), - withVolumesMixin(volumes): { volumes+: if std.isArray(v=volumes) then volumes else [volumes] }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1beta1/runAsGroupStrategyOptions.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1beta1/runAsGroupStrategyOptions.libsonnet deleted file mode 100644 index ce9ce9e48e4..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1beta1/runAsGroupStrategyOptions.libsonnet +++ /dev/null @@ -1,12 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='runAsGroupStrategyOptions', url='', help='"RunAsGroupStrategyOptions defines the strategy type and any options used to create the strategy."'), - '#withRanges':: d.fn(help='"ranges are the allowed ranges of gids that may be used. If you would like to force a single gid then supply a single range with the same start and end. Required for MustRunAs."', args=[d.arg(name='ranges', type=d.T.array)]), - withRanges(ranges): { ranges: if std.isArray(v=ranges) then ranges else [ranges] }, - '#withRangesMixin':: d.fn(help='"ranges are the allowed ranges of gids that may be used. If you would like to force a single gid then supply a single range with the same start and end. Required for MustRunAs."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ranges', type=d.T.array)]), - withRangesMixin(ranges): { ranges+: if std.isArray(v=ranges) then ranges else [ranges] }, - '#withRule':: d.fn(help='"rule is the strategy that will dictate the allowable RunAsGroup values that may be set."', args=[d.arg(name='rule', type=d.T.string)]), - withRule(rule): { rule: rule }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1beta1/runAsUserStrategyOptions.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1beta1/runAsUserStrategyOptions.libsonnet deleted file mode 100644 index 4eff3f52590..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1beta1/runAsUserStrategyOptions.libsonnet +++ /dev/null @@ -1,12 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='runAsUserStrategyOptions', url='', help='"RunAsUserStrategyOptions defines the strategy type and any options used to create the strategy."'), - '#withRanges':: d.fn(help='"ranges are the allowed ranges of uids that may be used. If you would like to force a single uid then supply a single range with the same start and end. Required for MustRunAs."', args=[d.arg(name='ranges', type=d.T.array)]), - withRanges(ranges): { ranges: if std.isArray(v=ranges) then ranges else [ranges] }, - '#withRangesMixin':: d.fn(help='"ranges are the allowed ranges of uids that may be used. If you would like to force a single uid then supply a single range with the same start and end. 
Required for MustRunAs."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ranges', type=d.T.array)]), - withRangesMixin(ranges): { ranges+: if std.isArray(v=ranges) then ranges else [ranges] }, - '#withRule':: d.fn(help='"rule is the strategy that will dictate the allowable RunAsUser values that may be set."', args=[d.arg(name='rule', type=d.T.string)]), - withRule(rule): { rule: rule }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1beta1/runtimeClassStrategyOptions.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1beta1/runtimeClassStrategyOptions.libsonnet deleted file mode 100644 index d6e31153e35..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1beta1/runtimeClassStrategyOptions.libsonnet +++ /dev/null @@ -1,12 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='runtimeClassStrategyOptions', url='', help='"RuntimeClassStrategyOptions define the strategy that will dictate the allowable RuntimeClasses for a pod."'), - '#withAllowedRuntimeClassNames':: d.fn(help='"allowedRuntimeClassNames is an allowlist of RuntimeClass names that may be specified on a pod. A value of \\"*\\" means that any RuntimeClass name is allowed, and must be the only item in the list. An empty list requires the RuntimeClassName field to be unset."', args=[d.arg(name='allowedRuntimeClassNames', type=d.T.array)]), - withAllowedRuntimeClassNames(allowedRuntimeClassNames): { allowedRuntimeClassNames: if std.isArray(v=allowedRuntimeClassNames) then allowedRuntimeClassNames else [allowedRuntimeClassNames] }, - '#withAllowedRuntimeClassNamesMixin':: d.fn(help='"allowedRuntimeClassNames is an allowlist of RuntimeClass names that may be specified on a pod. A value of \\"*\\" means that any RuntimeClass name is allowed, and must be the only item in the list. An empty list requires the RuntimeClassName field to be unset."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='allowedRuntimeClassNames', type=d.T.array)]), - withAllowedRuntimeClassNamesMixin(allowedRuntimeClassNames): { allowedRuntimeClassNames+: if std.isArray(v=allowedRuntimeClassNames) then allowedRuntimeClassNames else [allowedRuntimeClassNames] }, - '#withDefaultRuntimeClassName':: d.fn(help='"defaultRuntimeClassName is the default RuntimeClassName to set on the pod. The default MUST be allowed by the allowedRuntimeClassNames list. 
A value of nil does not mutate the Pod."', args=[d.arg(name='defaultRuntimeClassName', type=d.T.string)]), - withDefaultRuntimeClassName(defaultRuntimeClassName): { defaultRuntimeClassName: defaultRuntimeClassName }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1beta1/seLinuxStrategyOptions.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1beta1/seLinuxStrategyOptions.libsonnet deleted file mode 100644 index 513de55c3d5..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1beta1/seLinuxStrategyOptions.libsonnet +++ /dev/null @@ -1,19 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='seLinuxStrategyOptions', url='', help='"SELinuxStrategyOptions defines the strategy type and any options used to create the strategy."'), - '#seLinuxOptions':: d.obj(help='"SELinuxOptions are the labels to be applied to the container"'), - seLinuxOptions: { - '#withLevel':: d.fn(help='"Level is SELinux level label that applies to the container."', args=[d.arg(name='level', type=d.T.string)]), - withLevel(level): { seLinuxOptions+: { level: level } }, - '#withRole':: d.fn(help='"Role is a SELinux role label that applies to the container."', args=[d.arg(name='role', type=d.T.string)]), - withRole(role): { seLinuxOptions+: { role: role } }, - '#withType':: d.fn(help='"Type is a SELinux type label that applies to the container."', args=[d.arg(name='type', type=d.T.string)]), - withType(type): { seLinuxOptions+: { type: type } }, - '#withUser':: d.fn(help='"User is a SELinux user label that applies to the container."', args=[d.arg(name='user', type=d.T.string)]), - withUser(user): { seLinuxOptions+: { user: user } }, - }, - '#withRule':: d.fn(help='"rule is the strategy that will dictate the allowable labels that may be set."', args=[d.arg(name='rule', type=d.T.string)]), - withRule(rule): { rule: rule }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1beta1/supplementalGroupsStrategyOptions.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1beta1/supplementalGroupsStrategyOptions.libsonnet deleted file mode 100644 index e9505d76d0d..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1beta1/supplementalGroupsStrategyOptions.libsonnet +++ /dev/null @@ -1,12 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='supplementalGroupsStrategyOptions', url='', help='"SupplementalGroupsStrategyOptions defines the strategy type and options used to create the strategy."'), - '#withRanges':: d.fn(help='"ranges are the allowed ranges of supplemental groups. If you would like to force a single supplemental group then supply a single range with the same start and end. Required for MustRunAs."', args=[d.arg(name='ranges', type=d.T.array)]), - withRanges(ranges): { ranges: if std.isArray(v=ranges) then ranges else [ranges] }, - '#withRangesMixin':: d.fn(help='"ranges are the allowed ranges of supplemental groups. If you would like to force a single supplemental group then supply a single range with the same start and end. 
Required for MustRunAs."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ranges', type=d.T.array)]), - withRangesMixin(ranges): { ranges+: if std.isArray(v=ranges) then ranges else [ranges] }, - '#withRule':: d.fn(help='"rule is the strategy that will dictate what supplemental groups is used in the SecurityContext."', args=[d.arg(name='rule', type=d.T.string)]), - withRule(rule): { rule: rule }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1alpha1/clusterRole.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1alpha1/clusterRole.libsonnet deleted file mode 100644 index 01149764463..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1alpha1/clusterRole.libsonnet +++ /dev/null @@ -1,67 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='clusterRole', url='', help='"ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding. Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 ClusterRole, and will no longer be served in v1.22."'), - '#aggregationRule':: d.obj(help='"AggregationRule describes how to locate ClusterRoles to aggregate into the ClusterRole"'), - aggregationRule: { - '#withClusterRoleSelectors':: d.fn(help="\"ClusterRoleSelectors holds a list of selectors which will be used to find ClusterRoles and create the rules. If any of the selectors match, then the ClusterRole's permissions will be added\"", args=[d.arg(name='clusterRoleSelectors', type=d.T.array)]), - withClusterRoleSelectors(clusterRoleSelectors): { aggregationRule+: { clusterRoleSelectors: if std.isArray(v=clusterRoleSelectors) then clusterRoleSelectors else [clusterRoleSelectors] } }, - '#withClusterRoleSelectorsMixin':: d.fn(help="\"ClusterRoleSelectors holds a list of selectors which will be used to find ClusterRoles and create the rules. If any of the selectors match, then the ClusterRole's permissions will be added\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='clusterRoleSelectors', type=d.T.array)]), - withClusterRoleSelectorsMixin(clusterRoleSelectors): { aggregationRule+: { clusterRoleSelectors+: if std.isArray(v=clusterRoleSelectors) then clusterRoleSelectors else [clusterRoleSelectors] } }, - }, - '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), - metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), - withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. 
More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), - withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, - '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), - withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, - '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), - withDeletionGracePeriodSeconds(deletionGracePeriodSeconds): { metadata+: { deletionGracePeriodSeconds: deletionGracePeriodSeconds } }, - '#withDeletionTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='deletionTimestamp', type=d.T.string)]), - withDeletionTimestamp(deletionTimestamp): { metadata+: { deletionTimestamp: deletionTimestamp } }, - '#withFinalizers':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. 
If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), - withGenerateName(generateName): { metadata+: { generateName: generateName } }, - '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), - withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), - withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), - withLabelsMixin(labels): { metadata+: { labels+: labels } }, - '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". 
The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { metadata+: { namespace: namespace } }, - '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, - '#withOwnerReferencesMixin':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, - '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. 
May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), - withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), - withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), - withUid(uid): { metadata+: { uid: uid } }, - }, - '#new':: d.fn(help='new returns an instance of ClusterRole', args=[d.arg(name='name', type=d.T.string)]), - new(name): { - apiVersion: 'rbac.authorization.k8s.io/v1alpha1', - kind: 'ClusterRole', - } + self.metadata.withName(name=name), - '#withRules':: d.fn(help='"Rules holds all the PolicyRules for this ClusterRole"', args=[d.arg(name='rules', type=d.T.array)]), - withRules(rules): { rules: if std.isArray(v=rules) then rules else [rules] }, - '#withRulesMixin':: d.fn(help='"Rules holds all the PolicyRules for this ClusterRole"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='rules', type=d.T.array)]), - withRulesMixin(rules): { rules+: if std.isArray(v=rules) then rules else [rules] }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1alpha1/main.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1alpha1/main.libsonnet deleted file mode 100644 index cababc33d00..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1alpha1/main.libsonnet +++ /dev/null @@ -1,12 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='v1alpha1', url='', help=''), - aggregationRule: (import 'aggregationRule.libsonnet'), - clusterRole: (import 'clusterRole.libsonnet'), - clusterRoleBinding: (import 'clusterRoleBinding.libsonnet'), - policyRule: (import 'policyRule.libsonnet'), - role: (import 'role.libsonnet'), - roleBinding: (import 'roleBinding.libsonnet'), - roleRef: (import 'roleRef.libsonnet'), - subject: (import 'subject.libsonnet'), -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1alpha1/policyRule.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1alpha1/policyRule.libsonnet deleted file mode 100644 index 9b15ccb2c07..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1alpha1/policyRule.libsonnet +++ /dev/null @@ -1,26 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: 
d.pkg(name='policyRule', url='', help='"PolicyRule holds information that describes a policy rule, but does not contain information about who the rule applies to or which namespace the rule applies to."'), - '#withApiGroups':: d.fn(help='"APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of the enumerated resources in any API group will be allowed."', args=[d.arg(name='apiGroups', type=d.T.array)]), - withApiGroups(apiGroups): { apiGroups: if std.isArray(v=apiGroups) then apiGroups else [apiGroups] }, - '#withApiGroupsMixin':: d.fn(help='"APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of the enumerated resources in any API group will be allowed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='apiGroups', type=d.T.array)]), - withApiGroupsMixin(apiGroups): { apiGroups+: if std.isArray(v=apiGroups) then apiGroups else [apiGroups] }, - '#withNonResourceURLs':: d.fn(help='"NonResourceURLs is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path Since non-resource URLs are not namespaced, this field is only applicable for ClusterRoles referenced from a ClusterRoleBinding. Rules can either apply to API resources (such as \\"pods\\" or \\"secrets\\") or non-resource URL paths (such as \\"/api\\"), but not both."', args=[d.arg(name='nonResourceURLs', type=d.T.array)]), - withNonResourceURLs(nonResourceURLs): { nonResourceURLs: if std.isArray(v=nonResourceURLs) then nonResourceURLs else [nonResourceURLs] }, - '#withNonResourceURLsMixin':: d.fn(help='"NonResourceURLs is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path Since non-resource URLs are not namespaced, this field is only applicable for ClusterRoles referenced from a ClusterRoleBinding. Rules can either apply to API resources (such as \\"pods\\" or \\"secrets\\") or non-resource URL paths (such as \\"/api\\"), but not both."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='nonResourceURLs', type=d.T.array)]), - withNonResourceURLsMixin(nonResourceURLs): { nonResourceURLs+: if std.isArray(v=nonResourceURLs) then nonResourceURLs else [nonResourceURLs] }, - '#withResourceNames':: d.fn(help='"ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed."', args=[d.arg(name='resourceNames', type=d.T.array)]), - withResourceNames(resourceNames): { resourceNames: if std.isArray(v=resourceNames) then resourceNames else [resourceNames] }, - '#withResourceNamesMixin':: d.fn(help='"ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='resourceNames', type=d.T.array)]), - withResourceNamesMixin(resourceNames): { resourceNames+: if std.isArray(v=resourceNames) then resourceNames else [resourceNames] }, - '#withResources':: d.fn(help='"Resources is a list of resources this rule applies to. 
ResourceAll represents all resources."', args=[d.arg(name='resources', type=d.T.array)]), - withResources(resources): { resources: if std.isArray(v=resources) then resources else [resources] }, - '#withResourcesMixin':: d.fn(help='"Resources is a list of resources this rule applies to. ResourceAll represents all resources."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='resources', type=d.T.array)]), - withResourcesMixin(resources): { resources+: if std.isArray(v=resources) then resources else [resources] }, - '#withVerbs':: d.fn(help='"Verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule. VerbAll represents all kinds."', args=[d.arg(name='verbs', type=d.T.array)]), - withVerbs(verbs): { verbs: if std.isArray(v=verbs) then verbs else [verbs] }, - '#withVerbsMixin':: d.fn(help='"Verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule. VerbAll represents all kinds."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='verbs', type=d.T.array)]), - withVerbsMixin(verbs): { verbs+: if std.isArray(v=verbs) then verbs else [verbs] }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1alpha1/role.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1alpha1/role.libsonnet deleted file mode 100644 index 0f7d7b3b7ad..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1alpha1/role.libsonnet +++ /dev/null @@ -1,60 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='role', url='', help='"Role is a namespaced, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding. Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 Role, and will no longer be served in v1.22."'), - '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), - metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), - withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), - withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. 
This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, - '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), - withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, - '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), - withDeletionGracePeriodSeconds(deletionGracePeriodSeconds): { metadata+: { deletionGracePeriodSeconds: deletionGracePeriodSeconds } }, - '#withDeletionTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='deletionTimestamp', type=d.T.string)]), - withDeletionTimestamp(deletionTimestamp): { metadata+: { deletionTimestamp: deletionTimestamp } }, - '#withFinalizers':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. 
Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), - withGenerateName(generateName): { metadata+: { generateName: generateName } }, - '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), - withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), - withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), - withLabelsMixin(labels): { metadata+: { labels+: labels } }, - '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. 
A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { metadata+: { namespace: namespace } }, - '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, - '#withOwnerReferencesMixin':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, - '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), - withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), - withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), - withUid(uid): { metadata+: { uid: uid } }, - }, - '#new':: d.fn(help='new returns an instance of Role', args=[d.arg(name='name', type=d.T.string)]), - new(name): { - apiVersion: 'rbac.authorization.k8s.io/v1alpha1', - kind: 'Role', - } + self.metadata.withName(name=name), - '#withRules':: d.fn(help='"Rules holds all the PolicyRules for this Role"', args=[d.arg(name='rules', type=d.T.array)]), - withRules(rules): { rules: if std.isArray(v=rules) then rules else [rules] }, - '#withRulesMixin':: d.fn(help='"Rules holds all the PolicyRules for this Role"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='rules', type=d.T.array)]), - withRulesMixin(rules): { rules+: if std.isArray(v=rules) then rules else [rules] }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1alpha1/subject.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1alpha1/subject.libsonnet deleted file mode 100644 index 29bc1b90432..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1alpha1/subject.libsonnet +++ /dev/null @@ -1,12 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='subject', url='', help='"Subject contains a reference to the object or user identities a role binding applies to. This can either hold a direct API object reference, or a value for non-objects such as user and group names."'), - '#withKind':: d.fn(help='"Kind of object being referenced. Values defined by this API group are \\"User\\", \\"Group\\", and \\"ServiceAccount\\". If the Authorizer does not recognized the kind value, the Authorizer should report an error."', args=[d.arg(name='kind', type=d.T.string)]), - withKind(kind): { kind: kind }, - '#withName':: d.fn(help='"Name of the object being referenced."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { name: name }, - '#withNamespace':: d.fn(help='"Namespace of the referenced object. 
If the object kind is non-namespace, such as \\"User\\" or \\"Group\\", and this value is not empty the Authorizer should report an error."', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { namespace: namespace }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1beta1/aggregationRule.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1beta1/aggregationRule.libsonnet deleted file mode 100644 index 086524f9025..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1beta1/aggregationRule.libsonnet +++ /dev/null @@ -1,10 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='aggregationRule', url='', help='"AggregationRule describes how to locate ClusterRoles to aggregate into the ClusterRole"'), - '#withClusterRoleSelectors':: d.fn(help="\"ClusterRoleSelectors holds a list of selectors which will be used to find ClusterRoles and create the rules. If any of the selectors match, then the ClusterRole's permissions will be added\"", args=[d.arg(name='clusterRoleSelectors', type=d.T.array)]), - withClusterRoleSelectors(clusterRoleSelectors): { clusterRoleSelectors: if std.isArray(v=clusterRoleSelectors) then clusterRoleSelectors else [clusterRoleSelectors] }, - '#withClusterRoleSelectorsMixin':: d.fn(help="\"ClusterRoleSelectors holds a list of selectors which will be used to find ClusterRoles and create the rules. If any of the selectors match, then the ClusterRole's permissions will be added\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='clusterRoleSelectors', type=d.T.array)]), - withClusterRoleSelectorsMixin(clusterRoleSelectors): { clusterRoleSelectors+: if std.isArray(v=clusterRoleSelectors) then clusterRoleSelectors else [clusterRoleSelectors] }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1beta1/clusterRole.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1beta1/clusterRole.libsonnet deleted file mode 100644 index e4ff8663399..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1beta1/clusterRole.libsonnet +++ /dev/null @@ -1,67 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='clusterRole', url='', help='"ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding. Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 ClusterRole, and will no longer be served in v1.22."'), - '#aggregationRule':: d.obj(help='"AggregationRule describes how to locate ClusterRoles to aggregate into the ClusterRole"'), - aggregationRule: { - '#withClusterRoleSelectors':: d.fn(help="\"ClusterRoleSelectors holds a list of selectors which will be used to find ClusterRoles and create the rules. If any of the selectors match, then the ClusterRole's permissions will be added\"", args=[d.arg(name='clusterRoleSelectors', type=d.T.array)]), - withClusterRoleSelectors(clusterRoleSelectors): { aggregationRule+: { clusterRoleSelectors: if std.isArray(v=clusterRoleSelectors) then clusterRoleSelectors else [clusterRoleSelectors] } }, - '#withClusterRoleSelectorsMixin':: d.fn(help="\"ClusterRoleSelectors holds a list of selectors which will be used to find ClusterRoles and create the rules. 
If any of the selectors match, then the ClusterRole's permissions will be added\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='clusterRoleSelectors', type=d.T.array)]), - withClusterRoleSelectorsMixin(clusterRoleSelectors): { aggregationRule+: { clusterRoleSelectors+: if std.isArray(v=clusterRoleSelectors) then clusterRoleSelectors else [clusterRoleSelectors] } }, - }, - '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), - metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), - withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), - withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, - '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), - withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, - '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), - withDeletionGracePeriodSeconds(deletionGracePeriodSeconds): { metadata+: { deletionGracePeriodSeconds: deletionGracePeriodSeconds } }, - '#withDeletionTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='deletionTimestamp', type=d.T.string)]), - withDeletionTimestamp(deletionTimestamp): { metadata+: { deletionTimestamp: deletionTimestamp } }, - '#withFinalizers':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. 
Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), - withGenerateName(generateName): { metadata+: { generateName: generateName } }, - '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), - withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. 
More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), - withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), - withLabelsMixin(labels): { metadata+: { labels+: labels } }, - '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { metadata+: { namespace: namespace } }, - '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. 
There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, - '#withOwnerReferencesMixin':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, - '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), - withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), - withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. 
More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), - withUid(uid): { metadata+: { uid: uid } }, - }, - '#new':: d.fn(help='new returns an instance of ClusterRole', args=[d.arg(name='name', type=d.T.string)]), - new(name): { - apiVersion: 'rbac.authorization.k8s.io/v1beta1', - kind: 'ClusterRole', - } + self.metadata.withName(name=name), - '#withRules':: d.fn(help='"Rules holds all the PolicyRules for this ClusterRole"', args=[d.arg(name='rules', type=d.T.array)]), - withRules(rules): { rules: if std.isArray(v=rules) then rules else [rules] }, - '#withRulesMixin':: d.fn(help='"Rules holds all the PolicyRules for this ClusterRole"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='rules', type=d.T.array)]), - withRulesMixin(rules): { rules+: if std.isArray(v=rules) then rules else [rules] }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1beta1/clusterRoleBinding.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1beta1/clusterRoleBinding.libsonnet deleted file mode 100644 index 6c0a6fbb951..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1beta1/clusterRoleBinding.libsonnet +++ /dev/null @@ -1,69 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='clusterRoleBinding', url='', help='"ClusterRoleBinding references a ClusterRole, but not contain it. It can reference a ClusterRole in the global namespace, and adds who information via Subject. Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 ClusterRoleBinding, and will no longer be served in v1.22."'), - '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), - metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), - withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), - withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, - '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. 
Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), - withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, - '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), - withDeletionGracePeriodSeconds(deletionGracePeriodSeconds): { metadata+: { deletionGracePeriodSeconds: deletionGracePeriodSeconds } }, - '#withDeletionTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='deletionTimestamp', type=d.T.string)]), - withDeletionTimestamp(deletionTimestamp): { metadata+: { deletionTimestamp: deletionTimestamp } }, - '#withFinalizers':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. 
If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), - withGenerateName(generateName): { metadata+: { generateName: generateName } }, - '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), - withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), - withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), - withLabelsMixin(labels): { metadata+: { labels+: labels } }, - '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. 
Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { metadata+: { namespace: namespace } }, - '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, - '#withOwnerReferencesMixin':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, - '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), - withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), - withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. 
It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), - withUid(uid): { metadata+: { uid: uid } }, - }, - '#new':: d.fn(help='new returns an instance of ClusterRoleBinding', args=[d.arg(name='name', type=d.T.string)]), - new(name): { - apiVersion: 'rbac.authorization.k8s.io/v1beta1', - kind: 'ClusterRoleBinding', - } + self.metadata.withName(name=name), - '#roleRef':: d.obj(help='"RoleRef contains information that points to the role being used"'), - roleRef: { - '#withApiGroup':: d.fn(help='"APIGroup is the group for the resource being referenced"', args=[d.arg(name='apiGroup', type=d.T.string)]), - withApiGroup(apiGroup): { roleRef+: { apiGroup: apiGroup } }, - '#withKind':: d.fn(help='"Kind is the type of resource being referenced"', args=[d.arg(name='kind', type=d.T.string)]), - withKind(kind): { roleRef+: { kind: kind } }, - '#withName':: d.fn(help='"Name is the name of resource being referenced"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { roleRef+: { name: name } }, - }, - '#withSubjects':: d.fn(help='"Subjects holds references to the objects the role applies to."', args=[d.arg(name='subjects', type=d.T.array)]), - withSubjects(subjects): { subjects: if std.isArray(v=subjects) then subjects else [subjects] }, - '#withSubjectsMixin':: d.fn(help='"Subjects holds references to the objects the role applies to."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='subjects', type=d.T.array)]), - withSubjectsMixin(subjects): { subjects+: if std.isArray(v=subjects) then subjects else [subjects] }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1beta1/main.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1beta1/main.libsonnet deleted file mode 100644 index 67591074ee6..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1beta1/main.libsonnet +++ /dev/null @@ -1,12 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='v1beta1', url='', help=''), - aggregationRule: (import 'aggregationRule.libsonnet'), - clusterRole: (import 'clusterRole.libsonnet'), - clusterRoleBinding: (import 'clusterRoleBinding.libsonnet'), - policyRule: (import 'policyRule.libsonnet'), - role: (import 'role.libsonnet'), - roleBinding: (import 'roleBinding.libsonnet'), - roleRef: (import 'roleRef.libsonnet'), - subject: (import 'subject.libsonnet'), -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1beta1/policyRule.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1beta1/policyRule.libsonnet deleted file mode 100644 index 548338dfcc1..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1beta1/policyRule.libsonnet +++ /dev/null @@ -1,26 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='policyRule', url='', help='"PolicyRule holds information that describes a policy rule, but does not contain information about who the rule applies to or which namespace the rule applies to."'), - '#withApiGroups':: d.fn(help='"APIGroups is the name of the APIGroup that contains the resources. 
If multiple API groups are specified, any action requested against one of the enumerated resources in any API group will be allowed."', args=[d.arg(name='apiGroups', type=d.T.array)]), - withApiGroups(apiGroups): { apiGroups: if std.isArray(v=apiGroups) then apiGroups else [apiGroups] }, - '#withApiGroupsMixin':: d.fn(help='"APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of the enumerated resources in any API group will be allowed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='apiGroups', type=d.T.array)]), - withApiGroupsMixin(apiGroups): { apiGroups+: if std.isArray(v=apiGroups) then apiGroups else [apiGroups] }, - '#withNonResourceURLs':: d.fn(help='"NonResourceURLs is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path Since non-resource URLs are not namespaced, this field is only applicable for ClusterRoles referenced from a ClusterRoleBinding. Rules can either apply to API resources (such as \\"pods\\" or \\"secrets\\") or non-resource URL paths (such as \\"/api\\"), but not both."', args=[d.arg(name='nonResourceURLs', type=d.T.array)]), - withNonResourceURLs(nonResourceURLs): { nonResourceURLs: if std.isArray(v=nonResourceURLs) then nonResourceURLs else [nonResourceURLs] }, - '#withNonResourceURLsMixin':: d.fn(help='"NonResourceURLs is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path Since non-resource URLs are not namespaced, this field is only applicable for ClusterRoles referenced from a ClusterRoleBinding. Rules can either apply to API resources (such as \\"pods\\" or \\"secrets\\") or non-resource URL paths (such as \\"/api\\"), but not both."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='nonResourceURLs', type=d.T.array)]), - withNonResourceURLsMixin(nonResourceURLs): { nonResourceURLs+: if std.isArray(v=nonResourceURLs) then nonResourceURLs else [nonResourceURLs] }, - '#withResourceNames':: d.fn(help='"ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed."', args=[d.arg(name='resourceNames', type=d.T.array)]), - withResourceNames(resourceNames): { resourceNames: if std.isArray(v=resourceNames) then resourceNames else [resourceNames] }, - '#withResourceNamesMixin':: d.fn(help='"ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='resourceNames', type=d.T.array)]), - withResourceNamesMixin(resourceNames): { resourceNames+: if std.isArray(v=resourceNames) then resourceNames else [resourceNames] }, - '#withResources':: d.fn(help="\"Resources is a list of resources this rule applies to. '*' represents all resources in the specified apiGroups. '*/foo' represents the subresource 'foo' for all resources in the specified apiGroups.\"", args=[d.arg(name='resources', type=d.T.array)]), - withResources(resources): { resources: if std.isArray(v=resources) then resources else [resources] }, - '#withResourcesMixin':: d.fn(help="\"Resources is a list of resources this rule applies to. '*' represents all resources in the specified apiGroups. 
'*/foo' represents the subresource 'foo' for all resources in the specified apiGroups.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='resources', type=d.T.array)]), - withResourcesMixin(resources): { resources+: if std.isArray(v=resources) then resources else [resources] }, - '#withVerbs':: d.fn(help='"Verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule. VerbAll represents all kinds."', args=[d.arg(name='verbs', type=d.T.array)]), - withVerbs(verbs): { verbs: if std.isArray(v=verbs) then verbs else [verbs] }, - '#withVerbsMixin':: d.fn(help='"Verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule. VerbAll represents all kinds."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='verbs', type=d.T.array)]), - withVerbsMixin(verbs): { verbs+: if std.isArray(v=verbs) then verbs else [verbs] }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1beta1/role.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1beta1/role.libsonnet deleted file mode 100644 index 26abb745707..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1beta1/role.libsonnet +++ /dev/null @@ -1,60 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='role', url='', help='"Role is a namespaced, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding. Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 Role, and will no longer be served in v1.22."'), - '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), - metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), - withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), - withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, - '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. 
Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), - withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, - '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), - withDeletionGracePeriodSeconds(deletionGracePeriodSeconds): { metadata+: { deletionGracePeriodSeconds: deletionGracePeriodSeconds } }, - '#withDeletionTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='deletionTimestamp', type=d.T.string)]), - withDeletionTimestamp(deletionTimestamp): { metadata+: { deletionTimestamp: deletionTimestamp } }, - '#withFinalizers':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. 
If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), - withGenerateName(generateName): { metadata+: { generateName: generateName } }, - '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), - withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), - withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), - withLabelsMixin(labels): { metadata+: { labels+: labels } }, - '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. 
Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { metadata+: { namespace: namespace } }, - '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, - '#withOwnerReferencesMixin':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, - '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), - withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), - withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. 
It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), - withUid(uid): { metadata+: { uid: uid } }, - }, - '#new':: d.fn(help='new returns an instance of Role', args=[d.arg(name='name', type=d.T.string)]), - new(name): { - apiVersion: 'rbac.authorization.k8s.io/v1beta1', - kind: 'Role', - } + self.metadata.withName(name=name), - '#withRules':: d.fn(help='"Rules holds all the PolicyRules for this Role"', args=[d.arg(name='rules', type=d.T.array)]), - withRules(rules): { rules: if std.isArray(v=rules) then rules else [rules] }, - '#withRulesMixin':: d.fn(help='"Rules holds all the PolicyRules for this Role"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='rules', type=d.T.array)]), - withRulesMixin(rules): { rules+: if std.isArray(v=rules) then rules else [rules] }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1beta1/roleBinding.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1beta1/roleBinding.libsonnet deleted file mode 100644 index 9712e16f485..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1beta1/roleBinding.libsonnet +++ /dev/null @@ -1,69 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='roleBinding', url='', help='"RoleBinding references a role, but does not contain it. It can reference a Role in the same namespace or a ClusterRole in the global namespace. It adds who information via Subjects and namespace information by which namespace it exists in. RoleBindings in a given namespace only have effect in that namespace. Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 RoleBinding, and will no longer be served in v1.22."'), - '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), - metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), - withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), - withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. 
This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, - '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), - withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, - '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), - withDeletionGracePeriodSeconds(deletionGracePeriodSeconds): { metadata+: { deletionGracePeriodSeconds: deletionGracePeriodSeconds } }, - '#withDeletionTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='deletionTimestamp', type=d.T.string)]), - withDeletionTimestamp(deletionTimestamp): { metadata+: { deletionTimestamp: deletionTimestamp } }, - '#withFinalizers':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. 
Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), - withGenerateName(generateName): { metadata+: { generateName: generateName } }, - '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), - withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), - withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), - withLabelsMixin(labels): { metadata+: { labels+: labels } }, - '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. 
A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { metadata+: { namespace: namespace } }, - '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, - '#withOwnerReferencesMixin':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, - '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), - withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), - withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), - withUid(uid): { metadata+: { uid: uid } }, - }, - '#new':: d.fn(help='new returns an instance of RoleBinding', args=[d.arg(name='name', type=d.T.string)]), - new(name): { - apiVersion: 'rbac.authorization.k8s.io/v1beta1', - kind: 'RoleBinding', - } + self.metadata.withName(name=name), - '#roleRef':: d.obj(help='"RoleRef contains information that points to the role being used"'), - roleRef: { - '#withApiGroup':: d.fn(help='"APIGroup is the group for the resource being referenced"', args=[d.arg(name='apiGroup', type=d.T.string)]), - withApiGroup(apiGroup): { roleRef+: { apiGroup: apiGroup } }, - '#withKind':: d.fn(help='"Kind is the type of resource being referenced"', args=[d.arg(name='kind', type=d.T.string)]), - withKind(kind): { roleRef+: { kind: kind } }, - '#withName':: d.fn(help='"Name is the name of resource being referenced"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { roleRef+: { name: name } }, - }, - '#withSubjects':: d.fn(help='"Subjects holds references to the objects the role applies to."', args=[d.arg(name='subjects', type=d.T.array)]), - withSubjects(subjects): { subjects: if std.isArray(v=subjects) then subjects else [subjects] }, - '#withSubjectsMixin':: d.fn(help='"Subjects holds references to the objects the role applies to."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='subjects', type=d.T.array)]), - withSubjectsMixin(subjects): { subjects+: if std.isArray(v=subjects) then subjects else [subjects] }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1beta1/roleRef.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1beta1/roleRef.libsonnet deleted file mode 100644 index 870b3ac95ae..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1beta1/roleRef.libsonnet +++ /dev/null @@ -1,12 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='roleRef', url='', help='"RoleRef contains information that points to the role being used"'), - '#withApiGroup':: d.fn(help='"APIGroup is the group for the resource being referenced"', args=[d.arg(name='apiGroup', type=d.T.string)]), - withApiGroup(apiGroup): { apiGroup: apiGroup }, - '#withKind':: d.fn(help='"Kind is the type of resource being referenced"', args=[d.arg(name='kind', type=d.T.string)]), - withKind(kind): { kind: kind }, - '#withName':: d.fn(help='"Name is the name of resource being referenced"', 
args=[d.arg(name='name', type=d.T.string)]), - withName(name): { name: name }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/scheduling/v1alpha1/priorityClass.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/scheduling/v1alpha1/priorityClass.libsonnet deleted file mode 100644 index 5339b8fa918..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/scheduling/v1alpha1/priorityClass.libsonnet +++ /dev/null @@ -1,64 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='priorityClass', url='', help='"DEPRECATED - This group version of PriorityClass is deprecated by scheduling.k8s.io/v1/PriorityClass. PriorityClass defines mapping from a priority class name to the priority integer value. The value can be any valid integer."'), - '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), - metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), - withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), - withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, - '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), - withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, - '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), - withDeletionGracePeriodSeconds(deletionGracePeriodSeconds): { metadata+: { deletionGracePeriodSeconds: deletionGracePeriodSeconds } }, - '#withDeletionTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. 
Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='deletionTimestamp', type=d.T.string)]), - withDeletionTimestamp(deletionTimestamp): { metadata+: { deletionTimestamp: deletionTimestamp } }, - '#withFinalizers':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), - withGenerateName(generateName): { metadata+: { generateName: generateName } }, - '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), - withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), - withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), - withLabelsMixin(labels): { metadata+: { labels+: labels } }, - '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. 
More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { metadata+: { namespace: namespace } }, - '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, - '#withOwnerReferencesMixin':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, - '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), - withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), - withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. 
More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), - withUid(uid): { metadata+: { uid: uid } }, - }, - '#new':: d.fn(help='new returns an instance of PriorityClass', args=[d.arg(name='name', type=d.T.string)]), - new(name): { - apiVersion: 'scheduling.k8s.io/v1alpha1', - kind: 'PriorityClass', - } + self.metadata.withName(name=name), - '#withDescription':: d.fn(help='"description is an arbitrary string that usually provides guidelines on when this priority class should be used."', args=[d.arg(name='description', type=d.T.string)]), - withDescription(description): { description: description }, - '#withGlobalDefault':: d.fn(help='"globalDefault specifies whether this PriorityClass should be considered as the default priority for pods that do not have any priority class. Only one PriorityClass can be marked as `globalDefault`. However, if more than one PriorityClasses exists with their `globalDefault` field set to true, the smallest value of such global default PriorityClasses will be used as the default priority."', args=[d.arg(name='globalDefault', type=d.T.boolean)]), - withGlobalDefault(globalDefault): { globalDefault: globalDefault }, - '#withPreemptionPolicy':: d.fn(help='"PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset. This field is beta-level, gated by the NonPreemptingPriority feature-gate."', args=[d.arg(name='preemptionPolicy', type=d.T.string)]), - withPreemptionPolicy(preemptionPolicy): { preemptionPolicy: preemptionPolicy }, - '#withValue':: d.fn(help='"The value of this priority class. This is the actual priority that pods receive when they have the name of this class in their pod spec."', args=[d.arg(name='value', type=d.T.integer)]), - withValue(value): { value: value }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/scheduling/v1beta1/main.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/scheduling/v1beta1/main.libsonnet deleted file mode 100644 index 3b681adec03..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/scheduling/v1beta1/main.libsonnet +++ /dev/null @@ -1,5 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='v1beta1', url='', help=''), - priorityClass: (import 'priorityClass.libsonnet'), -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/scheduling/v1beta1/priorityClass.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/scheduling/v1beta1/priorityClass.libsonnet deleted file mode 100644 index 6530293e600..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/scheduling/v1beta1/priorityClass.libsonnet +++ /dev/null @@ -1,64 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='priorityClass', url='', help='"DEPRECATED - This group version of PriorityClass is deprecated by scheduling.k8s.io/v1/PriorityClass. PriorityClass defines mapping from a priority class name to the priority integer value. 
The value can be any valid integer."'), - '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), - metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), - withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), - withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, - '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), - withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, - '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), - withDeletionGracePeriodSeconds(deletionGracePeriodSeconds): { metadata+: { deletionGracePeriodSeconds: deletionGracePeriodSeconds } }, - '#withDeletionTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='deletionTimestamp', type=d.T.string)]), - withDeletionTimestamp(deletionTimestamp): { metadata+: { deletionTimestamp: deletionTimestamp } }, - '#withFinalizers':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. 
If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), - withGenerateName(generateName): { metadata+: { generateName: generateName } }, - '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), - withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. 
More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), - withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), - withLabelsMixin(labels): { metadata+: { labels+: labels } }, - '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { metadata+: { namespace: namespace } }, - '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. 
There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, - '#withOwnerReferencesMixin':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, - '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), - withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), - withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), - withUid(uid): { metadata+: { uid: uid } }, - }, - '#new':: d.fn(help='new returns an instance of PriorityClass', args=[d.arg(name='name', type=d.T.string)]), - new(name): { - apiVersion: 'scheduling.k8s.io/v1beta1', - kind: 'PriorityClass', - } + self.metadata.withName(name=name), - '#withDescription':: d.fn(help='"description is an arbitrary string that usually provides guidelines on when this priority class should be used."', args=[d.arg(name='description', type=d.T.string)]), - withDescription(description): { description: description }, - '#withGlobalDefault':: d.fn(help='"globalDefault specifies whether this PriorityClass should be considered as the default priority for pods that do not have any priority class. Only one PriorityClass can be marked as `globalDefault`. 
However, if more than one PriorityClasses exists with their `globalDefault` field set to true, the smallest value of such global default PriorityClasses will be used as the default priority."', args=[d.arg(name='globalDefault', type=d.T.boolean)]), - withGlobalDefault(globalDefault): { globalDefault: globalDefault }, - '#withPreemptionPolicy':: d.fn(help='"PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset. This field is beta-level, gated by the NonPreemptingPriority feature-gate."', args=[d.arg(name='preemptionPolicy', type=d.T.string)]), - withPreemptionPolicy(preemptionPolicy): { preemptionPolicy: preemptionPolicy }, - '#withValue':: d.fn(help='"The value of this priority class. This is the actual priority that pods receive when they have the name of this class in their pod spec."', args=[d.arg(name='value', type=d.T.integer)]), - withValue(value): { value: value }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1/csiDriverSpec.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1/csiDriverSpec.libsonnet deleted file mode 100644 index f07c80be789..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1/csiDriverSpec.libsonnet +++ /dev/null @@ -1,24 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='csiDriverSpec', url='', help='"CSIDriverSpec is the specification of a CSIDriver."'), - '#withAttachRequired':: d.fn(help='"attachRequired indicates this CSI volume driver requires an attach operation (because it implements the CSI ControllerPublishVolume() method), and that the Kubernetes attach detach controller should call the attach volume interface which checks the volumeattachment status and waits until the volume is attached before proceeding to mounting. The CSI external-attacher coordinates with CSI volume driver and updates the volumeattachment status when the attach operation is complete. If the CSIDriverRegistry feature gate is enabled and the value is specified to false, the attach operation will be skipped. Otherwise the attach operation will be called.\\n\\nThis field is immutable."', args=[d.arg(name='attachRequired', type=d.T.boolean)]), - withAttachRequired(attachRequired): { attachRequired: attachRequired }, - '#withFsGroupPolicy':: d.fn(help='"Defines if the underlying volume supports changing ownership and permission of the volume before being mounted. Refer to the specific FSGroupPolicy values for additional details. This field is alpha-level, and is only honored by servers that enable the CSIVolumeFSGroupPolicy feature gate.\\n\\nThis field is immutable."', args=[d.arg(name='fsGroupPolicy', type=d.T.string)]), - withFsGroupPolicy(fsGroupPolicy): { fsGroupPolicy: fsGroupPolicy }, - '#withPodInfoOnMount':: d.fn(help="\"If set to true, podInfoOnMount indicates this CSI volume driver requires additional pod information (like podName, podUID, etc.) during mount operations. If set to false, pod information will not be passed on mount. Default is false. The CSI driver specifies podInfoOnMount as part of driver deployment. If true, Kubelet will pass pod information as VolumeContext in the CSI NodePublishVolume() calls. The CSI driver is responsible for parsing and validating the information passed in as VolumeContext. The following VolumeConext will be passed if podInfoOnMount is set to true. 
This list might grow, but the prefix will be used. \\\"csi.storage.k8s.io/pod.name\\\": pod.Name \\\"csi.storage.k8s.io/pod.namespace\\\": pod.Namespace \\\"csi.storage.k8s.io/pod.uid\\\": string(pod.UID) \\\"csi.storage.k8s.io/ephemeral\\\": \\\"true\\\" if the volume is an ephemeral inline volume\\n defined by a CSIVolumeSource, otherwise \\\"false\\\"\\n\\n\\\"csi.storage.k8s.io/ephemeral\\\" is a new feature in Kubernetes 1.16. It is only required for drivers which support both the \\\"Persistent\\\" and \\\"Ephemeral\\\" VolumeLifecycleMode. Other drivers can leave pod info disabled and/or ignore this field. As Kubernetes 1.15 doesn't support this field, drivers can only support one mode when deployed on such a cluster and the deployment determines which mode that is, for example via a command line parameter of the driver.\\n\\nThis field is immutable.\"", args=[d.arg(name='podInfoOnMount', type=d.T.boolean)]), - withPodInfoOnMount(podInfoOnMount): { podInfoOnMount: podInfoOnMount }, - '#withRequiresRepublish':: d.fn(help='"RequiresRepublish indicates the CSI driver wants `NodePublishVolume` being periodically called to reflect any possible change in the mounted volume. This field defaults to false.\\n\\nNote: After a successful initial NodePublishVolume call, subsequent calls to NodePublishVolume should only update the contents of the volume. New mount points will not be seen by a running container.\\n\\nThis is a beta feature and only available when the CSIServiceAccountToken feature is enabled."', args=[d.arg(name='requiresRepublish', type=d.T.boolean)]), - withRequiresRepublish(requiresRepublish): { requiresRepublish: requiresRepublish }, - '#withStorageCapacity':: d.fn(help='"If set to true, storageCapacity indicates that the CSI volume driver wants pod scheduling to consider the storage capacity that the driver deployment will report by creating CSIStorageCapacity objects with capacity information.\\n\\nThe check can be enabled immediately when deploying a driver. In that case, provisioning new volumes with late binding will pause until the driver deployment has published some suitable CSIStorageCapacity object.\\n\\nAlternatively, the driver can be deployed with the field unset or false and it can be flipped later when storage capacity information has been published.\\n\\nThis field is immutable.\\n\\nThis is a beta field and only available when the CSIStorageCapacity feature is enabled. The default is false."', args=[d.arg(name='storageCapacity', type=d.T.boolean)]), - withStorageCapacity(storageCapacity): { storageCapacity: storageCapacity }, - '#withTokenRequests':: d.fn(help="\"TokenRequests indicates the CSI driver needs pods' service account tokens it is mounting volume for to do necessary authentication. Kubelet will pass the tokens in VolumeContext in the CSI NodePublishVolume calls. The CSI driver should parse and validate the following VolumeContext: \\\"csi.storage.k8s.io/serviceAccount.tokens\\\": {\\n \\\"\u003caudience\u003e\\\": {\\n \\\"token\\\": \u003ctoken\u003e,\\n \\\"expirationTimestamp\\\": \u003cexpiration timestamp in RFC3339\u003e,\\n },\\n ...\\n}\\n\\nNote: Audience in each TokenRequest should be different and at most one token is empty string. 
To receive a new token after expiry, RequiresRepublish can be used to trigger NodePublishVolume periodically.\\n\\nThis is a beta feature and only available when the CSIServiceAccountToken feature is enabled.\"", args=[d.arg(name='tokenRequests', type=d.T.array)]), - withTokenRequests(tokenRequests): { tokenRequests: if std.isArray(v=tokenRequests) then tokenRequests else [tokenRequests] }, - '#withTokenRequestsMixin':: d.fn(help="\"TokenRequests indicates the CSI driver needs pods' service account tokens it is mounting volume for to do necessary authentication. Kubelet will pass the tokens in VolumeContext in the CSI NodePublishVolume calls. The CSI driver should parse and validate the following VolumeContext: \\\"csi.storage.k8s.io/serviceAccount.tokens\\\": {\\n \\\"\u003caudience\u003e\\\": {\\n \\\"token\\\": \u003ctoken\u003e,\\n \\\"expirationTimestamp\\\": \u003cexpiration timestamp in RFC3339\u003e,\\n },\\n ...\\n}\\n\\nNote: Audience in each TokenRequest should be different and at most one token is empty string. To receive a new token after expiry, RequiresRepublish can be used to trigger NodePublishVolume periodically.\\n\\nThis is a beta feature and only available when the CSIServiceAccountToken feature is enabled.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='tokenRequests', type=d.T.array)]), - withTokenRequestsMixin(tokenRequests): { tokenRequests+: if std.isArray(v=tokenRequests) then tokenRequests else [tokenRequests] }, - '#withVolumeLifecycleModes':: d.fn(help='"volumeLifecycleModes defines what kind of volumes this CSI volume driver supports. The default if the list is empty is \\"Persistent\\", which is the usage defined by the CSI specification and implemented in Kubernetes via the usual PV/PVC mechanism. The other mode is \\"Ephemeral\\". In this mode, volumes are defined inline inside the pod spec with CSIVolumeSource and their lifecycle is tied to the lifecycle of that pod. A driver has to be aware of this because it is only going to get a NodePublishVolume call for such a volume. For more information about implementing this mode, see https://kubernetes-csi.github.io/docs/ephemeral-local-volumes.html A driver can support one or more of these modes and more modes may be added in the future. This field is beta.\\n\\nThis field is immutable."', args=[d.arg(name='volumeLifecycleModes', type=d.T.array)]), - withVolumeLifecycleModes(volumeLifecycleModes): { volumeLifecycleModes: if std.isArray(v=volumeLifecycleModes) then volumeLifecycleModes else [volumeLifecycleModes] }, - '#withVolumeLifecycleModesMixin':: d.fn(help='"volumeLifecycleModes defines what kind of volumes this CSI volume driver supports. The default if the list is empty is \\"Persistent\\", which is the usage defined by the CSI specification and implemented in Kubernetes via the usual PV/PVC mechanism. The other mode is \\"Ephemeral\\". In this mode, volumes are defined inline inside the pod spec with CSIVolumeSource and their lifecycle is tied to the lifecycle of that pod. A driver has to be aware of this because it is only going to get a NodePublishVolume call for such a volume. For more information about implementing this mode, see https://kubernetes-csi.github.io/docs/ephemeral-local-volumes.html A driver can support one or more of these modes and more modes may be added in the future. 
This field is beta.\\n\\nThis field is immutable."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='volumeLifecycleModes', type=d.T.array)]), - withVolumeLifecycleModesMixin(volumeLifecycleModes): { volumeLifecycleModes+: if std.isArray(v=volumeLifecycleModes) then volumeLifecycleModes else [volumeLifecycleModes] }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1/main.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1/main.libsonnet deleted file mode 100644 index 86d2b1336c5..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1/main.libsonnet +++ /dev/null @@ -1,17 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='v1', url='', help=''), - csiDriver: (import 'csiDriver.libsonnet'), - csiDriverSpec: (import 'csiDriverSpec.libsonnet'), - csiNode: (import 'csiNode.libsonnet'), - csiNodeDriver: (import 'csiNodeDriver.libsonnet'), - csiNodeSpec: (import 'csiNodeSpec.libsonnet'), - storageClass: (import 'storageClass.libsonnet'), - tokenRequest: (import 'tokenRequest.libsonnet'), - volumeAttachment: (import 'volumeAttachment.libsonnet'), - volumeAttachmentSource: (import 'volumeAttachmentSource.libsonnet'), - volumeAttachmentSpec: (import 'volumeAttachmentSpec.libsonnet'), - volumeAttachmentStatus: (import 'volumeAttachmentStatus.libsonnet'), - volumeError: (import 'volumeError.libsonnet'), - volumeNodeResources: (import 'volumeNodeResources.libsonnet'), -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1/volumeAttachmentSource.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1/volumeAttachmentSource.libsonnet deleted file mode 100644 index 47ca4b20576..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1/volumeAttachmentSource.libsonnet +++ /dev/null @@ -1,419 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='volumeAttachmentSource', url='', help='"VolumeAttachmentSource represents a volume that should be attached. Right now only PersistenVolumes can be attached via external attacher, in future we may allow also inline volumes in pods. Exactly one member can be set."'), - '#inlineVolumeSpec':: d.obj(help='"PersistentVolumeSpec is the specification of a persistent volume."'), - inlineVolumeSpec: { - '#awsElasticBlockStore':: d.obj(help='"Represents a Persistent Disk resource in AWS.\\n\\nAn AWS EBS disk must exist before mounting to a container. The disk must also be in the same AWS zone as the kubelet. An AWS EBS disk can only be mounted as read/write once. AWS EBS volumes support ownership management and SELinux relabeling."'), - awsElasticBlockStore: { - '#withFsType':: d.fn(help='"Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore"', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { inlineVolumeSpec+: { awsElasticBlockStore+: { fsType: fsType } } }, - '#withPartition':: d.fn(help='"The partition in the volume that you want to mount. If omitted, the default is to mount by volume name. 
Examples: For volume /dev/sda1, you specify the partition as \\"1\\". Similarly, the volume partition for /dev/sda is \\"0\\" (or you can leave the property empty)."', args=[d.arg(name='partition', type=d.T.integer)]), - withPartition(partition): { inlineVolumeSpec+: { awsElasticBlockStore+: { partition: partition } } }, - '#withReadOnly':: d.fn(help='"Specify \\"true\\" to force and set the ReadOnly property in VolumeMounts to \\"true\\". If omitted, the default is \\"false\\". More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore"', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { inlineVolumeSpec+: { awsElasticBlockStore+: { readOnly: readOnly } } }, - '#withVolumeID':: d.fn(help='"Unique ID of the persistent disk resource in AWS (Amazon EBS volume). More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore"', args=[d.arg(name='volumeID', type=d.T.string)]), - withVolumeID(volumeID): { inlineVolumeSpec+: { awsElasticBlockStore+: { volumeID: volumeID } } }, - }, - '#azureDisk':: d.obj(help='"AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod."'), - azureDisk: { - '#withCachingMode':: d.fn(help='"Host Caching mode: None, Read Only, Read Write."', args=[d.arg(name='cachingMode', type=d.T.string)]), - withCachingMode(cachingMode): { inlineVolumeSpec+: { azureDisk+: { cachingMode: cachingMode } } }, - '#withDiskName':: d.fn(help='"The Name of the data disk in the blob storage"', args=[d.arg(name='diskName', type=d.T.string)]), - withDiskName(diskName): { inlineVolumeSpec+: { azureDisk+: { diskName: diskName } } }, - '#withDiskURI':: d.fn(help='"The URI the data disk in the blob storage"', args=[d.arg(name='diskURI', type=d.T.string)]), - withDiskURI(diskURI): { inlineVolumeSpec+: { azureDisk+: { diskURI: diskURI } } }, - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { inlineVolumeSpec+: { azureDisk+: { fsType: fsType } } }, - '#withKind':: d.fn(help='"Expected values Shared: multiple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared"', args=[d.arg(name='kind', type=d.T.string)]), - withKind(kind): { inlineVolumeSpec+: { azureDisk+: { kind: kind } } }, - '#withReadOnly':: d.fn(help='"Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { inlineVolumeSpec+: { azureDisk+: { readOnly: readOnly } } }, - }, - '#azureFile':: d.obj(help='"AzureFile represents an Azure File Service mount on the host and bind mount to the pod."'), - azureFile: { - '#withReadOnly':: d.fn(help='"Defaults to false (read/write). 
ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { inlineVolumeSpec+: { azureFile+: { readOnly: readOnly } } }, - '#withSecretName':: d.fn(help='"the name of secret that contains Azure Storage Account Name and Key"', args=[d.arg(name='secretName', type=d.T.string)]), - withSecretName(secretName): { inlineVolumeSpec+: { azureFile+: { secretName: secretName } } }, - '#withSecretNamespace':: d.fn(help='"the namespace of the secret that contains Azure Storage Account Name and Key default is the same as the Pod"', args=[d.arg(name='secretNamespace', type=d.T.string)]), - withSecretNamespace(secretNamespace): { inlineVolumeSpec+: { azureFile+: { secretNamespace: secretNamespace } } }, - '#withShareName':: d.fn(help='"Share Name"', args=[d.arg(name='shareName', type=d.T.string)]), - withShareName(shareName): { inlineVolumeSpec+: { azureFile+: { shareName: shareName } } }, - }, - '#cephfs':: d.obj(help='"Represents a Ceph Filesystem mount that lasts the lifetime of a pod Cephfs volumes do not support ownership management or SELinux relabeling."'), - cephfs: { - '#secretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), - secretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { inlineVolumeSpec+: { cephfs+: { secretRef+: { name: name } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { inlineVolumeSpec+: { cephfs+: { secretRef+: { namespace: namespace } } } }, - }, - '#withMonitors':: d.fn(help='"Required: Monitors is a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='monitors', type=d.T.array)]), - withMonitors(monitors): { inlineVolumeSpec+: { cephfs+: { monitors: if std.isArray(v=monitors) then monitors else [monitors] } } }, - '#withMonitorsMixin':: d.fn(help='"Required: Monitors is a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='monitors', type=d.T.array)]), - withMonitorsMixin(monitors): { inlineVolumeSpec+: { cephfs+: { monitors+: if std.isArray(v=monitors) then monitors else [monitors] } } }, - '#withPath':: d.fn(help='"Optional: Used as the mounted root, rather than the full Ceph tree, default is /"', args=[d.arg(name='path', type=d.T.string)]), - withPath(path): { inlineVolumeSpec+: { cephfs+: { path: path } } }, - '#withReadOnly':: d.fn(help='"Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. 
More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { inlineVolumeSpec+: { cephfs+: { readOnly: readOnly } } }, - '#withSecretFile':: d.fn(help='"Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='secretFile', type=d.T.string)]), - withSecretFile(secretFile): { inlineVolumeSpec+: { cephfs+: { secretFile: secretFile } } }, - '#withUser':: d.fn(help='"Optional: User is the rados user name, default is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='user', type=d.T.string)]), - withUser(user): { inlineVolumeSpec+: { cephfs+: { user: user } } }, - }, - '#cinder':: d.obj(help='"Represents a cinder volume resource in Openstack. A Cinder volume must exist before mounting to a container. The volume must also be in the same region as the kubelet. Cinder volumes support ownership management and SELinux relabeling."'), - cinder: { - '#secretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), - secretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { inlineVolumeSpec+: { cinder+: { secretRef+: { name: name } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { inlineVolumeSpec+: { cinder+: { secretRef+: { namespace: namespace } } } }, - }, - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://examples.k8s.io/mysql-cinder-pd/README.md"', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { inlineVolumeSpec+: { cinder+: { fsType: fsType } } }, - '#withReadOnly':: d.fn(help='"Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/mysql-cinder-pd/README.md"', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { inlineVolumeSpec+: { cinder+: { readOnly: readOnly } } }, - '#withVolumeID':: d.fn(help='"volume id used to identify the volume in cinder. More info: https://examples.k8s.io/mysql-cinder-pd/README.md"', args=[d.arg(name='volumeID', type=d.T.string)]), - withVolumeID(volumeID): { inlineVolumeSpec+: { cinder+: { volumeID: volumeID } } }, - }, - '#claimRef':: d.obj(help='"ObjectReference contains enough information to let you inspect or modify the referred object."'), - claimRef: { - '#withApiVersion':: d.fn(help='"API version of the referent."', args=[d.arg(name='apiVersion', type=d.T.string)]), - withApiVersion(apiVersion): { inlineVolumeSpec+: { claimRef+: { apiVersion: apiVersion } } }, - '#withFieldPath':: d.fn(help='"If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. 
For example, if the object reference is to a container within a pod, this would take on a value like: \\"spec.containers{name}\\" (where \\"name\\" refers to the name of the container that triggered the event) or if no container name is specified \\"spec.containers[2]\\" (container with index 2 in this pod). This syntax is chosen only to have some well-defined way of referencing a part of an object."', args=[d.arg(name='fieldPath', type=d.T.string)]), - withFieldPath(fieldPath): { inlineVolumeSpec+: { claimRef+: { fieldPath: fieldPath } } }, - '#withKind':: d.fn(help='"Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds"', args=[d.arg(name='kind', type=d.T.string)]), - withKind(kind): { inlineVolumeSpec+: { claimRef+: { kind: kind } } }, - '#withName':: d.fn(help='"Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { inlineVolumeSpec+: { claimRef+: { name: name } } }, - '#withNamespace':: d.fn(help='"Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/"', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { inlineVolumeSpec+: { claimRef+: { namespace: namespace } } }, - '#withResourceVersion':: d.fn(help='"Specific resourceVersion to which this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), - withResourceVersion(resourceVersion): { inlineVolumeSpec+: { claimRef+: { resourceVersion: resourceVersion } } }, - '#withUid':: d.fn(help='"UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids"', args=[d.arg(name='uid', type=d.T.string)]), - withUid(uid): { inlineVolumeSpec+: { claimRef+: { uid: uid } } }, - }, - '#csi':: d.obj(help='"Represents storage that is managed by an external CSI volume driver (Beta feature)"'), - csi: { - '#controllerExpandSecretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), - controllerExpandSecretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { inlineVolumeSpec+: { csi+: { controllerExpandSecretRef+: { name: name } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { inlineVolumeSpec+: { csi+: { controllerExpandSecretRef+: { namespace: namespace } } } }, - }, - '#controllerPublishSecretRef':: d.obj(help='"SecretReference represents a Secret Reference. 
It has enough information to retrieve secret in any namespace"'), - controllerPublishSecretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { inlineVolumeSpec+: { csi+: { controllerPublishSecretRef+: { name: name } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { inlineVolumeSpec+: { csi+: { controllerPublishSecretRef+: { namespace: namespace } } } }, - }, - '#nodePublishSecretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), - nodePublishSecretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { inlineVolumeSpec+: { csi+: { nodePublishSecretRef+: { name: name } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { inlineVolumeSpec+: { csi+: { nodePublishSecretRef+: { namespace: namespace } } } }, - }, - '#nodeStageSecretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), - nodeStageSecretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { inlineVolumeSpec+: { csi+: { nodeStageSecretRef+: { name: name } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { inlineVolumeSpec+: { csi+: { nodeStageSecretRef+: { namespace: namespace } } } }, - }, - '#withDriver':: d.fn(help='"Driver is the name of the driver to use for this volume. Required."', args=[d.arg(name='driver', type=d.T.string)]), - withDriver(driver): { inlineVolumeSpec+: { csi+: { driver: driver } } }, - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\"."', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { inlineVolumeSpec+: { csi+: { fsType: fsType } } }, - '#withReadOnly':: d.fn(help='"Optional: The value to pass to ControllerPublishVolumeRequest. Defaults to false (read/write)."', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { inlineVolumeSpec+: { csi+: { readOnly: readOnly } } }, - '#withVolumeAttributes':: d.fn(help='"Attributes of the volume to publish."', args=[d.arg(name='volumeAttributes', type=d.T.object)]), - withVolumeAttributes(volumeAttributes): { inlineVolumeSpec+: { csi+: { volumeAttributes: volumeAttributes } } }, - '#withVolumeAttributesMixin':: d.fn(help='"Attributes of the volume to publish."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='volumeAttributes', type=d.T.object)]), - withVolumeAttributesMixin(volumeAttributes): { inlineVolumeSpec+: { csi+: { volumeAttributes+: volumeAttributes } } }, - '#withVolumeHandle':: d.fn(help='"VolumeHandle is the unique volume name returned by the CSI volume plugin’s CreateVolume to refer to the volume on all subsequent calls. 
Required."', args=[d.arg(name='volumeHandle', type=d.T.string)]), - withVolumeHandle(volumeHandle): { inlineVolumeSpec+: { csi+: { volumeHandle: volumeHandle } } }, - }, - '#fc':: d.obj(help='"Represents a Fibre Channel volume. Fibre Channel volumes can only be mounted as read/write once. Fibre Channel volumes support ownership management and SELinux relabeling."'), - fc: { - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { inlineVolumeSpec+: { fc+: { fsType: fsType } } }, - '#withLun':: d.fn(help='"Optional: FC target lun number"', args=[d.arg(name='lun', type=d.T.integer)]), - withLun(lun): { inlineVolumeSpec+: { fc+: { lun: lun } } }, - '#withReadOnly':: d.fn(help='"Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { inlineVolumeSpec+: { fc+: { readOnly: readOnly } } }, - '#withTargetWWNs':: d.fn(help='"Optional: FC target worldwide names (WWNs)"', args=[d.arg(name='targetWWNs', type=d.T.array)]), - withTargetWWNs(targetWWNs): { inlineVolumeSpec+: { fc+: { targetWWNs: if std.isArray(v=targetWWNs) then targetWWNs else [targetWWNs] } } }, - '#withTargetWWNsMixin':: d.fn(help='"Optional: FC target worldwide names (WWNs)"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='targetWWNs', type=d.T.array)]), - withTargetWWNsMixin(targetWWNs): { inlineVolumeSpec+: { fc+: { targetWWNs+: if std.isArray(v=targetWWNs) then targetWWNs else [targetWWNs] } } }, - '#withWwids':: d.fn(help='"Optional: FC volume world wide identifiers (wwids) Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously."', args=[d.arg(name='wwids', type=d.T.array)]), - withWwids(wwids): { inlineVolumeSpec+: { fc+: { wwids: if std.isArray(v=wwids) then wwids else [wwids] } } }, - '#withWwidsMixin':: d.fn(help='"Optional: FC volume world wide identifiers (wwids) Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='wwids', type=d.T.array)]), - withWwidsMixin(wwids): { inlineVolumeSpec+: { fc+: { wwids+: if std.isArray(v=wwids) then wwids else [wwids] } } }, - }, - '#flexVolume':: d.obj(help='"FlexPersistentVolumeSource represents a generic persistent volume resource that is provisioned/attached using an exec based plugin."'), - flexVolume: { - '#secretRef':: d.obj(help='"SecretReference represents a Secret Reference. 
It has enough information to retrieve secret in any namespace"'), - secretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { inlineVolumeSpec+: { flexVolume+: { secretRef+: { name: name } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { inlineVolumeSpec+: { flexVolume+: { secretRef+: { namespace: namespace } } } }, - }, - '#withDriver':: d.fn(help='"Driver is the name of the driver to use for this volume."', args=[d.arg(name='driver', type=d.T.string)]), - withDriver(driver): { inlineVolumeSpec+: { flexVolume+: { driver: driver } } }, - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". The default filesystem depends on FlexVolume script."', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { inlineVolumeSpec+: { flexVolume+: { fsType: fsType } } }, - '#withOptions':: d.fn(help='"Optional: Extra command options if any."', args=[d.arg(name='options', type=d.T.object)]), - withOptions(options): { inlineVolumeSpec+: { flexVolume+: { options: options } } }, - '#withOptionsMixin':: d.fn(help='"Optional: Extra command options if any."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='options', type=d.T.object)]), - withOptionsMixin(options): { inlineVolumeSpec+: { flexVolume+: { options+: options } } }, - '#withReadOnly':: d.fn(help='"Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { inlineVolumeSpec+: { flexVolume+: { readOnly: readOnly } } }, - }, - '#flocker':: d.obj(help='"Represents a Flocker volume mounted by the Flocker agent. One and only one of datasetName and datasetUUID should be set. Flocker volumes do not support ownership management or SELinux relabeling."'), - flocker: { - '#withDatasetName':: d.fn(help='"Name of the dataset stored as metadata -> name on the dataset for Flocker should be considered as deprecated"', args=[d.arg(name='datasetName', type=d.T.string)]), - withDatasetName(datasetName): { inlineVolumeSpec+: { flocker+: { datasetName: datasetName } } }, - '#withDatasetUUID':: d.fn(help='"UUID of the dataset. This is unique identifier of a Flocker dataset"', args=[d.arg(name='datasetUUID', type=d.T.string)]), - withDatasetUUID(datasetUUID): { inlineVolumeSpec+: { flocker+: { datasetUUID: datasetUUID } } }, - }, - '#gcePersistentDisk':: d.obj(help='"Represents a Persistent Disk resource in Google Compute Engine.\\n\\nA GCE PD must exist before mounting to a container. The disk must also be in the same GCE project and zone as the kubelet. A GCE PD can only be mounted as read/write once or read-only many times. GCE PDs support ownership management and SELinux relabeling."'), - gcePersistentDisk: { - '#withFsType':: d.fn(help='"Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { inlineVolumeSpec+: { gcePersistentDisk+: { fsType: fsType } } }, - '#withPartition':: d.fn(help='"The partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \\"1\\". Similarly, the volume partition for /dev/sda is \\"0\\" (or you can leave the property empty). More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='partition', type=d.T.integer)]), - withPartition(partition): { inlineVolumeSpec+: { gcePersistentDisk+: { partition: partition } } }, - '#withPdName':: d.fn(help='"Unique name of the PD resource in GCE. Used to identify the disk in GCE. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='pdName', type=d.T.string)]), - withPdName(pdName): { inlineVolumeSpec+: { gcePersistentDisk+: { pdName: pdName } } }, - '#withReadOnly':: d.fn(help='"ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { inlineVolumeSpec+: { gcePersistentDisk+: { readOnly: readOnly } } }, - }, - '#glusterfs':: d.obj(help='"Represents a Glusterfs mount that lasts the lifetime of a pod. Glusterfs volumes do not support ownership management or SELinux relabeling."'), - glusterfs: { - '#withEndpoints':: d.fn(help='"EndpointsName is the endpoint name that details Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='endpoints', type=d.T.string)]), - withEndpoints(endpoints): { inlineVolumeSpec+: { glusterfs+: { endpoints: endpoints } } }, - '#withEndpointsNamespace':: d.fn(help='"EndpointsNamespace is the namespace that contains Glusterfs endpoint. If this field is empty, the EndpointNamespace defaults to the same namespace as the bound PVC. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='endpointsNamespace', type=d.T.string)]), - withEndpointsNamespace(endpointsNamespace): { inlineVolumeSpec+: { glusterfs+: { endpointsNamespace: endpointsNamespace } } }, - '#withPath':: d.fn(help='"Path is the Glusterfs volume path. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='path', type=d.T.string)]), - withPath(path): { inlineVolumeSpec+: { glusterfs+: { path: path } } }, - '#withReadOnly':: d.fn(help='"ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions. Defaults to false. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { inlineVolumeSpec+: { glusterfs+: { readOnly: readOnly } } }, - }, - '#hostPath':: d.obj(help='"Represents a host path mapped into a pod. Host path volumes do not support ownership management or SELinux relabeling."'), - hostPath: { - '#withPath':: d.fn(help='"Path of the directory on the host. If the path is a symlink, it will follow the link to the real path. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath"', args=[d.arg(name='path', type=d.T.string)]), - withPath(path): { inlineVolumeSpec+: { hostPath+: { path: path } } }, - '#withType':: d.fn(help='"Type for HostPath Volume Defaults to \\"\\" More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath"', args=[d.arg(name='type', type=d.T.string)]), - withType(type): { inlineVolumeSpec+: { hostPath+: { type: type } } }, - }, - '#iscsi':: d.obj(help='"ISCSIPersistentVolumeSource represents an ISCSI disk. ISCSI volumes can only be mounted as read/write once. ISCSI volumes support ownership management and SELinux relabeling."'), - iscsi: { - '#secretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), - secretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { inlineVolumeSpec+: { iscsi+: { secretRef+: { name: name } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { inlineVolumeSpec+: { iscsi+: { secretRef+: { namespace: namespace } } } }, - }, - '#withChapAuthDiscovery':: d.fn(help='"whether support iSCSI Discovery CHAP authentication"', args=[d.arg(name='chapAuthDiscovery', type=d.T.boolean)]), - withChapAuthDiscovery(chapAuthDiscovery): { inlineVolumeSpec+: { iscsi+: { chapAuthDiscovery: chapAuthDiscovery } } }, - '#withChapAuthSession':: d.fn(help='"whether support iSCSI Session CHAP authentication"', args=[d.arg(name='chapAuthSession', type=d.T.boolean)]), - withChapAuthSession(chapAuthSession): { inlineVolumeSpec+: { iscsi+: { chapAuthSession: chapAuthSession } } }, - '#withFsType':: d.fn(help='"Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi"', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { inlineVolumeSpec+: { iscsi+: { fsType: fsType } } }, - '#withInitiatorName':: d.fn(help='"Custom iSCSI Initiator Name. If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface : will be created for the connection."', args=[d.arg(name='initiatorName', type=d.T.string)]), - withInitiatorName(initiatorName): { inlineVolumeSpec+: { iscsi+: { initiatorName: initiatorName } } }, - '#withIqn':: d.fn(help='"Target iSCSI Qualified Name."', args=[d.arg(name='iqn', type=d.T.string)]), - withIqn(iqn): { inlineVolumeSpec+: { iscsi+: { iqn: iqn } } }, - '#withIscsiInterface':: d.fn(help="\"iSCSI Interface Name that uses an iSCSI transport. Defaults to 'default' (tcp).\"", args=[d.arg(name='iscsiInterface', type=d.T.string)]), - withIscsiInterface(iscsiInterface): { inlineVolumeSpec+: { iscsi+: { iscsiInterface: iscsiInterface } } }, - '#withLun':: d.fn(help='"iSCSI Target Lun number."', args=[d.arg(name='lun', type=d.T.integer)]), - withLun(lun): { inlineVolumeSpec+: { iscsi+: { lun: lun } } }, - '#withPortals':: d.fn(help='"iSCSI Target Portal List. 
The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260)."', args=[d.arg(name='portals', type=d.T.array)]), - withPortals(portals): { inlineVolumeSpec+: { iscsi+: { portals: if std.isArray(v=portals) then portals else [portals] } } }, - '#withPortalsMixin':: d.fn(help='"iSCSI Target Portal List. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260)."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='portals', type=d.T.array)]), - withPortalsMixin(portals): { inlineVolumeSpec+: { iscsi+: { portals+: if std.isArray(v=portals) then portals else [portals] } } }, - '#withReadOnly':: d.fn(help='"ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false."', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { inlineVolumeSpec+: { iscsi+: { readOnly: readOnly } } }, - '#withTargetPortal':: d.fn(help='"iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260)."', args=[d.arg(name='targetPortal', type=d.T.string)]), - withTargetPortal(targetPortal): { inlineVolumeSpec+: { iscsi+: { targetPortal: targetPortal } } }, - }, - '#local':: d.obj(help='"Local represents directly-attached storage with node affinity (Beta feature)"'), - 'local': { - '#withFsType':: d.fn(help='"Filesystem type to mount. It applies only when the Path is a block device. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". The default value is to auto-select a fileystem if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { inlineVolumeSpec+: { 'local'+: { fsType: fsType } } }, - '#withPath':: d.fn(help='"The full path to the volume on the node. It can be either a directory or block device (disk, partition, ...)."', args=[d.arg(name='path', type=d.T.string)]), - withPath(path): { inlineVolumeSpec+: { 'local'+: { path: path } } }, - }, - '#nfs':: d.obj(help='"Represents an NFS mount that lasts the lifetime of a pod. NFS volumes do not support ownership management or SELinux relabeling."'), - nfs: { - '#withPath':: d.fn(help='"Path that is exported by the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs"', args=[d.arg(name='path', type=d.T.string)]), - withPath(path): { inlineVolumeSpec+: { nfs+: { path: path } } }, - '#withReadOnly':: d.fn(help='"ReadOnly here will force the NFS export to be mounted with read-only permissions. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs"', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { inlineVolumeSpec+: { nfs+: { readOnly: readOnly } } }, - '#withServer':: d.fn(help='"Server is the hostname or IP address of the NFS server. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs"', args=[d.arg(name='server', type=d.T.string)]), - withServer(server): { inlineVolumeSpec+: { nfs+: { server: server } } }, - }, - '#nodeAffinity':: d.obj(help='"VolumeNodeAffinity defines constraints that limit what nodes this volume can be accessed from."'), - nodeAffinity: { - '#required':: d.obj(help='"A node selector represents the union of the results of one or more label queries over a set of nodes; that is, it represents the OR of the selectors represented by the node selector terms."'), - required: { - '#withNodeSelectorTerms':: d.fn(help='"Required. A list of node selector terms. The terms are ORed."', args=[d.arg(name='nodeSelectorTerms', type=d.T.array)]), - withNodeSelectorTerms(nodeSelectorTerms): { inlineVolumeSpec+: { nodeAffinity+: { required+: { nodeSelectorTerms: if std.isArray(v=nodeSelectorTerms) then nodeSelectorTerms else [nodeSelectorTerms] } } } }, - '#withNodeSelectorTermsMixin':: d.fn(help='"Required. A list of node selector terms. The terms are ORed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='nodeSelectorTerms', type=d.T.array)]), - withNodeSelectorTermsMixin(nodeSelectorTerms): { inlineVolumeSpec+: { nodeAffinity+: { required+: { nodeSelectorTerms+: if std.isArray(v=nodeSelectorTerms) then nodeSelectorTerms else [nodeSelectorTerms] } } } }, - }, - }, - '#photonPersistentDisk':: d.obj(help='"Represents a Photon Controller persistent disk resource."'), - photonPersistentDisk: { - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { inlineVolumeSpec+: { photonPersistentDisk+: { fsType: fsType } } }, - '#withPdID':: d.fn(help='"ID that identifies Photon Controller persistent disk"', args=[d.arg(name='pdID', type=d.T.string)]), - withPdID(pdID): { inlineVolumeSpec+: { photonPersistentDisk+: { pdID: pdID } } }, - }, - '#portworxVolume':: d.obj(help='"PortworxVolumeSource represents a Portworx volume resource."'), - portworxVolume: { - '#withFsType':: d.fn(help='"FSType represents the filesystem type to mount Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { inlineVolumeSpec+: { portworxVolume+: { fsType: fsType } } }, - '#withReadOnly':: d.fn(help='"Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { inlineVolumeSpec+: { portworxVolume+: { readOnly: readOnly } } }, - '#withVolumeID':: d.fn(help='"VolumeID uniquely identifies a Portworx volume"', args=[d.arg(name='volumeID', type=d.T.string)]), - withVolumeID(volumeID): { inlineVolumeSpec+: { portworxVolume+: { volumeID: volumeID } } }, - }, - '#quobyte':: d.obj(help='"Represents a Quobyte mount that lasts the lifetime of a pod. 
Quobyte volumes do not support ownership management or SELinux relabeling."'), - quobyte: { - '#withGroup':: d.fn(help='"Group to map volume access to Default is no group"', args=[d.arg(name='group', type=d.T.string)]), - withGroup(group): { inlineVolumeSpec+: { quobyte+: { group: group } } }, - '#withReadOnly':: d.fn(help='"ReadOnly here will force the Quobyte volume to be mounted with read-only permissions. Defaults to false."', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { inlineVolumeSpec+: { quobyte+: { readOnly: readOnly } } }, - '#withRegistry':: d.fn(help='"Registry represents a single or multiple Quobyte Registry services specified as a string as host:port pair (multiple entries are separated with commas) which acts as the central registry for volumes"', args=[d.arg(name='registry', type=d.T.string)]), - withRegistry(registry): { inlineVolumeSpec+: { quobyte+: { registry: registry } } }, - '#withTenant':: d.fn(help='"Tenant owning the given Quobyte volume in the Backend Used with dynamically provisioned Quobyte volumes, value is set by the plugin"', args=[d.arg(name='tenant', type=d.T.string)]), - withTenant(tenant): { inlineVolumeSpec+: { quobyte+: { tenant: tenant } } }, - '#withUser':: d.fn(help='"User to map volume access to Defaults to serivceaccount user"', args=[d.arg(name='user', type=d.T.string)]), - withUser(user): { inlineVolumeSpec+: { quobyte+: { user: user } } }, - '#withVolume':: d.fn(help='"Volume is a string that references an already created Quobyte volume by name."', args=[d.arg(name='volume', type=d.T.string)]), - withVolume(volume): { inlineVolumeSpec+: { quobyte+: { volume: volume } } }, - }, - '#rbd':: d.obj(help='"Represents a Rados Block Device mount that lasts the lifetime of a pod. RBD volumes support ownership management and SELinux relabeling."'), - rbd: { - '#secretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), - secretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { inlineVolumeSpec+: { rbd+: { secretRef+: { name: name } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { inlineVolumeSpec+: { rbd+: { secretRef+: { namespace: namespace } } } }, - }, - '#withFsType':: d.fn(help='"Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd"', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { inlineVolumeSpec+: { rbd+: { fsType: fsType } } }, - '#withImage':: d.fn(help='"The rados image name. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='image', type=d.T.string)]), - withImage(image): { inlineVolumeSpec+: { rbd+: { image: image } } }, - '#withKeyring':: d.fn(help='"Keyring is the path to key ring for RBDUser. Default is /etc/ceph/keyring. 
More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='keyring', type=d.T.string)]), - withKeyring(keyring): { inlineVolumeSpec+: { rbd+: { keyring: keyring } } }, - '#withMonitors':: d.fn(help='"A collection of Ceph monitors. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='monitors', type=d.T.array)]), - withMonitors(monitors): { inlineVolumeSpec+: { rbd+: { monitors: if std.isArray(v=monitors) then monitors else [monitors] } } }, - '#withMonitorsMixin':: d.fn(help='"A collection of Ceph monitors. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='monitors', type=d.T.array)]), - withMonitorsMixin(monitors): { inlineVolumeSpec+: { rbd+: { monitors+: if std.isArray(v=monitors) then monitors else [monitors] } } }, - '#withPool':: d.fn(help='"The rados pool name. Default is rbd. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='pool', type=d.T.string)]), - withPool(pool): { inlineVolumeSpec+: { rbd+: { pool: pool } } }, - '#withReadOnly':: d.fn(help='"ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { inlineVolumeSpec+: { rbd+: { readOnly: readOnly } } }, - '#withUser':: d.fn(help='"The rados user name. Default is admin. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='user', type=d.T.string)]), - withUser(user): { inlineVolumeSpec+: { rbd+: { user: user } } }, - }, - '#scaleIO':: d.obj(help='"ScaleIOPersistentVolumeSource represents a persistent ScaleIO volume"'), - scaleIO: { - '#secretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), - secretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { inlineVolumeSpec+: { scaleIO+: { secretRef+: { name: name } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { inlineVolumeSpec+: { scaleIO+: { secretRef+: { namespace: namespace } } } }, - }, - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Default is \\"xfs\\', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { inlineVolumeSpec+: { scaleIO+: { fsType: fsType } } }, - '#withGateway':: d.fn(help='"The host address of the ScaleIO API Gateway."', args=[d.arg(name='gateway', type=d.T.string)]), - withGateway(gateway): { inlineVolumeSpec+: { scaleIO+: { gateway: gateway } } }, - '#withProtectionDomain':: d.fn(help='"The name of the ScaleIO Protection Domain for the configured storage."', args=[d.arg(name='protectionDomain', type=d.T.string)]), - withProtectionDomain(protectionDomain): { inlineVolumeSpec+: { scaleIO+: { protectionDomain: protectionDomain } } }, - '#withReadOnly':: d.fn(help='"Defaults to false (read/write). 
ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { inlineVolumeSpec+: { scaleIO+: { readOnly: readOnly } } }, - '#withSslEnabled':: d.fn(help='"Flag to enable/disable SSL communication with Gateway, default false"', args=[d.arg(name='sslEnabled', type=d.T.boolean)]), - withSslEnabled(sslEnabled): { inlineVolumeSpec+: { scaleIO+: { sslEnabled: sslEnabled } } }, - '#withStorageMode':: d.fn(help='"Indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. Default is ThinProvisioned."', args=[d.arg(name='storageMode', type=d.T.string)]), - withStorageMode(storageMode): { inlineVolumeSpec+: { scaleIO+: { storageMode: storageMode } } }, - '#withStoragePool':: d.fn(help='"The ScaleIO Storage Pool associated with the protection domain."', args=[d.arg(name='storagePool', type=d.T.string)]), - withStoragePool(storagePool): { inlineVolumeSpec+: { scaleIO+: { storagePool: storagePool } } }, - '#withSystem':: d.fn(help='"The name of the storage system as configured in ScaleIO."', args=[d.arg(name='system', type=d.T.string)]), - withSystem(system): { inlineVolumeSpec+: { scaleIO+: { system: system } } }, - '#withVolumeName':: d.fn(help='"The name of a volume already created in the ScaleIO system that is associated with this volume source."', args=[d.arg(name='volumeName', type=d.T.string)]), - withVolumeName(volumeName): { inlineVolumeSpec+: { scaleIO+: { volumeName: volumeName } } }, - }, - '#storageos':: d.obj(help='"Represents a StorageOS persistent volume resource."'), - storageos: { - '#secretRef':: d.obj(help='"ObjectReference contains enough information to let you inspect or modify the referred object."'), - secretRef: { - '#withApiVersion':: d.fn(help='"API version of the referent."', args=[d.arg(name='apiVersion', type=d.T.string)]), - withApiVersion(apiVersion): { inlineVolumeSpec+: { storageos+: { secretRef+: { apiVersion: apiVersion } } } }, - '#withFieldPath':: d.fn(help='"If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. For example, if the object reference is to a container within a pod, this would take on a value like: \\"spec.containers{name}\\" (where \\"name\\" refers to the name of the container that triggered the event) or if no container name is specified \\"spec.containers[2]\\" (container with index 2 in this pod). This syntax is chosen only to have some well-defined way of referencing a part of an object."', args=[d.arg(name='fieldPath', type=d.T.string)]), - withFieldPath(fieldPath): { inlineVolumeSpec+: { storageos+: { secretRef+: { fieldPath: fieldPath } } } }, - '#withKind':: d.fn(help='"Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds"', args=[d.arg(name='kind', type=d.T.string)]), - withKind(kind): { inlineVolumeSpec+: { storageos+: { secretRef+: { kind: kind } } } }, - '#withName':: d.fn(help='"Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { inlineVolumeSpec+: { storageos+: { secretRef+: { name: name } } } }, - '#withNamespace':: d.fn(help='"Namespace of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/"', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { inlineVolumeSpec+: { storageos+: { secretRef+: { namespace: namespace } } } }, - '#withResourceVersion':: d.fn(help='"Specific resourceVersion to which this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), - withResourceVersion(resourceVersion): { inlineVolumeSpec+: { storageos+: { secretRef+: { resourceVersion: resourceVersion } } } }, - '#withUid':: d.fn(help='"UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids"', args=[d.arg(name='uid', type=d.T.string)]), - withUid(uid): { inlineVolumeSpec+: { storageos+: { secretRef+: { uid: uid } } } }, - }, - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { inlineVolumeSpec+: { storageos+: { fsType: fsType } } }, - '#withReadOnly':: d.fn(help='"Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { inlineVolumeSpec+: { storageos+: { readOnly: readOnly } } }, - '#withVolumeName':: d.fn(help='"VolumeName is the human-readable name of the StorageOS volume. Volume names are only unique within a namespace."', args=[d.arg(name='volumeName', type=d.T.string)]), - withVolumeName(volumeName): { inlineVolumeSpec+: { storageos+: { volumeName: volumeName } } }, - '#withVolumeNamespace':: d.fn(help="\"VolumeNamespace specifies the scope of the volume within StorageOS. If no namespace is specified then the Pod's namespace will be used. This allows the Kubernetes name scoping to be mirrored within StorageOS for tighter integration. Set VolumeName to any name to override the default behaviour. Set to \\\"default\\\" if you are not using namespaces within StorageOS. Namespaces that do not pre-exist within StorageOS will be created.\"", args=[d.arg(name='volumeNamespace', type=d.T.string)]), - withVolumeNamespace(volumeNamespace): { inlineVolumeSpec+: { storageos+: { volumeNamespace: volumeNamespace } } }, - }, - '#vsphereVolume':: d.obj(help='"Represents a vSphere volume resource."'), - vsphereVolume: { - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". 
Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { inlineVolumeSpec+: { vsphereVolume+: { fsType: fsType } } }, - '#withStoragePolicyID':: d.fn(help='"Storage Policy Based Management (SPBM) profile ID associated with the StoragePolicyName."', args=[d.arg(name='storagePolicyID', type=d.T.string)]), - withStoragePolicyID(storagePolicyID): { inlineVolumeSpec+: { vsphereVolume+: { storagePolicyID: storagePolicyID } } }, - '#withStoragePolicyName':: d.fn(help='"Storage Policy Based Management (SPBM) profile name."', args=[d.arg(name='storagePolicyName', type=d.T.string)]), - withStoragePolicyName(storagePolicyName): { inlineVolumeSpec+: { vsphereVolume+: { storagePolicyName: storagePolicyName } } }, - '#withVolumePath':: d.fn(help='"Path that identifies vSphere volume vmdk"', args=[d.arg(name='volumePath', type=d.T.string)]), - withVolumePath(volumePath): { inlineVolumeSpec+: { vsphereVolume+: { volumePath: volumePath } } }, - }, - '#withAccessModes':: d.fn(help='"AccessModes contains all ways the volume can be mounted. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes"', args=[d.arg(name='accessModes', type=d.T.array)]), - withAccessModes(accessModes): { inlineVolumeSpec+: { accessModes: if std.isArray(v=accessModes) then accessModes else [accessModes] } }, - '#withAccessModesMixin':: d.fn(help='"AccessModes contains all ways the volume can be mounted. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='accessModes', type=d.T.array)]), - withAccessModesMixin(accessModes): { inlineVolumeSpec+: { accessModes+: if std.isArray(v=accessModes) then accessModes else [accessModes] } }, - '#withCapacity':: d.fn(help="\"A description of the persistent volume's resources and capacity. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity\"", args=[d.arg(name='capacity', type=d.T.object)]), - withCapacity(capacity): { inlineVolumeSpec+: { capacity: capacity } }, - '#withCapacityMixin':: d.fn(help="\"A description of the persistent volume's resources and capacity. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='capacity', type=d.T.object)]), - withCapacityMixin(capacity): { inlineVolumeSpec+: { capacity+: capacity } }, - '#withMountOptions':: d.fn(help='"A list of mount options, e.g. [\\"ro\\", \\"soft\\"]. Not validated - mount will simply fail if one is invalid. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#mount-options"', args=[d.arg(name='mountOptions', type=d.T.array)]), - withMountOptions(mountOptions): { inlineVolumeSpec+: { mountOptions: if std.isArray(v=mountOptions) then mountOptions else [mountOptions] } }, - '#withMountOptionsMixin':: d.fn(help='"A list of mount options, e.g. [\\"ro\\", \\"soft\\"]. Not validated - mount will simply fail if one is invalid. 
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#mount-options"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='mountOptions', type=d.T.array)]),
- withMountOptionsMixin(mountOptions): { inlineVolumeSpec+: { mountOptions+: if std.isArray(v=mountOptions) then mountOptions else [mountOptions] } },
- '#withPersistentVolumeReclaimPolicy':: d.fn(help='"What happens to a persistent volume when released from its claim. Valid options are Retain (default for manually created PersistentVolumes), Delete (default for dynamically provisioned PersistentVolumes), and Recycle (deprecated). Recycle must be supported by the volume plugin underlying this PersistentVolume. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#reclaiming"', args=[d.arg(name='persistentVolumeReclaimPolicy', type=d.T.string)]),
- withPersistentVolumeReclaimPolicy(persistentVolumeReclaimPolicy): { inlineVolumeSpec+: { persistentVolumeReclaimPolicy: persistentVolumeReclaimPolicy } },
- '#withStorageClassName':: d.fn(help='"Name of StorageClass to which this persistent volume belongs. Empty value means that this volume does not belong to any StorageClass."', args=[d.arg(name='storageClassName', type=d.T.string)]),
- withStorageClassName(storageClassName): { inlineVolumeSpec+: { storageClassName: storageClassName } },
- '#withVolumeMode':: d.fn(help='"volumeMode defines if a volume is intended to be used with a formatted filesystem or to remain in raw block state. Value of Filesystem is implied when not included in spec."', args=[d.arg(name='volumeMode', type=d.T.string)]),
- withVolumeMode(volumeMode): { inlineVolumeSpec+: { volumeMode: volumeMode } },
- },
- '#withPersistentVolumeName':: d.fn(help='"Name of the persistent volume to attach."', args=[d.arg(name='persistentVolumeName', type=d.T.string)]),
- withPersistentVolumeName(persistentVolumeName): { persistentVolumeName: persistentVolumeName },
- '#mixin': 'ignore',
- mixin: self,
-}
diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1/volumeAttachmentSpec.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1/volumeAttachmentSpec.libsonnet
deleted file mode 100644
index 8b092052a56..00000000000
--- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1/volumeAttachmentSpec.libsonnet
+++ /dev/null
@@ -1,426 +0,0 @@
-{
- local d = (import 'doc-util/main.libsonnet'),
- '#':: d.pkg(name='volumeAttachmentSpec', url='', help='"VolumeAttachmentSpec is the specification of a VolumeAttachment request."'),
- '#source':: d.obj(help='"VolumeAttachmentSource represents a volume that should be attached. Right now only PersistenVolumes can be attached via external attacher, in future we may allow also inline volumes in pods. Exactly one member can be set."'),
- source: {
- '#inlineVolumeSpec':: d.obj(help='"PersistentVolumeSpec is the specification of a persistent volume."'),
- inlineVolumeSpec: {
- '#awsElasticBlockStore':: d.obj(help='"Represents a Persistent Disk resource in AWS.\\n\\nAn AWS EBS disk must exist before mounting to a container. The disk must also be in the same AWS zone as the kubelet. An AWS EBS disk can only be mounted as read/write once. AWS EBS volumes support ownership management and SELinux relabeling."'),
- awsElasticBlockStore: {
- '#withFsType':: d.fn(help='"Filesystem type of the volume that you want to mount. 
Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore"', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { source+: { inlineVolumeSpec+: { awsElasticBlockStore+: { fsType: fsType } } } }, - '#withPartition':: d.fn(help='"The partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \\"1\\". Similarly, the volume partition for /dev/sda is \\"0\\" (or you can leave the property empty)."', args=[d.arg(name='partition', type=d.T.integer)]), - withPartition(partition): { source+: { inlineVolumeSpec+: { awsElasticBlockStore+: { partition: partition } } } }, - '#withReadOnly':: d.fn(help='"Specify \\"true\\" to force and set the ReadOnly property in VolumeMounts to \\"true\\". If omitted, the default is \\"false\\". More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore"', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { source+: { inlineVolumeSpec+: { awsElasticBlockStore+: { readOnly: readOnly } } } }, - '#withVolumeID':: d.fn(help='"Unique ID of the persistent disk resource in AWS (Amazon EBS volume). More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore"', args=[d.arg(name='volumeID', type=d.T.string)]), - withVolumeID(volumeID): { source+: { inlineVolumeSpec+: { awsElasticBlockStore+: { volumeID: volumeID } } } }, - }, - '#azureDisk':: d.obj(help='"AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod."'), - azureDisk: { - '#withCachingMode':: d.fn(help='"Host Caching mode: None, Read Only, Read Write."', args=[d.arg(name='cachingMode', type=d.T.string)]), - withCachingMode(cachingMode): { source+: { inlineVolumeSpec+: { azureDisk+: { cachingMode: cachingMode } } } }, - '#withDiskName':: d.fn(help='"The Name of the data disk in the blob storage"', args=[d.arg(name='diskName', type=d.T.string)]), - withDiskName(diskName): { source+: { inlineVolumeSpec+: { azureDisk+: { diskName: diskName } } } }, - '#withDiskURI':: d.fn(help='"The URI the data disk in the blob storage"', args=[d.arg(name='diskURI', type=d.T.string)]), - withDiskURI(diskURI): { source+: { inlineVolumeSpec+: { azureDisk+: { diskURI: diskURI } } } }, - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { source+: { inlineVolumeSpec+: { azureDisk+: { fsType: fsType } } } }, - '#withKind':: d.fn(help='"Expected values Shared: multiple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared"', args=[d.arg(name='kind', type=d.T.string)]), - withKind(kind): { source+: { inlineVolumeSpec+: { azureDisk+: { kind: kind } } } }, - '#withReadOnly':: d.fn(help='"Defaults to false (read/write). 
ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { source+: { inlineVolumeSpec+: { azureDisk+: { readOnly: readOnly } } } }, - }, - '#azureFile':: d.obj(help='"AzureFile represents an Azure File Service mount on the host and bind mount to the pod."'), - azureFile: { - '#withReadOnly':: d.fn(help='"Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { source+: { inlineVolumeSpec+: { azureFile+: { readOnly: readOnly } } } }, - '#withSecretName':: d.fn(help='"the name of secret that contains Azure Storage Account Name and Key"', args=[d.arg(name='secretName', type=d.T.string)]), - withSecretName(secretName): { source+: { inlineVolumeSpec+: { azureFile+: { secretName: secretName } } } }, - '#withSecretNamespace':: d.fn(help='"the namespace of the secret that contains Azure Storage Account Name and Key default is the same as the Pod"', args=[d.arg(name='secretNamespace', type=d.T.string)]), - withSecretNamespace(secretNamespace): { source+: { inlineVolumeSpec+: { azureFile+: { secretNamespace: secretNamespace } } } }, - '#withShareName':: d.fn(help='"Share Name"', args=[d.arg(name='shareName', type=d.T.string)]), - withShareName(shareName): { source+: { inlineVolumeSpec+: { azureFile+: { shareName: shareName } } } }, - }, - '#cephfs':: d.obj(help='"Represents a Ceph Filesystem mount that lasts the lifetime of a pod Cephfs volumes do not support ownership management or SELinux relabeling."'), - cephfs: { - '#secretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), - secretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { source+: { inlineVolumeSpec+: { cephfs+: { secretRef+: { name: name } } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { source+: { inlineVolumeSpec+: { cephfs+: { secretRef+: { namespace: namespace } } } } }, - }, - '#withMonitors':: d.fn(help='"Required: Monitors is a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='monitors', type=d.T.array)]), - withMonitors(monitors): { source+: { inlineVolumeSpec+: { cephfs+: { monitors: if std.isArray(v=monitors) then monitors else [monitors] } } } }, - '#withMonitorsMixin':: d.fn(help='"Required: Monitors is a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='monitors', type=d.T.array)]), - withMonitorsMixin(monitors): { source+: { inlineVolumeSpec+: { cephfs+: { monitors+: if std.isArray(v=monitors) then monitors else [monitors] } } } }, - '#withPath':: d.fn(help='"Optional: Used as the mounted root, rather than the full Ceph tree, default is /"', args=[d.arg(name='path', type=d.T.string)]), - withPath(path): { source+: { inlineVolumeSpec+: { cephfs+: { path: path } } } }, - '#withReadOnly':: d.fn(help='"Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. 
More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { source+: { inlineVolumeSpec+: { cephfs+: { readOnly: readOnly } } } }, - '#withSecretFile':: d.fn(help='"Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='secretFile', type=d.T.string)]), - withSecretFile(secretFile): { source+: { inlineVolumeSpec+: { cephfs+: { secretFile: secretFile } } } }, - '#withUser':: d.fn(help='"Optional: User is the rados user name, default is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='user', type=d.T.string)]), - withUser(user): { source+: { inlineVolumeSpec+: { cephfs+: { user: user } } } }, - }, - '#cinder':: d.obj(help='"Represents a cinder volume resource in Openstack. A Cinder volume must exist before mounting to a container. The volume must also be in the same region as the kubelet. Cinder volumes support ownership management and SELinux relabeling."'), - cinder: { - '#secretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), - secretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { source+: { inlineVolumeSpec+: { cinder+: { secretRef+: { name: name } } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { source+: { inlineVolumeSpec+: { cinder+: { secretRef+: { namespace: namespace } } } } }, - }, - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://examples.k8s.io/mysql-cinder-pd/README.md"', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { source+: { inlineVolumeSpec+: { cinder+: { fsType: fsType } } } }, - '#withReadOnly':: d.fn(help='"Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/mysql-cinder-pd/README.md"', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { source+: { inlineVolumeSpec+: { cinder+: { readOnly: readOnly } } } }, - '#withVolumeID':: d.fn(help='"volume id used to identify the volume in cinder. More info: https://examples.k8s.io/mysql-cinder-pd/README.md"', args=[d.arg(name='volumeID', type=d.T.string)]), - withVolumeID(volumeID): { source+: { inlineVolumeSpec+: { cinder+: { volumeID: volumeID } } } }, - }, - '#claimRef':: d.obj(help='"ObjectReference contains enough information to let you inspect or modify the referred object."'), - claimRef: { - '#withApiVersion':: d.fn(help='"API version of the referent."', args=[d.arg(name='apiVersion', type=d.T.string)]), - withApiVersion(apiVersion): { source+: { inlineVolumeSpec+: { claimRef+: { apiVersion: apiVersion } } } }, - '#withFieldPath':: d.fn(help='"If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. 
For example, if the object reference is to a container within a pod, this would take on a value like: \\"spec.containers{name}\\" (where \\"name\\" refers to the name of the container that triggered the event) or if no container name is specified \\"spec.containers[2]\\" (container with index 2 in this pod). This syntax is chosen only to have some well-defined way of referencing a part of an object."', args=[d.arg(name='fieldPath', type=d.T.string)]), - withFieldPath(fieldPath): { source+: { inlineVolumeSpec+: { claimRef+: { fieldPath: fieldPath } } } }, - '#withKind':: d.fn(help='"Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds"', args=[d.arg(name='kind', type=d.T.string)]), - withKind(kind): { source+: { inlineVolumeSpec+: { claimRef+: { kind: kind } } } }, - '#withName':: d.fn(help='"Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { source+: { inlineVolumeSpec+: { claimRef+: { name: name } } } }, - '#withNamespace':: d.fn(help='"Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/"', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { source+: { inlineVolumeSpec+: { claimRef+: { namespace: namespace } } } }, - '#withResourceVersion':: d.fn(help='"Specific resourceVersion to which this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), - withResourceVersion(resourceVersion): { source+: { inlineVolumeSpec+: { claimRef+: { resourceVersion: resourceVersion } } } }, - '#withUid':: d.fn(help='"UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids"', args=[d.arg(name='uid', type=d.T.string)]), - withUid(uid): { source+: { inlineVolumeSpec+: { claimRef+: { uid: uid } } } }, - }, - '#csi':: d.obj(help='"Represents storage that is managed by an external CSI volume driver (Beta feature)"'), - csi: { - '#controllerExpandSecretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), - controllerExpandSecretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { source+: { inlineVolumeSpec+: { csi+: { controllerExpandSecretRef+: { name: name } } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { source+: { inlineVolumeSpec+: { csi+: { controllerExpandSecretRef+: { namespace: namespace } } } } }, - }, - '#controllerPublishSecretRef':: d.obj(help='"SecretReference represents a Secret Reference. 
It has enough information to retrieve secret in any namespace"'), - controllerPublishSecretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { source+: { inlineVolumeSpec+: { csi+: { controllerPublishSecretRef+: { name: name } } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { source+: { inlineVolumeSpec+: { csi+: { controllerPublishSecretRef+: { namespace: namespace } } } } }, - }, - '#nodePublishSecretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), - nodePublishSecretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { source+: { inlineVolumeSpec+: { csi+: { nodePublishSecretRef+: { name: name } } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { source+: { inlineVolumeSpec+: { csi+: { nodePublishSecretRef+: { namespace: namespace } } } } }, - }, - '#nodeStageSecretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), - nodeStageSecretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { source+: { inlineVolumeSpec+: { csi+: { nodeStageSecretRef+: { name: name } } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { source+: { inlineVolumeSpec+: { csi+: { nodeStageSecretRef+: { namespace: namespace } } } } }, - }, - '#withDriver':: d.fn(help='"Driver is the name of the driver to use for this volume. Required."', args=[d.arg(name='driver', type=d.T.string)]), - withDriver(driver): { source+: { inlineVolumeSpec+: { csi+: { driver: driver } } } }, - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\"."', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { source+: { inlineVolumeSpec+: { csi+: { fsType: fsType } } } }, - '#withReadOnly':: d.fn(help='"Optional: The value to pass to ControllerPublishVolumeRequest. 
Defaults to false (read/write)."', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { source+: { inlineVolumeSpec+: { csi+: { readOnly: readOnly } } } }, - '#withVolumeAttributes':: d.fn(help='"Attributes of the volume to publish."', args=[d.arg(name='volumeAttributes', type=d.T.object)]), - withVolumeAttributes(volumeAttributes): { source+: { inlineVolumeSpec+: { csi+: { volumeAttributes: volumeAttributes } } } }, - '#withVolumeAttributesMixin':: d.fn(help='"Attributes of the volume to publish."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='volumeAttributes', type=d.T.object)]), - withVolumeAttributesMixin(volumeAttributes): { source+: { inlineVolumeSpec+: { csi+: { volumeAttributes+: volumeAttributes } } } }, - '#withVolumeHandle':: d.fn(help='"VolumeHandle is the unique volume name returned by the CSI volume plugin’s CreateVolume to refer to the volume on all subsequent calls. Required."', args=[d.arg(name='volumeHandle', type=d.T.string)]), - withVolumeHandle(volumeHandle): { source+: { inlineVolumeSpec+: { csi+: { volumeHandle: volumeHandle } } } }, - }, - '#fc':: d.obj(help='"Represents a Fibre Channel volume. Fibre Channel volumes can only be mounted as read/write once. Fibre Channel volumes support ownership management and SELinux relabeling."'), - fc: { - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { source+: { inlineVolumeSpec+: { fc+: { fsType: fsType } } } }, - '#withLun':: d.fn(help='"Optional: FC target lun number"', args=[d.arg(name='lun', type=d.T.integer)]), - withLun(lun): { source+: { inlineVolumeSpec+: { fc+: { lun: lun } } } }, - '#withReadOnly':: d.fn(help='"Optional: Defaults to false (read/write). 
ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { source+: { inlineVolumeSpec+: { fc+: { readOnly: readOnly } } } }, - '#withTargetWWNs':: d.fn(help='"Optional: FC target worldwide names (WWNs)"', args=[d.arg(name='targetWWNs', type=d.T.array)]), - withTargetWWNs(targetWWNs): { source+: { inlineVolumeSpec+: { fc+: { targetWWNs: if std.isArray(v=targetWWNs) then targetWWNs else [targetWWNs] } } } }, - '#withTargetWWNsMixin':: d.fn(help='"Optional: FC target worldwide names (WWNs)"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='targetWWNs', type=d.T.array)]), - withTargetWWNsMixin(targetWWNs): { source+: { inlineVolumeSpec+: { fc+: { targetWWNs+: if std.isArray(v=targetWWNs) then targetWWNs else [targetWWNs] } } } }, - '#withWwids':: d.fn(help='"Optional: FC volume world wide identifiers (wwids) Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously."', args=[d.arg(name='wwids', type=d.T.array)]), - withWwids(wwids): { source+: { inlineVolumeSpec+: { fc+: { wwids: if std.isArray(v=wwids) then wwids else [wwids] } } } }, - '#withWwidsMixin':: d.fn(help='"Optional: FC volume world wide identifiers (wwids) Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='wwids', type=d.T.array)]), - withWwidsMixin(wwids): { source+: { inlineVolumeSpec+: { fc+: { wwids+: if std.isArray(v=wwids) then wwids else [wwids] } } } }, - }, - '#flexVolume':: d.obj(help='"FlexPersistentVolumeSource represents a generic persistent volume resource that is provisioned/attached using an exec based plugin."'), - flexVolume: { - '#secretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), - secretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { source+: { inlineVolumeSpec+: { flexVolume+: { secretRef+: { name: name } } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { source+: { inlineVolumeSpec+: { flexVolume+: { secretRef+: { namespace: namespace } } } } }, - }, - '#withDriver':: d.fn(help='"Driver is the name of the driver to use for this volume."', args=[d.arg(name='driver', type=d.T.string)]), - withDriver(driver): { source+: { inlineVolumeSpec+: { flexVolume+: { driver: driver } } } }, - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". 
The default filesystem depends on FlexVolume script."', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { source+: { inlineVolumeSpec+: { flexVolume+: { fsType: fsType } } } }, - '#withOptions':: d.fn(help='"Optional: Extra command options if any."', args=[d.arg(name='options', type=d.T.object)]), - withOptions(options): { source+: { inlineVolumeSpec+: { flexVolume+: { options: options } } } }, - '#withOptionsMixin':: d.fn(help='"Optional: Extra command options if any."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='options', type=d.T.object)]), - withOptionsMixin(options): { source+: { inlineVolumeSpec+: { flexVolume+: { options+: options } } } }, - '#withReadOnly':: d.fn(help='"Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { source+: { inlineVolumeSpec+: { flexVolume+: { readOnly: readOnly } } } }, - }, - '#flocker':: d.obj(help='"Represents a Flocker volume mounted by the Flocker agent. One and only one of datasetName and datasetUUID should be set. Flocker volumes do not support ownership management or SELinux relabeling."'), - flocker: { - '#withDatasetName':: d.fn(help='"Name of the dataset stored as metadata -> name on the dataset for Flocker should be considered as deprecated"', args=[d.arg(name='datasetName', type=d.T.string)]), - withDatasetName(datasetName): { source+: { inlineVolumeSpec+: { flocker+: { datasetName: datasetName } } } }, - '#withDatasetUUID':: d.fn(help='"UUID of the dataset. This is unique identifier of a Flocker dataset"', args=[d.arg(name='datasetUUID', type=d.T.string)]), - withDatasetUUID(datasetUUID): { source+: { inlineVolumeSpec+: { flocker+: { datasetUUID: datasetUUID } } } }, - }, - '#gcePersistentDisk':: d.obj(help='"Represents a Persistent Disk resource in Google Compute Engine.\\n\\nA GCE PD must exist before mounting to a container. The disk must also be in the same GCE project and zone as the kubelet. A GCE PD can only be mounted as read/write once or read-only many times. GCE PDs support ownership management and SELinux relabeling."'), - gcePersistentDisk: { - '#withFsType':: d.fn(help='"Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { source+: { inlineVolumeSpec+: { gcePersistentDisk+: { fsType: fsType } } } }, - '#withPartition':: d.fn(help='"The partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \\"1\\". Similarly, the volume partition for /dev/sda is \\"0\\" (or you can leave the property empty). More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='partition', type=d.T.integer)]), - withPartition(partition): { source+: { inlineVolumeSpec+: { gcePersistentDisk+: { partition: partition } } } }, - '#withPdName':: d.fn(help='"Unique name of the PD resource in GCE. Used to identify the disk in GCE. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='pdName', type=d.T.string)]), - withPdName(pdName): { source+: { inlineVolumeSpec+: { gcePersistentDisk+: { pdName: pdName } } } }, - '#withReadOnly':: d.fn(help='"ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { source+: { inlineVolumeSpec+: { gcePersistentDisk+: { readOnly: readOnly } } } }, - }, - '#glusterfs':: d.obj(help='"Represents a Glusterfs mount that lasts the lifetime of a pod. Glusterfs volumes do not support ownership management or SELinux relabeling."'), - glusterfs: { - '#withEndpoints':: d.fn(help='"EndpointsName is the endpoint name that details Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='endpoints', type=d.T.string)]), - withEndpoints(endpoints): { source+: { inlineVolumeSpec+: { glusterfs+: { endpoints: endpoints } } } }, - '#withEndpointsNamespace':: d.fn(help='"EndpointsNamespace is the namespace that contains Glusterfs endpoint. If this field is empty, the EndpointNamespace defaults to the same namespace as the bound PVC. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='endpointsNamespace', type=d.T.string)]), - withEndpointsNamespace(endpointsNamespace): { source+: { inlineVolumeSpec+: { glusterfs+: { endpointsNamespace: endpointsNamespace } } } }, - '#withPath':: d.fn(help='"Path is the Glusterfs volume path. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='path', type=d.T.string)]), - withPath(path): { source+: { inlineVolumeSpec+: { glusterfs+: { path: path } } } }, - '#withReadOnly':: d.fn(help='"ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions. Defaults to false. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { source+: { inlineVolumeSpec+: { glusterfs+: { readOnly: readOnly } } } }, - }, - '#hostPath':: d.obj(help='"Represents a host path mapped into a pod. Host path volumes do not support ownership management or SELinux relabeling."'), - hostPath: { - '#withPath':: d.fn(help='"Path of the directory on the host. If the path is a symlink, it will follow the link to the real path. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath"', args=[d.arg(name='path', type=d.T.string)]), - withPath(path): { source+: { inlineVolumeSpec+: { hostPath+: { path: path } } } }, - '#withType':: d.fn(help='"Type for HostPath Volume Defaults to \\"\\" More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath"', args=[d.arg(name='type', type=d.T.string)]), - withType(type): { source+: { inlineVolumeSpec+: { hostPath+: { type: type } } } }, - }, - '#iscsi':: d.obj(help='"ISCSIPersistentVolumeSource represents an ISCSI disk. ISCSI volumes can only be mounted as read/write once. ISCSI volumes support ownership management and SELinux relabeling."'), - iscsi: { - '#secretRef':: d.obj(help='"SecretReference represents a Secret Reference. 
It has enough information to retrieve secret in any namespace"'), - secretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { source+: { inlineVolumeSpec+: { iscsi+: { secretRef+: { name: name } } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { source+: { inlineVolumeSpec+: { iscsi+: { secretRef+: { namespace: namespace } } } } }, - }, - '#withChapAuthDiscovery':: d.fn(help='"whether support iSCSI Discovery CHAP authentication"', args=[d.arg(name='chapAuthDiscovery', type=d.T.boolean)]), - withChapAuthDiscovery(chapAuthDiscovery): { source+: { inlineVolumeSpec+: { iscsi+: { chapAuthDiscovery: chapAuthDiscovery } } } }, - '#withChapAuthSession':: d.fn(help='"whether support iSCSI Session CHAP authentication"', args=[d.arg(name='chapAuthSession', type=d.T.boolean)]), - withChapAuthSession(chapAuthSession): { source+: { inlineVolumeSpec+: { iscsi+: { chapAuthSession: chapAuthSession } } } }, - '#withFsType':: d.fn(help='"Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi"', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { source+: { inlineVolumeSpec+: { iscsi+: { fsType: fsType } } } }, - '#withInitiatorName':: d.fn(help='"Custom iSCSI Initiator Name. If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface : will be created for the connection."', args=[d.arg(name='initiatorName', type=d.T.string)]), - withInitiatorName(initiatorName): { source+: { inlineVolumeSpec+: { iscsi+: { initiatorName: initiatorName } } } }, - '#withIqn':: d.fn(help='"Target iSCSI Qualified Name."', args=[d.arg(name='iqn', type=d.T.string)]), - withIqn(iqn): { source+: { inlineVolumeSpec+: { iscsi+: { iqn: iqn } } } }, - '#withIscsiInterface':: d.fn(help="\"iSCSI Interface Name that uses an iSCSI transport. Defaults to 'default' (tcp).\"", args=[d.arg(name='iscsiInterface', type=d.T.string)]), - withIscsiInterface(iscsiInterface): { source+: { inlineVolumeSpec+: { iscsi+: { iscsiInterface: iscsiInterface } } } }, - '#withLun':: d.fn(help='"iSCSI Target Lun number."', args=[d.arg(name='lun', type=d.T.integer)]), - withLun(lun): { source+: { inlineVolumeSpec+: { iscsi+: { lun: lun } } } }, - '#withPortals':: d.fn(help='"iSCSI Target Portal List. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260)."', args=[d.arg(name='portals', type=d.T.array)]), - withPortals(portals): { source+: { inlineVolumeSpec+: { iscsi+: { portals: if std.isArray(v=portals) then portals else [portals] } } } }, - '#withPortalsMixin':: d.fn(help='"iSCSI Target Portal List. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260)."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='portals', type=d.T.array)]), - withPortalsMixin(portals): { source+: { inlineVolumeSpec+: { iscsi+: { portals+: if std.isArray(v=portals) then portals else [portals] } } } }, - '#withReadOnly':: d.fn(help='"ReadOnly here will force the ReadOnly setting in VolumeMounts. 
Defaults to false."', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { source+: { inlineVolumeSpec+: { iscsi+: { readOnly: readOnly } } } }, - '#withTargetPortal':: d.fn(help='"iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260)."', args=[d.arg(name='targetPortal', type=d.T.string)]), - withTargetPortal(targetPortal): { source+: { inlineVolumeSpec+: { iscsi+: { targetPortal: targetPortal } } } }, - }, - '#local':: d.obj(help='"Local represents directly-attached storage with node affinity (Beta feature)"'), - 'local': { - '#withFsType':: d.fn(help='"Filesystem type to mount. It applies only when the Path is a block device. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". The default value is to auto-select a fileystem if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { source+: { inlineVolumeSpec+: { 'local'+: { fsType: fsType } } } }, - '#withPath':: d.fn(help='"The full path to the volume on the node. It can be either a directory or block device (disk, partition, ...)."', args=[d.arg(name='path', type=d.T.string)]), - withPath(path): { source+: { inlineVolumeSpec+: { 'local'+: { path: path } } } }, - }, - '#nfs':: d.obj(help='"Represents an NFS mount that lasts the lifetime of a pod. NFS volumes do not support ownership management or SELinux relabeling."'), - nfs: { - '#withPath':: d.fn(help='"Path that is exported by the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs"', args=[d.arg(name='path', type=d.T.string)]), - withPath(path): { source+: { inlineVolumeSpec+: { nfs+: { path: path } } } }, - '#withReadOnly':: d.fn(help='"ReadOnly here will force the NFS export to be mounted with read-only permissions. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs"', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { source+: { inlineVolumeSpec+: { nfs+: { readOnly: readOnly } } } }, - '#withServer':: d.fn(help='"Server is the hostname or IP address of the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs"', args=[d.arg(name='server', type=d.T.string)]), - withServer(server): { source+: { inlineVolumeSpec+: { nfs+: { server: server } } } }, - }, - '#nodeAffinity':: d.obj(help='"VolumeNodeAffinity defines constraints that limit what nodes this volume can be accessed from."'), - nodeAffinity: { - '#required':: d.obj(help='"A node selector represents the union of the results of one or more label queries over a set of nodes; that is, it represents the OR of the selectors represented by the node selector terms."'), - required: { - '#withNodeSelectorTerms':: d.fn(help='"Required. A list of node selector terms. The terms are ORed."', args=[d.arg(name='nodeSelectorTerms', type=d.T.array)]), - withNodeSelectorTerms(nodeSelectorTerms): { source+: { inlineVolumeSpec+: { nodeAffinity+: { required+: { nodeSelectorTerms: if std.isArray(v=nodeSelectorTerms) then nodeSelectorTerms else [nodeSelectorTerms] } } } } }, - '#withNodeSelectorTermsMixin':: d.fn(help='"Required. A list of node selector terms. 
The terms are ORed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='nodeSelectorTerms', type=d.T.array)]), - withNodeSelectorTermsMixin(nodeSelectorTerms): { source+: { inlineVolumeSpec+: { nodeAffinity+: { required+: { nodeSelectorTerms+: if std.isArray(v=nodeSelectorTerms) then nodeSelectorTerms else [nodeSelectorTerms] } } } } }, - }, - }, - '#photonPersistentDisk':: d.obj(help='"Represents a Photon Controller persistent disk resource."'), - photonPersistentDisk: { - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { source+: { inlineVolumeSpec+: { photonPersistentDisk+: { fsType: fsType } } } }, - '#withPdID':: d.fn(help='"ID that identifies Photon Controller persistent disk"', args=[d.arg(name='pdID', type=d.T.string)]), - withPdID(pdID): { source+: { inlineVolumeSpec+: { photonPersistentDisk+: { pdID: pdID } } } }, - }, - '#portworxVolume':: d.obj(help='"PortworxVolumeSource represents a Portworx volume resource."'), - portworxVolume: { - '#withFsType':: d.fn(help='"FSType represents the filesystem type to mount Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { source+: { inlineVolumeSpec+: { portworxVolume+: { fsType: fsType } } } }, - '#withReadOnly':: d.fn(help='"Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { source+: { inlineVolumeSpec+: { portworxVolume+: { readOnly: readOnly } } } }, - '#withVolumeID':: d.fn(help='"VolumeID uniquely identifies a Portworx volume"', args=[d.arg(name='volumeID', type=d.T.string)]), - withVolumeID(volumeID): { source+: { inlineVolumeSpec+: { portworxVolume+: { volumeID: volumeID } } } }, - }, - '#quobyte':: d.obj(help='"Represents a Quobyte mount that lasts the lifetime of a pod. Quobyte volumes do not support ownership management or SELinux relabeling."'), - quobyte: { - '#withGroup':: d.fn(help='"Group to map volume access to Default is no group"', args=[d.arg(name='group', type=d.T.string)]), - withGroup(group): { source+: { inlineVolumeSpec+: { quobyte+: { group: group } } } }, - '#withReadOnly':: d.fn(help='"ReadOnly here will force the Quobyte volume to be mounted with read-only permissions. 
Defaults to false."', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { source+: { inlineVolumeSpec+: { quobyte+: { readOnly: readOnly } } } }, - '#withRegistry':: d.fn(help='"Registry represents a single or multiple Quobyte Registry services specified as a string as host:port pair (multiple entries are separated with commas) which acts as the central registry for volumes"', args=[d.arg(name='registry', type=d.T.string)]), - withRegistry(registry): { source+: { inlineVolumeSpec+: { quobyte+: { registry: registry } } } }, - '#withTenant':: d.fn(help='"Tenant owning the given Quobyte volume in the Backend Used with dynamically provisioned Quobyte volumes, value is set by the plugin"', args=[d.arg(name='tenant', type=d.T.string)]), - withTenant(tenant): { source+: { inlineVolumeSpec+: { quobyte+: { tenant: tenant } } } }, - '#withUser':: d.fn(help='"User to map volume access to Defaults to serivceaccount user"', args=[d.arg(name='user', type=d.T.string)]), - withUser(user): { source+: { inlineVolumeSpec+: { quobyte+: { user: user } } } }, - '#withVolume':: d.fn(help='"Volume is a string that references an already created Quobyte volume by name."', args=[d.arg(name='volume', type=d.T.string)]), - withVolume(volume): { source+: { inlineVolumeSpec+: { quobyte+: { volume: volume } } } }, - }, - '#rbd':: d.obj(help='"Represents a Rados Block Device mount that lasts the lifetime of a pod. RBD volumes support ownership management and SELinux relabeling."'), - rbd: { - '#secretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), - secretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { source+: { inlineVolumeSpec+: { rbd+: { secretRef+: { name: name } } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { source+: { inlineVolumeSpec+: { rbd+: { secretRef+: { namespace: namespace } } } } }, - }, - '#withFsType':: d.fn(help='"Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd"', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { source+: { inlineVolumeSpec+: { rbd+: { fsType: fsType } } } }, - '#withImage':: d.fn(help='"The rados image name. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='image', type=d.T.string)]), - withImage(image): { source+: { inlineVolumeSpec+: { rbd+: { image: image } } } }, - '#withKeyring':: d.fn(help='"Keyring is the path to key ring for RBDUser. Default is /etc/ceph/keyring. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='keyring', type=d.T.string)]), - withKeyring(keyring): { source+: { inlineVolumeSpec+: { rbd+: { keyring: keyring } } } }, - '#withMonitors':: d.fn(help='"A collection of Ceph monitors. 
More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='monitors', type=d.T.array)]), - withMonitors(monitors): { source+: { inlineVolumeSpec+: { rbd+: { monitors: if std.isArray(v=monitors) then monitors else [monitors] } } } }, - '#withMonitorsMixin':: d.fn(help='"A collection of Ceph monitors. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='monitors', type=d.T.array)]), - withMonitorsMixin(monitors): { source+: { inlineVolumeSpec+: { rbd+: { monitors+: if std.isArray(v=monitors) then monitors else [monitors] } } } }, - '#withPool':: d.fn(help='"The rados pool name. Default is rbd. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='pool', type=d.T.string)]), - withPool(pool): { source+: { inlineVolumeSpec+: { rbd+: { pool: pool } } } }, - '#withReadOnly':: d.fn(help='"ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { source+: { inlineVolumeSpec+: { rbd+: { readOnly: readOnly } } } }, - '#withUser':: d.fn(help='"The rados user name. Default is admin. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='user', type=d.T.string)]), - withUser(user): { source+: { inlineVolumeSpec+: { rbd+: { user: user } } } }, - }, - '#scaleIO':: d.obj(help='"ScaleIOPersistentVolumeSource represents a persistent ScaleIO volume"'), - scaleIO: { - '#secretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), - secretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { source+: { inlineVolumeSpec+: { scaleIO+: { secretRef+: { name: name } } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { source+: { inlineVolumeSpec+: { scaleIO+: { secretRef+: { namespace: namespace } } } } }, - }, - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Default is \\"xfs\\', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { source+: { inlineVolumeSpec+: { scaleIO+: { fsType: fsType } } } }, - '#withGateway':: d.fn(help='"The host address of the ScaleIO API Gateway."', args=[d.arg(name='gateway', type=d.T.string)]), - withGateway(gateway): { source+: { inlineVolumeSpec+: { scaleIO+: { gateway: gateway } } } }, - '#withProtectionDomain':: d.fn(help='"The name of the ScaleIO Protection Domain for the configured storage."', args=[d.arg(name='protectionDomain', type=d.T.string)]), - withProtectionDomain(protectionDomain): { source+: { inlineVolumeSpec+: { scaleIO+: { protectionDomain: protectionDomain } } } }, - '#withReadOnly':: d.fn(help='"Defaults to false (read/write). 
ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { source+: { inlineVolumeSpec+: { scaleIO+: { readOnly: readOnly } } } }, - '#withSslEnabled':: d.fn(help='"Flag to enable/disable SSL communication with Gateway, default false"', args=[d.arg(name='sslEnabled', type=d.T.boolean)]), - withSslEnabled(sslEnabled): { source+: { inlineVolumeSpec+: { scaleIO+: { sslEnabled: sslEnabled } } } }, - '#withStorageMode':: d.fn(help='"Indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. Default is ThinProvisioned."', args=[d.arg(name='storageMode', type=d.T.string)]), - withStorageMode(storageMode): { source+: { inlineVolumeSpec+: { scaleIO+: { storageMode: storageMode } } } }, - '#withStoragePool':: d.fn(help='"The ScaleIO Storage Pool associated with the protection domain."', args=[d.arg(name='storagePool', type=d.T.string)]), - withStoragePool(storagePool): { source+: { inlineVolumeSpec+: { scaleIO+: { storagePool: storagePool } } } }, - '#withSystem':: d.fn(help='"The name of the storage system as configured in ScaleIO."', args=[d.arg(name='system', type=d.T.string)]), - withSystem(system): { source+: { inlineVolumeSpec+: { scaleIO+: { system: system } } } }, - '#withVolumeName':: d.fn(help='"The name of a volume already created in the ScaleIO system that is associated with this volume source."', args=[d.arg(name='volumeName', type=d.T.string)]), - withVolumeName(volumeName): { source+: { inlineVolumeSpec+: { scaleIO+: { volumeName: volumeName } } } }, - }, - '#storageos':: d.obj(help='"Represents a StorageOS persistent volume resource."'), - storageos: { - '#secretRef':: d.obj(help='"ObjectReference contains enough information to let you inspect or modify the referred object."'), - secretRef: { - '#withApiVersion':: d.fn(help='"API version of the referent."', args=[d.arg(name='apiVersion', type=d.T.string)]), - withApiVersion(apiVersion): { source+: { inlineVolumeSpec+: { storageos+: { secretRef+: { apiVersion: apiVersion } } } } }, - '#withFieldPath':: d.fn(help='"If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. For example, if the object reference is to a container within a pod, this would take on a value like: \\"spec.containers{name}\\" (where \\"name\\" refers to the name of the container that triggered the event) or if no container name is specified \\"spec.containers[2]\\" (container with index 2 in this pod). This syntax is chosen only to have some well-defined way of referencing a part of an object."', args=[d.arg(name='fieldPath', type=d.T.string)]), - withFieldPath(fieldPath): { source+: { inlineVolumeSpec+: { storageos+: { secretRef+: { fieldPath: fieldPath } } } } }, - '#withKind':: d.fn(help='"Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds"', args=[d.arg(name='kind', type=d.T.string)]), - withKind(kind): { source+: { inlineVolumeSpec+: { storageos+: { secretRef+: { kind: kind } } } } }, - '#withName':: d.fn(help='"Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { source+: { inlineVolumeSpec+: { storageos+: { secretRef+: { name: name } } } } }, - '#withNamespace':: d.fn(help='"Namespace of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/"', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { source+: { inlineVolumeSpec+: { storageos+: { secretRef+: { namespace: namespace } } } } }, - '#withResourceVersion':: d.fn(help='"Specific resourceVersion to which this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), - withResourceVersion(resourceVersion): { source+: { inlineVolumeSpec+: { storageos+: { secretRef+: { resourceVersion: resourceVersion } } } } }, - '#withUid':: d.fn(help='"UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids"', args=[d.arg(name='uid', type=d.T.string)]), - withUid(uid): { source+: { inlineVolumeSpec+: { storageos+: { secretRef+: { uid: uid } } } } }, - }, - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { source+: { inlineVolumeSpec+: { storageos+: { fsType: fsType } } } }, - '#withReadOnly':: d.fn(help='"Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { source+: { inlineVolumeSpec+: { storageos+: { readOnly: readOnly } } } }, - '#withVolumeName':: d.fn(help='"VolumeName is the human-readable name of the StorageOS volume. Volume names are only unique within a namespace."', args=[d.arg(name='volumeName', type=d.T.string)]), - withVolumeName(volumeName): { source+: { inlineVolumeSpec+: { storageos+: { volumeName: volumeName } } } }, - '#withVolumeNamespace':: d.fn(help="\"VolumeNamespace specifies the scope of the volume within StorageOS. If no namespace is specified then the Pod's namespace will be used. This allows the Kubernetes name scoping to be mirrored within StorageOS for tighter integration. Set VolumeName to any name to override the default behaviour. Set to \\\"default\\\" if you are not using namespaces within StorageOS. Namespaces that do not pre-exist within StorageOS will be created.\"", args=[d.arg(name='volumeNamespace', type=d.T.string)]), - withVolumeNamespace(volumeNamespace): { source+: { inlineVolumeSpec+: { storageos+: { volumeNamespace: volumeNamespace } } } }, - }, - '#vsphereVolume':: d.obj(help='"Represents a vSphere volume resource."'), - vsphereVolume: { - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". 
Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { source+: { inlineVolumeSpec+: { vsphereVolume+: { fsType: fsType } } } }, - '#withStoragePolicyID':: d.fn(help='"Storage Policy Based Management (SPBM) profile ID associated with the StoragePolicyName."', args=[d.arg(name='storagePolicyID', type=d.T.string)]), - withStoragePolicyID(storagePolicyID): { source+: { inlineVolumeSpec+: { vsphereVolume+: { storagePolicyID: storagePolicyID } } } }, - '#withStoragePolicyName':: d.fn(help='"Storage Policy Based Management (SPBM) profile name."', args=[d.arg(name='storagePolicyName', type=d.T.string)]), - withStoragePolicyName(storagePolicyName): { source+: { inlineVolumeSpec+: { vsphereVolume+: { storagePolicyName: storagePolicyName } } } }, - '#withVolumePath':: d.fn(help='"Path that identifies vSphere volume vmdk"', args=[d.arg(name='volumePath', type=d.T.string)]), - withVolumePath(volumePath): { source+: { inlineVolumeSpec+: { vsphereVolume+: { volumePath: volumePath } } } }, - }, - '#withAccessModes':: d.fn(help='"AccessModes contains all ways the volume can be mounted. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes"', args=[d.arg(name='accessModes', type=d.T.array)]), - withAccessModes(accessModes): { source+: { inlineVolumeSpec+: { accessModes: if std.isArray(v=accessModes) then accessModes else [accessModes] } } }, - '#withAccessModesMixin':: d.fn(help='"AccessModes contains all ways the volume can be mounted. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='accessModes', type=d.T.array)]), - withAccessModesMixin(accessModes): { source+: { inlineVolumeSpec+: { accessModes+: if std.isArray(v=accessModes) then accessModes else [accessModes] } } }, - '#withCapacity':: d.fn(help="\"A description of the persistent volume's resources and capacity. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity\"", args=[d.arg(name='capacity', type=d.T.object)]), - withCapacity(capacity): { source+: { inlineVolumeSpec+: { capacity: capacity } } }, - '#withCapacityMixin':: d.fn(help="\"A description of the persistent volume's resources and capacity. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='capacity', type=d.T.object)]), - withCapacityMixin(capacity): { source+: { inlineVolumeSpec+: { capacity+: capacity } } }, - '#withMountOptions':: d.fn(help='"A list of mount options, e.g. [\\"ro\\", \\"soft\\"]. Not validated - mount will simply fail if one is invalid. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#mount-options"', args=[d.arg(name='mountOptions', type=d.T.array)]), - withMountOptions(mountOptions): { source+: { inlineVolumeSpec+: { mountOptions: if std.isArray(v=mountOptions) then mountOptions else [mountOptions] } } }, - '#withMountOptionsMixin':: d.fn(help='"A list of mount options, e.g. [\\"ro\\", \\"soft\\"]. Not validated - mount will simply fail if one is invalid. 
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#mount-options"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='mountOptions', type=d.T.array)]), - withMountOptionsMixin(mountOptions): { source+: { inlineVolumeSpec+: { mountOptions+: if std.isArray(v=mountOptions) then mountOptions else [mountOptions] } } }, - '#withPersistentVolumeReclaimPolicy':: d.fn(help='"What happens to a persistent volume when released from its claim. Valid options are Retain (default for manually created PersistentVolumes), Delete (default for dynamically provisioned PersistentVolumes), and Recycle (deprecated). Recycle must be supported by the volume plugin underlying this PersistentVolume. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#reclaiming"', args=[d.arg(name='persistentVolumeReclaimPolicy', type=d.T.string)]), - withPersistentVolumeReclaimPolicy(persistentVolumeReclaimPolicy): { source+: { inlineVolumeSpec+: { persistentVolumeReclaimPolicy: persistentVolumeReclaimPolicy } } }, - '#withStorageClassName':: d.fn(help='"Name of StorageClass to which this persistent volume belongs. Empty value means that this volume does not belong to any StorageClass."', args=[d.arg(name='storageClassName', type=d.T.string)]), - withStorageClassName(storageClassName): { source+: { inlineVolumeSpec+: { storageClassName: storageClassName } } }, - '#withVolumeMode':: d.fn(help='"volumeMode defines if a volume is intended to be used with a formatted filesystem or to remain in raw block state. Value of Filesystem is implied when not included in spec."', args=[d.arg(name='volumeMode', type=d.T.string)]), - withVolumeMode(volumeMode): { source+: { inlineVolumeSpec+: { volumeMode: volumeMode } } }, - }, - '#withPersistentVolumeName':: d.fn(help='"Name of the persistent volume to attach."', args=[d.arg(name='persistentVolumeName', type=d.T.string)]), - withPersistentVolumeName(persistentVolumeName): { source+: { persistentVolumeName: persistentVolumeName } }, - }, - '#withAttacher':: d.fn(help='"Attacher indicates the name of the volume driver that MUST handle this request. This is the name returned by GetPluginName()."', args=[d.arg(name='attacher', type=d.T.string)]), - withAttacher(attacher): { attacher: attacher }, - '#withNodeName':: d.fn(help='"The node that the volume should be attached to."', args=[d.arg(name='nodeName', type=d.T.string)]), - withNodeName(nodeName): { nodeName: nodeName }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1/volumeAttachmentStatus.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1/volumeAttachmentStatus.libsonnet deleted file mode 100644 index 638334f0b9d..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1/volumeAttachmentStatus.libsonnet +++ /dev/null @@ -1,26 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='volumeAttachmentStatus', url='', help='"VolumeAttachmentStatus is the status of a VolumeAttachment request."'), - '#attachError':: d.obj(help='"VolumeError captures an error encountered during a volume operation."'), - attachError: { - '#withMessage':: d.fn(help='"String detailing the error encountered during Attach or Detach operation. 
This string may be logged, so it should not contain sensitive information."', args=[d.arg(name='message', type=d.T.string)]), - withMessage(message): { attachError+: { message: message } }, - '#withTime':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='time', type=d.T.string)]), - withTime(time): { attachError+: { time: time } }, - }, - '#detachError':: d.obj(help='"VolumeError captures an error encountered during a volume operation."'), - detachError: { - '#withMessage':: d.fn(help='"String detailing the error encountered during Attach or Detach operation. This string may be logged, so it should not contain sensitive information."', args=[d.arg(name='message', type=d.T.string)]), - withMessage(message): { detachError+: { message: message } }, - '#withTime':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='time', type=d.T.string)]), - withTime(time): { detachError+: { time: time } }, - }, - '#withAttached':: d.fn(help='"Indicates the volume is successfully attached. This field must only be set by the entity completing the attach operation, i.e. the external-attacher."', args=[d.arg(name='attached', type=d.T.boolean)]), - withAttached(attached): { attached: attached }, - '#withAttachmentMetadata':: d.fn(help='"Upon successful attach, this field is populated with any information returned by the attach operation that must be passed into subsequent WaitForAttach or Mount calls. This field must only be set by the entity completing the attach operation, i.e. the external-attacher."', args=[d.arg(name='attachmentMetadata', type=d.T.object)]), - withAttachmentMetadata(attachmentMetadata): { attachmentMetadata: attachmentMetadata }, - '#withAttachmentMetadataMixin':: d.fn(help='"Upon successful attach, this field is populated with any information returned by the attach operation that must be passed into subsequent WaitForAttach or Mount calls. This field must only be set by the entity completing the attach operation, i.e. the external-attacher."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='attachmentMetadata', type=d.T.object)]), - withAttachmentMetadataMixin(attachmentMetadata): { attachmentMetadata+: attachmentMetadata }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1/volumeNodeResources.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1/volumeNodeResources.libsonnet deleted file mode 100644 index d9894f93089..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1/volumeNodeResources.libsonnet +++ /dev/null @@ -1,8 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='volumeNodeResources', url='', help='"VolumeNodeResources is a set of resource limits for scheduling of volumes."'), - '#withCount':: d.fn(help='"Maximum number of unique volumes managed by the CSI driver that can be used on a node. A volume that is both attached and mounted on a node is considered to be used once, not twice. The same rule applies for a unique volume that is shared among multiple pods on the same node. 
If this field is not specified, then the supported number of volumes on this node is unbounded."', args=[d.arg(name='count', type=d.T.integer)]), - withCount(count): { count: count }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1alpha1/csiStorageCapacity.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1alpha1/csiStorageCapacity.libsonnet deleted file mode 100644 index e5f0a9d679a..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1alpha1/csiStorageCapacity.libsonnet +++ /dev/null @@ -1,73 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='csiStorageCapacity', url='', help='"CSIStorageCapacity stores the result of one CSI GetCapacity call. For a given StorageClass, this describes the available capacity in a particular topology segment. This can be used when considering where to instantiate new PersistentVolumes.\\n\\nFor example this can express things like: - StorageClass \\"standard\\" has \\"1234 GiB\\" available in \\"topology.kubernetes.io/zone=us-east1\\" - StorageClass \\"localssd\\" has \\"10 GiB\\" available in \\"kubernetes.io/hostname=knode-abc123\\"\\n\\nThe following three cases all imply that no capacity is available for a certain combination: - no object exists with suitable topology and storage class name - such an object exists, but the capacity is unset - such an object exists, but the capacity is zero\\n\\nThe producer of these objects can decide which approach is more suitable.\\n\\nThey are consumed by the kube-scheduler if the CSIStorageCapacity beta feature gate is enabled there and a CSI driver opts into capacity-aware scheduling with CSIDriver.StorageCapacity."'), - '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), - metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), - withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), - withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, - '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. 
Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), - withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, - '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), - withDeletionGracePeriodSeconds(deletionGracePeriodSeconds): { metadata+: { deletionGracePeriodSeconds: deletionGracePeriodSeconds } }, - '#withDeletionTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='deletionTimestamp', type=d.T.string)]), - withDeletionTimestamp(deletionTimestamp): { metadata+: { deletionTimestamp: deletionTimestamp } }, - '#withFinalizers':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. 
If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), - withGenerateName(generateName): { metadata+: { generateName: generateName } }, - '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), - withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), - withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), - withLabelsMixin(labels): { metadata+: { labels+: labels } }, - '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. 
Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { metadata+: { namespace: namespace } }, - '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, - '#withOwnerReferencesMixin':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, - '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), - withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), - withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. 
It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), - withUid(uid): { metadata+: { uid: uid } }, - }, - '#new':: d.fn(help='new returns an instance of CSIStorageCapacity', args=[d.arg(name='name', type=d.T.string)]), - new(name): { - apiVersion: 'storage.k8s.io/v1alpha1', - kind: 'CSIStorageCapacity', - } + self.metadata.withName(name=name), - '#nodeTopology':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), - nodeTopology: { - '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), - withMatchExpressions(matchExpressions): { nodeTopology+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } }, - '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), - withMatchExpressionsMixin(matchExpressions): { nodeTopology+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } }, - '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), - withMatchLabels(matchLabels): { nodeTopology+: { matchLabels: matchLabels } }, - '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), - withMatchLabelsMixin(matchLabels): { nodeTopology+: { matchLabels+: matchLabels } }, - }, - '#withCapacity':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. 
| .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='capacity', type=d.T.string)]), - withCapacity(capacity): { capacity: capacity }, - '#withMaximumVolumeSize':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. 
| .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='maximumVolumeSize', type=d.T.string)]), - withMaximumVolumeSize(maximumVolumeSize): { maximumVolumeSize: maximumVolumeSize }, - '#withStorageClassName':: d.fn(help='"The name of the StorageClass that the reported capacity applies to. It must meet the same requirements as the name of a StorageClass object (non-empty, DNS subdomain). If that object no longer exists, the CSIStorageCapacity object is obsolete and should be removed by its creator. 
This field is immutable."', args=[d.arg(name='storageClassName', type=d.T.string)]), - withStorageClassName(storageClassName): { storageClassName: storageClassName }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1alpha1/main.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1alpha1/main.libsonnet deleted file mode 100644 index d84b7da6f7f..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1alpha1/main.libsonnet +++ /dev/null @@ -1,10 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='v1alpha1', url='', help=''), - csiStorageCapacity: (import 'csiStorageCapacity.libsonnet'), - volumeAttachment: (import 'volumeAttachment.libsonnet'), - volumeAttachmentSource: (import 'volumeAttachmentSource.libsonnet'), - volumeAttachmentSpec: (import 'volumeAttachmentSpec.libsonnet'), - volumeAttachmentStatus: (import 'volumeAttachmentStatus.libsonnet'), - volumeError: (import 'volumeError.libsonnet'), -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1alpha1/volumeAttachment.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1alpha1/volumeAttachment.libsonnet deleted file mode 100644 index 1ca30669fb4..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1alpha1/volumeAttachment.libsonnet +++ /dev/null @@ -1,479 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='volumeAttachment', url='', help='"VolumeAttachment captures the intent to attach or detach the specified volume to/from the specified node.\\n\\nVolumeAttachment objects are non-namespaced."'), - '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), - metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), - withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), - withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, - '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. 
Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), - withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, - '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), - withDeletionGracePeriodSeconds(deletionGracePeriodSeconds): { metadata+: { deletionGracePeriodSeconds: deletionGracePeriodSeconds } }, - '#withDeletionTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='deletionTimestamp', type=d.T.string)]), - withDeletionTimestamp(deletionTimestamp): { metadata+: { deletionTimestamp: deletionTimestamp } }, - '#withFinalizers':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. 
If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), - withGenerateName(generateName): { metadata+: { generateName: generateName } }, - '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), - withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), - withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), - withLabelsMixin(labels): { metadata+: { labels+: labels } }, - '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. 
Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { metadata+: { namespace: namespace } }, - '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, - '#withOwnerReferencesMixin':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, - '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), - withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), - withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. 
It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), - withUid(uid): { metadata+: { uid: uid } }, - }, - '#new':: d.fn(help='new returns an instance of VolumeAttachment', args=[d.arg(name='name', type=d.T.string)]), - new(name): { - apiVersion: 'storage.k8s.io/v1alpha1', - kind: 'VolumeAttachment', - } + self.metadata.withName(name=name), - '#spec':: d.obj(help='"VolumeAttachmentSpec is the specification of a VolumeAttachment request."'), - spec: { - '#source':: d.obj(help='"VolumeAttachmentSource represents a volume that should be attached. Right now only PersistenVolumes can be attached via external attacher, in future we may allow also inline volumes in pods. Exactly one member can be set."'), - source: { - '#inlineVolumeSpec':: d.obj(help='"PersistentVolumeSpec is the specification of a persistent volume."'), - inlineVolumeSpec: { - '#awsElasticBlockStore':: d.obj(help='"Represents a Persistent Disk resource in AWS.\\n\\nAn AWS EBS disk must exist before mounting to a container. The disk must also be in the same AWS zone as the kubelet. An AWS EBS disk can only be mounted as read/write once. AWS EBS volumes support ownership management and SELinux relabeling."'), - awsElasticBlockStore: { - '#withFsType':: d.fn(help='"Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore"', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { spec+: { source+: { inlineVolumeSpec+: { awsElasticBlockStore+: { fsType: fsType } } } } }, - '#withPartition':: d.fn(help='"The partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \\"1\\". Similarly, the volume partition for /dev/sda is \\"0\\" (or you can leave the property empty)."', args=[d.arg(name='partition', type=d.T.integer)]), - withPartition(partition): { spec+: { source+: { inlineVolumeSpec+: { awsElasticBlockStore+: { partition: partition } } } } }, - '#withReadOnly':: d.fn(help='"Specify \\"true\\" to force and set the ReadOnly property in VolumeMounts to \\"true\\". If omitted, the default is \\"false\\". More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore"', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { spec+: { source+: { inlineVolumeSpec+: { awsElasticBlockStore+: { readOnly: readOnly } } } } }, - '#withVolumeID':: d.fn(help='"Unique ID of the persistent disk resource in AWS (Amazon EBS volume). 
More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore"', args=[d.arg(name='volumeID', type=d.T.string)]), - withVolumeID(volumeID): { spec+: { source+: { inlineVolumeSpec+: { awsElasticBlockStore+: { volumeID: volumeID } } } } }, - }, - '#azureDisk':: d.obj(help='"AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod."'), - azureDisk: { - '#withCachingMode':: d.fn(help='"Host Caching mode: None, Read Only, Read Write."', args=[d.arg(name='cachingMode', type=d.T.string)]), - withCachingMode(cachingMode): { spec+: { source+: { inlineVolumeSpec+: { azureDisk+: { cachingMode: cachingMode } } } } }, - '#withDiskName':: d.fn(help='"The Name of the data disk in the blob storage"', args=[d.arg(name='diskName', type=d.T.string)]), - withDiskName(diskName): { spec+: { source+: { inlineVolumeSpec+: { azureDisk+: { diskName: diskName } } } } }, - '#withDiskURI':: d.fn(help='"The URI the data disk in the blob storage"', args=[d.arg(name='diskURI', type=d.T.string)]), - withDiskURI(diskURI): { spec+: { source+: { inlineVolumeSpec+: { azureDisk+: { diskURI: diskURI } } } } }, - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { spec+: { source+: { inlineVolumeSpec+: { azureDisk+: { fsType: fsType } } } } }, - '#withKind':: d.fn(help='"Expected values Shared: multiple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared"', args=[d.arg(name='kind', type=d.T.string)]), - withKind(kind): { spec+: { source+: { inlineVolumeSpec+: { azureDisk+: { kind: kind } } } } }, - '#withReadOnly':: d.fn(help='"Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { spec+: { source+: { inlineVolumeSpec+: { azureDisk+: { readOnly: readOnly } } } } }, - }, - '#azureFile':: d.obj(help='"AzureFile represents an Azure File Service mount on the host and bind mount to the pod."'), - azureFile: { - '#withReadOnly':: d.fn(help='"Defaults to false (read/write). 
ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { spec+: { source+: { inlineVolumeSpec+: { azureFile+: { readOnly: readOnly } } } } }, - '#withSecretName':: d.fn(help='"the name of secret that contains Azure Storage Account Name and Key"', args=[d.arg(name='secretName', type=d.T.string)]), - withSecretName(secretName): { spec+: { source+: { inlineVolumeSpec+: { azureFile+: { secretName: secretName } } } } }, - '#withSecretNamespace':: d.fn(help='"the namespace of the secret that contains Azure Storage Account Name and Key default is the same as the Pod"', args=[d.arg(name='secretNamespace', type=d.T.string)]), - withSecretNamespace(secretNamespace): { spec+: { source+: { inlineVolumeSpec+: { azureFile+: { secretNamespace: secretNamespace } } } } }, - '#withShareName':: d.fn(help='"Share Name"', args=[d.arg(name='shareName', type=d.T.string)]), - withShareName(shareName): { spec+: { source+: { inlineVolumeSpec+: { azureFile+: { shareName: shareName } } } } }, - }, - '#cephfs':: d.obj(help='"Represents a Ceph Filesystem mount that lasts the lifetime of a pod Cephfs volumes do not support ownership management or SELinux relabeling."'), - cephfs: { - '#secretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), - secretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { spec+: { source+: { inlineVolumeSpec+: { cephfs+: { secretRef+: { name: name } } } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { spec+: { source+: { inlineVolumeSpec+: { cephfs+: { secretRef+: { namespace: namespace } } } } } }, - }, - '#withMonitors':: d.fn(help='"Required: Monitors is a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='monitors', type=d.T.array)]), - withMonitors(monitors): { spec+: { source+: { inlineVolumeSpec+: { cephfs+: { monitors: if std.isArray(v=monitors) then monitors else [monitors] } } } } }, - '#withMonitorsMixin':: d.fn(help='"Required: Monitors is a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='monitors', type=d.T.array)]), - withMonitorsMixin(monitors): { spec+: { source+: { inlineVolumeSpec+: { cephfs+: { monitors+: if std.isArray(v=monitors) then monitors else [monitors] } } } } }, - '#withPath':: d.fn(help='"Optional: Used as the mounted root, rather than the full Ceph tree, default is /"', args=[d.arg(name='path', type=d.T.string)]), - withPath(path): { spec+: { source+: { inlineVolumeSpec+: { cephfs+: { path: path } } } } }, - '#withReadOnly':: d.fn(help='"Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. 
More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { spec+: { source+: { inlineVolumeSpec+: { cephfs+: { readOnly: readOnly } } } } }, - '#withSecretFile':: d.fn(help='"Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='secretFile', type=d.T.string)]), - withSecretFile(secretFile): { spec+: { source+: { inlineVolumeSpec+: { cephfs+: { secretFile: secretFile } } } } }, - '#withUser':: d.fn(help='"Optional: User is the rados user name, default is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='user', type=d.T.string)]), - withUser(user): { spec+: { source+: { inlineVolumeSpec+: { cephfs+: { user: user } } } } }, - }, - '#cinder':: d.obj(help='"Represents a cinder volume resource in Openstack. A Cinder volume must exist before mounting to a container. The volume must also be in the same region as the kubelet. Cinder volumes support ownership management and SELinux relabeling."'), - cinder: { - '#secretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), - secretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { spec+: { source+: { inlineVolumeSpec+: { cinder+: { secretRef+: { name: name } } } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { spec+: { source+: { inlineVolumeSpec+: { cinder+: { secretRef+: { namespace: namespace } } } } } }, - }, - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://examples.k8s.io/mysql-cinder-pd/README.md"', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { spec+: { source+: { inlineVolumeSpec+: { cinder+: { fsType: fsType } } } } }, - '#withReadOnly':: d.fn(help='"Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/mysql-cinder-pd/README.md"', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { spec+: { source+: { inlineVolumeSpec+: { cinder+: { readOnly: readOnly } } } } }, - '#withVolumeID':: d.fn(help='"volume id used to identify the volume in cinder. 
More info: https://examples.k8s.io/mysql-cinder-pd/README.md"', args=[d.arg(name='volumeID', type=d.T.string)]), - withVolumeID(volumeID): { spec+: { source+: { inlineVolumeSpec+: { cinder+: { volumeID: volumeID } } } } }, - }, - '#claimRef':: d.obj(help='"ObjectReference contains enough information to let you inspect or modify the referred object."'), - claimRef: { - '#withApiVersion':: d.fn(help='"API version of the referent."', args=[d.arg(name='apiVersion', type=d.T.string)]), - withApiVersion(apiVersion): { spec+: { source+: { inlineVolumeSpec+: { claimRef+: { apiVersion: apiVersion } } } } }, - '#withFieldPath':: d.fn(help='"If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. For example, if the object reference is to a container within a pod, this would take on a value like: \\"spec.containers{name}\\" (where \\"name\\" refers to the name of the container that triggered the event) or if no container name is specified \\"spec.containers[2]\\" (container with index 2 in this pod). This syntax is chosen only to have some well-defined way of referencing a part of an object."', args=[d.arg(name='fieldPath', type=d.T.string)]), - withFieldPath(fieldPath): { spec+: { source+: { inlineVolumeSpec+: { claimRef+: { fieldPath: fieldPath } } } } }, - '#withKind':: d.fn(help='"Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds"', args=[d.arg(name='kind', type=d.T.string)]), - withKind(kind): { spec+: { source+: { inlineVolumeSpec+: { claimRef+: { kind: kind } } } } }, - '#withName':: d.fn(help='"Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { spec+: { source+: { inlineVolumeSpec+: { claimRef+: { name: name } } } } }, - '#withNamespace':: d.fn(help='"Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/"', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { spec+: { source+: { inlineVolumeSpec+: { claimRef+: { namespace: namespace } } } } }, - '#withResourceVersion':: d.fn(help='"Specific resourceVersion to which this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), - withResourceVersion(resourceVersion): { spec+: { source+: { inlineVolumeSpec+: { claimRef+: { resourceVersion: resourceVersion } } } } }, - '#withUid':: d.fn(help='"UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids"', args=[d.arg(name='uid', type=d.T.string)]), - withUid(uid): { spec+: { source+: { inlineVolumeSpec+: { claimRef+: { uid: uid } } } } }, - }, - '#csi':: d.obj(help='"Represents storage that is managed by an external CSI volume driver (Beta feature)"'), - csi: { - '#controllerExpandSecretRef':: d.obj(help='"SecretReference represents a Secret Reference. 
It has enough information to retrieve secret in any namespace"'), - controllerExpandSecretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { spec+: { source+: { inlineVolumeSpec+: { csi+: { controllerExpandSecretRef+: { name: name } } } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { spec+: { source+: { inlineVolumeSpec+: { csi+: { controllerExpandSecretRef+: { namespace: namespace } } } } } }, - }, - '#controllerPublishSecretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), - controllerPublishSecretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { spec+: { source+: { inlineVolumeSpec+: { csi+: { controllerPublishSecretRef+: { name: name } } } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { spec+: { source+: { inlineVolumeSpec+: { csi+: { controllerPublishSecretRef+: { namespace: namespace } } } } } }, - }, - '#nodePublishSecretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), - nodePublishSecretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { spec+: { source+: { inlineVolumeSpec+: { csi+: { nodePublishSecretRef+: { name: name } } } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { spec+: { source+: { inlineVolumeSpec+: { csi+: { nodePublishSecretRef+: { namespace: namespace } } } } } }, - }, - '#nodeStageSecretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), - nodeStageSecretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { spec+: { source+: { inlineVolumeSpec+: { csi+: { nodeStageSecretRef+: { name: name } } } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { spec+: { source+: { inlineVolumeSpec+: { csi+: { nodeStageSecretRef+: { namespace: namespace } } } } } }, - }, - '#withDriver':: d.fn(help='"Driver is the name of the driver to use for this volume. Required."', args=[d.arg(name='driver', type=d.T.string)]), - withDriver(driver): { spec+: { source+: { inlineVolumeSpec+: { csi+: { driver: driver } } } } }, - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. 
\\"ext4\\", \\"xfs\\", \\"ntfs\\"."', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { spec+: { source+: { inlineVolumeSpec+: { csi+: { fsType: fsType } } } } }, - '#withReadOnly':: d.fn(help='"Optional: The value to pass to ControllerPublishVolumeRequest. Defaults to false (read/write)."', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { spec+: { source+: { inlineVolumeSpec+: { csi+: { readOnly: readOnly } } } } }, - '#withVolumeAttributes':: d.fn(help='"Attributes of the volume to publish."', args=[d.arg(name='volumeAttributes', type=d.T.object)]), - withVolumeAttributes(volumeAttributes): { spec+: { source+: { inlineVolumeSpec+: { csi+: { volumeAttributes: volumeAttributes } } } } }, - '#withVolumeAttributesMixin':: d.fn(help='"Attributes of the volume to publish."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='volumeAttributes', type=d.T.object)]), - withVolumeAttributesMixin(volumeAttributes): { spec+: { source+: { inlineVolumeSpec+: { csi+: { volumeAttributes+: volumeAttributes } } } } }, - '#withVolumeHandle':: d.fn(help='"VolumeHandle is the unique volume name returned by the CSI volume plugin’s CreateVolume to refer to the volume on all subsequent calls. Required."', args=[d.arg(name='volumeHandle', type=d.T.string)]), - withVolumeHandle(volumeHandle): { spec+: { source+: { inlineVolumeSpec+: { csi+: { volumeHandle: volumeHandle } } } } }, - }, - '#fc':: d.obj(help='"Represents a Fibre Channel volume. Fibre Channel volumes can only be mounted as read/write once. Fibre Channel volumes support ownership management and SELinux relabeling."'), - fc: { - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { spec+: { source+: { inlineVolumeSpec+: { fc+: { fsType: fsType } } } } }, - '#withLun':: d.fn(help='"Optional: FC target lun number"', args=[d.arg(name='lun', type=d.T.integer)]), - withLun(lun): { spec+: { source+: { inlineVolumeSpec+: { fc+: { lun: lun } } } } }, - '#withReadOnly':: d.fn(help='"Optional: Defaults to false (read/write). 
ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { spec+: { source+: { inlineVolumeSpec+: { fc+: { readOnly: readOnly } } } } }, - '#withTargetWWNs':: d.fn(help='"Optional: FC target worldwide names (WWNs)"', args=[d.arg(name='targetWWNs', type=d.T.array)]), - withTargetWWNs(targetWWNs): { spec+: { source+: { inlineVolumeSpec+: { fc+: { targetWWNs: if std.isArray(v=targetWWNs) then targetWWNs else [targetWWNs] } } } } }, - '#withTargetWWNsMixin':: d.fn(help='"Optional: FC target worldwide names (WWNs)"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='targetWWNs', type=d.T.array)]), - withTargetWWNsMixin(targetWWNs): { spec+: { source+: { inlineVolumeSpec+: { fc+: { targetWWNs+: if std.isArray(v=targetWWNs) then targetWWNs else [targetWWNs] } } } } }, - '#withWwids':: d.fn(help='"Optional: FC volume world wide identifiers (wwids) Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously."', args=[d.arg(name='wwids', type=d.T.array)]), - withWwids(wwids): { spec+: { source+: { inlineVolumeSpec+: { fc+: { wwids: if std.isArray(v=wwids) then wwids else [wwids] } } } } }, - '#withWwidsMixin':: d.fn(help='"Optional: FC volume world wide identifiers (wwids) Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='wwids', type=d.T.array)]), - withWwidsMixin(wwids): { spec+: { source+: { inlineVolumeSpec+: { fc+: { wwids+: if std.isArray(v=wwids) then wwids else [wwids] } } } } }, - }, - '#flexVolume':: d.obj(help='"FlexPersistentVolumeSource represents a generic persistent volume resource that is provisioned/attached using an exec based plugin."'), - flexVolume: { - '#secretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), - secretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { spec+: { source+: { inlineVolumeSpec+: { flexVolume+: { secretRef+: { name: name } } } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { spec+: { source+: { inlineVolumeSpec+: { flexVolume+: { secretRef+: { namespace: namespace } } } } } }, - }, - '#withDriver':: d.fn(help='"Driver is the name of the driver to use for this volume."', args=[d.arg(name='driver', type=d.T.string)]), - withDriver(driver): { spec+: { source+: { inlineVolumeSpec+: { flexVolume+: { driver: driver } } } } }, - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". 
The default filesystem depends on FlexVolume script."', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { spec+: { source+: { inlineVolumeSpec+: { flexVolume+: { fsType: fsType } } } } }, - '#withOptions':: d.fn(help='"Optional: Extra command options if any."', args=[d.arg(name='options', type=d.T.object)]), - withOptions(options): { spec+: { source+: { inlineVolumeSpec+: { flexVolume+: { options: options } } } } }, - '#withOptionsMixin':: d.fn(help='"Optional: Extra command options if any."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='options', type=d.T.object)]), - withOptionsMixin(options): { spec+: { source+: { inlineVolumeSpec+: { flexVolume+: { options+: options } } } } }, - '#withReadOnly':: d.fn(help='"Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { spec+: { source+: { inlineVolumeSpec+: { flexVolume+: { readOnly: readOnly } } } } }, - }, - '#flocker':: d.obj(help='"Represents a Flocker volume mounted by the Flocker agent. One and only one of datasetName and datasetUUID should be set. Flocker volumes do not support ownership management or SELinux relabeling."'), - flocker: { - '#withDatasetName':: d.fn(help='"Name of the dataset stored as metadata -> name on the dataset for Flocker should be considered as deprecated"', args=[d.arg(name='datasetName', type=d.T.string)]), - withDatasetName(datasetName): { spec+: { source+: { inlineVolumeSpec+: { flocker+: { datasetName: datasetName } } } } }, - '#withDatasetUUID':: d.fn(help='"UUID of the dataset. This is unique identifier of a Flocker dataset"', args=[d.arg(name='datasetUUID', type=d.T.string)]), - withDatasetUUID(datasetUUID): { spec+: { source+: { inlineVolumeSpec+: { flocker+: { datasetUUID: datasetUUID } } } } }, - }, - '#gcePersistentDisk':: d.obj(help='"Represents a Persistent Disk resource in Google Compute Engine.\\n\\nA GCE PD must exist before mounting to a container. The disk must also be in the same GCE project and zone as the kubelet. A GCE PD can only be mounted as read/write once or read-only many times. GCE PDs support ownership management and SELinux relabeling."'), - gcePersistentDisk: { - '#withFsType':: d.fn(help='"Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { spec+: { source+: { inlineVolumeSpec+: { gcePersistentDisk+: { fsType: fsType } } } } }, - '#withPartition':: d.fn(help='"The partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \\"1\\". Similarly, the volume partition for /dev/sda is \\"0\\" (or you can leave the property empty). More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='partition', type=d.T.integer)]), - withPartition(partition): { spec+: { source+: { inlineVolumeSpec+: { gcePersistentDisk+: { partition: partition } } } } }, - '#withPdName':: d.fn(help='"Unique name of the PD resource in GCE. Used to identify the disk in GCE. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='pdName', type=d.T.string)]), - withPdName(pdName): { spec+: { source+: { inlineVolumeSpec+: { gcePersistentDisk+: { pdName: pdName } } } } }, - '#withReadOnly':: d.fn(help='"ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { spec+: { source+: { inlineVolumeSpec+: { gcePersistentDisk+: { readOnly: readOnly } } } } }, - }, - '#glusterfs':: d.obj(help='"Represents a Glusterfs mount that lasts the lifetime of a pod. Glusterfs volumes do not support ownership management or SELinux relabeling."'), - glusterfs: { - '#withEndpoints':: d.fn(help='"EndpointsName is the endpoint name that details Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='endpoints', type=d.T.string)]), - withEndpoints(endpoints): { spec+: { source+: { inlineVolumeSpec+: { glusterfs+: { endpoints: endpoints } } } } }, - '#withEndpointsNamespace':: d.fn(help='"EndpointsNamespace is the namespace that contains Glusterfs endpoint. If this field is empty, the EndpointNamespace defaults to the same namespace as the bound PVC. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='endpointsNamespace', type=d.T.string)]), - withEndpointsNamespace(endpointsNamespace): { spec+: { source+: { inlineVolumeSpec+: { glusterfs+: { endpointsNamespace: endpointsNamespace } } } } }, - '#withPath':: d.fn(help='"Path is the Glusterfs volume path. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='path', type=d.T.string)]), - withPath(path): { spec+: { source+: { inlineVolumeSpec+: { glusterfs+: { path: path } } } } }, - '#withReadOnly':: d.fn(help='"ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions. Defaults to false. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { spec+: { source+: { inlineVolumeSpec+: { glusterfs+: { readOnly: readOnly } } } } }, - }, - '#hostPath':: d.obj(help='"Represents a host path mapped into a pod. Host path volumes do not support ownership management or SELinux relabeling."'), - hostPath: { - '#withPath':: d.fn(help='"Path of the directory on the host. If the path is a symlink, it will follow the link to the real path. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath"', args=[d.arg(name='path', type=d.T.string)]), - withPath(path): { spec+: { source+: { inlineVolumeSpec+: { hostPath+: { path: path } } } } }, - '#withType':: d.fn(help='"Type for HostPath Volume Defaults to \\"\\" More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath"', args=[d.arg(name='type', type=d.T.string)]), - withType(type): { spec+: { source+: { inlineVolumeSpec+: { hostPath+: { type: type } } } } }, - }, - '#iscsi':: d.obj(help='"ISCSIPersistentVolumeSource represents an ISCSI disk. ISCSI volumes can only be mounted as read/write once. ISCSI volumes support ownership management and SELinux relabeling."'), - iscsi: { - '#secretRef':: d.obj(help='"SecretReference represents a Secret Reference. 
It has enough information to retrieve secret in any namespace"'), - secretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { spec+: { source+: { inlineVolumeSpec+: { iscsi+: { secretRef+: { name: name } } } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { spec+: { source+: { inlineVolumeSpec+: { iscsi+: { secretRef+: { namespace: namespace } } } } } }, - }, - '#withChapAuthDiscovery':: d.fn(help='"whether support iSCSI Discovery CHAP authentication"', args=[d.arg(name='chapAuthDiscovery', type=d.T.boolean)]), - withChapAuthDiscovery(chapAuthDiscovery): { spec+: { source+: { inlineVolumeSpec+: { iscsi+: { chapAuthDiscovery: chapAuthDiscovery } } } } }, - '#withChapAuthSession':: d.fn(help='"whether support iSCSI Session CHAP authentication"', args=[d.arg(name='chapAuthSession', type=d.T.boolean)]), - withChapAuthSession(chapAuthSession): { spec+: { source+: { inlineVolumeSpec+: { iscsi+: { chapAuthSession: chapAuthSession } } } } }, - '#withFsType':: d.fn(help='"Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi"', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { spec+: { source+: { inlineVolumeSpec+: { iscsi+: { fsType: fsType } } } } }, - '#withInitiatorName':: d.fn(help='"Custom iSCSI Initiator Name. If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface : will be created for the connection."', args=[d.arg(name='initiatorName', type=d.T.string)]), - withInitiatorName(initiatorName): { spec+: { source+: { inlineVolumeSpec+: { iscsi+: { initiatorName: initiatorName } } } } }, - '#withIqn':: d.fn(help='"Target iSCSI Qualified Name."', args=[d.arg(name='iqn', type=d.T.string)]), - withIqn(iqn): { spec+: { source+: { inlineVolumeSpec+: { iscsi+: { iqn: iqn } } } } }, - '#withIscsiInterface':: d.fn(help="\"iSCSI Interface Name that uses an iSCSI transport. Defaults to 'default' (tcp).\"", args=[d.arg(name='iscsiInterface', type=d.T.string)]), - withIscsiInterface(iscsiInterface): { spec+: { source+: { inlineVolumeSpec+: { iscsi+: { iscsiInterface: iscsiInterface } } } } }, - '#withLun':: d.fn(help='"iSCSI Target Lun number."', args=[d.arg(name='lun', type=d.T.integer)]), - withLun(lun): { spec+: { source+: { inlineVolumeSpec+: { iscsi+: { lun: lun } } } } }, - '#withPortals':: d.fn(help='"iSCSI Target Portal List. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260)."', args=[d.arg(name='portals', type=d.T.array)]), - withPortals(portals): { spec+: { source+: { inlineVolumeSpec+: { iscsi+: { portals: if std.isArray(v=portals) then portals else [portals] } } } } }, - '#withPortalsMixin':: d.fn(help='"iSCSI Target Portal List. 
The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260)."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='portals', type=d.T.array)]), - withPortalsMixin(portals): { spec+: { source+: { inlineVolumeSpec+: { iscsi+: { portals+: if std.isArray(v=portals) then portals else [portals] } } } } }, - '#withReadOnly':: d.fn(help='"ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false."', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { spec+: { source+: { inlineVolumeSpec+: { iscsi+: { readOnly: readOnly } } } } }, - '#withTargetPortal':: d.fn(help='"iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260)."', args=[d.arg(name='targetPortal', type=d.T.string)]), - withTargetPortal(targetPortal): { spec+: { source+: { inlineVolumeSpec+: { iscsi+: { targetPortal: targetPortal } } } } }, - }, - '#local':: d.obj(help='"Local represents directly-attached storage with node affinity (Beta feature)"'), - 'local': { - '#withFsType':: d.fn(help='"Filesystem type to mount. It applies only when the Path is a block device. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". The default value is to auto-select a fileystem if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { spec+: { source+: { inlineVolumeSpec+: { 'local'+: { fsType: fsType } } } } }, - '#withPath':: d.fn(help='"The full path to the volume on the node. It can be either a directory or block device (disk, partition, ...)."', args=[d.arg(name='path', type=d.T.string)]), - withPath(path): { spec+: { source+: { inlineVolumeSpec+: { 'local'+: { path: path } } } } }, - }, - '#nfs':: d.obj(help='"Represents an NFS mount that lasts the lifetime of a pod. NFS volumes do not support ownership management or SELinux relabeling."'), - nfs: { - '#withPath':: d.fn(help='"Path that is exported by the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs"', args=[d.arg(name='path', type=d.T.string)]), - withPath(path): { spec+: { source+: { inlineVolumeSpec+: { nfs+: { path: path } } } } }, - '#withReadOnly':: d.fn(help='"ReadOnly here will force the NFS export to be mounted with read-only permissions. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs"', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { spec+: { source+: { inlineVolumeSpec+: { nfs+: { readOnly: readOnly } } } } }, - '#withServer':: d.fn(help='"Server is the hostname or IP address of the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs"', args=[d.arg(name='server', type=d.T.string)]), - withServer(server): { spec+: { source+: { inlineVolumeSpec+: { nfs+: { server: server } } } } }, - }, - '#nodeAffinity':: d.obj(help='"VolumeNodeAffinity defines constraints that limit what nodes this volume can be accessed from."'), - nodeAffinity: { - '#required':: d.obj(help='"A node selector represents the union of the results of one or more label queries over a set of nodes; that is, it represents the OR of the selectors represented by the node selector terms."'), - required: { - '#withNodeSelectorTerms':: d.fn(help='"Required. A list of node selector terms. 
The terms are ORed."', args=[d.arg(name='nodeSelectorTerms', type=d.T.array)]), - withNodeSelectorTerms(nodeSelectorTerms): { spec+: { source+: { inlineVolumeSpec+: { nodeAffinity+: { required+: { nodeSelectorTerms: if std.isArray(v=nodeSelectorTerms) then nodeSelectorTerms else [nodeSelectorTerms] } } } } } }, - '#withNodeSelectorTermsMixin':: d.fn(help='"Required. A list of node selector terms. The terms are ORed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='nodeSelectorTerms', type=d.T.array)]), - withNodeSelectorTermsMixin(nodeSelectorTerms): { spec+: { source+: { inlineVolumeSpec+: { nodeAffinity+: { required+: { nodeSelectorTerms+: if std.isArray(v=nodeSelectorTerms) then nodeSelectorTerms else [nodeSelectorTerms] } } } } } }, - }, - }, - '#photonPersistentDisk':: d.obj(help='"Represents a Photon Controller persistent disk resource."'), - photonPersistentDisk: { - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { spec+: { source+: { inlineVolumeSpec+: { photonPersistentDisk+: { fsType: fsType } } } } }, - '#withPdID':: d.fn(help='"ID that identifies Photon Controller persistent disk"', args=[d.arg(name='pdID', type=d.T.string)]), - withPdID(pdID): { spec+: { source+: { inlineVolumeSpec+: { photonPersistentDisk+: { pdID: pdID } } } } }, - }, - '#portworxVolume':: d.obj(help='"PortworxVolumeSource represents a Portworx volume resource."'), - portworxVolume: { - '#withFsType':: d.fn(help='"FSType represents the filesystem type to mount Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { spec+: { source+: { inlineVolumeSpec+: { portworxVolume+: { fsType: fsType } } } } }, - '#withReadOnly':: d.fn(help='"Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { spec+: { source+: { inlineVolumeSpec+: { portworxVolume+: { readOnly: readOnly } } } } }, - '#withVolumeID':: d.fn(help='"VolumeID uniquely identifies a Portworx volume"', args=[d.arg(name='volumeID', type=d.T.string)]), - withVolumeID(volumeID): { spec+: { source+: { inlineVolumeSpec+: { portworxVolume+: { volumeID: volumeID } } } } }, - }, - '#quobyte':: d.obj(help='"Represents a Quobyte mount that lasts the lifetime of a pod. Quobyte volumes do not support ownership management or SELinux relabeling."'), - quobyte: { - '#withGroup':: d.fn(help='"Group to map volume access to Default is no group"', args=[d.arg(name='group', type=d.T.string)]), - withGroup(group): { spec+: { source+: { inlineVolumeSpec+: { quobyte+: { group: group } } } } }, - '#withReadOnly':: d.fn(help='"ReadOnly here will force the Quobyte volume to be mounted with read-only permissions. 
Defaults to false."', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { spec+: { source+: { inlineVolumeSpec+: { quobyte+: { readOnly: readOnly } } } } }, - '#withRegistry':: d.fn(help='"Registry represents a single or multiple Quobyte Registry services specified as a string as host:port pair (multiple entries are separated with commas) which acts as the central registry for volumes"', args=[d.arg(name='registry', type=d.T.string)]), - withRegistry(registry): { spec+: { source+: { inlineVolumeSpec+: { quobyte+: { registry: registry } } } } }, - '#withTenant':: d.fn(help='"Tenant owning the given Quobyte volume in the Backend Used with dynamically provisioned Quobyte volumes, value is set by the plugin"', args=[d.arg(name='tenant', type=d.T.string)]), - withTenant(tenant): { spec+: { source+: { inlineVolumeSpec+: { quobyte+: { tenant: tenant } } } } }, - '#withUser':: d.fn(help='"User to map volume access to Defaults to serivceaccount user"', args=[d.arg(name='user', type=d.T.string)]), - withUser(user): { spec+: { source+: { inlineVolumeSpec+: { quobyte+: { user: user } } } } }, - '#withVolume':: d.fn(help='"Volume is a string that references an already created Quobyte volume by name."', args=[d.arg(name='volume', type=d.T.string)]), - withVolume(volume): { spec+: { source+: { inlineVolumeSpec+: { quobyte+: { volume: volume } } } } }, - }, - '#rbd':: d.obj(help='"Represents a Rados Block Device mount that lasts the lifetime of a pod. RBD volumes support ownership management and SELinux relabeling."'), - rbd: { - '#secretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), - secretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { spec+: { source+: { inlineVolumeSpec+: { rbd+: { secretRef+: { name: name } } } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { spec+: { source+: { inlineVolumeSpec+: { rbd+: { secretRef+: { namespace: namespace } } } } } }, - }, - '#withFsType':: d.fn(help='"Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd"', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { spec+: { source+: { inlineVolumeSpec+: { rbd+: { fsType: fsType } } } } }, - '#withImage':: d.fn(help='"The rados image name. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='image', type=d.T.string)]), - withImage(image): { spec+: { source+: { inlineVolumeSpec+: { rbd+: { image: image } } } } }, - '#withKeyring':: d.fn(help='"Keyring is the path to key ring for RBDUser. Default is /etc/ceph/keyring. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='keyring', type=d.T.string)]), - withKeyring(keyring): { spec+: { source+: { inlineVolumeSpec+: { rbd+: { keyring: keyring } } } } }, - '#withMonitors':: d.fn(help='"A collection of Ceph monitors. 
More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='monitors', type=d.T.array)]), - withMonitors(monitors): { spec+: { source+: { inlineVolumeSpec+: { rbd+: { monitors: if std.isArray(v=monitors) then monitors else [monitors] } } } } }, - '#withMonitorsMixin':: d.fn(help='"A collection of Ceph monitors. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='monitors', type=d.T.array)]), - withMonitorsMixin(monitors): { spec+: { source+: { inlineVolumeSpec+: { rbd+: { monitors+: if std.isArray(v=monitors) then monitors else [monitors] } } } } }, - '#withPool':: d.fn(help='"The rados pool name. Default is rbd. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='pool', type=d.T.string)]), - withPool(pool): { spec+: { source+: { inlineVolumeSpec+: { rbd+: { pool: pool } } } } }, - '#withReadOnly':: d.fn(help='"ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { spec+: { source+: { inlineVolumeSpec+: { rbd+: { readOnly: readOnly } } } } }, - '#withUser':: d.fn(help='"The rados user name. Default is admin. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='user', type=d.T.string)]), - withUser(user): { spec+: { source+: { inlineVolumeSpec+: { rbd+: { user: user } } } } }, - }, - '#scaleIO':: d.obj(help='"ScaleIOPersistentVolumeSource represents a persistent ScaleIO volume"'), - scaleIO: { - '#secretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), - secretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { spec+: { source+: { inlineVolumeSpec+: { scaleIO+: { secretRef+: { name: name } } } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { spec+: { source+: { inlineVolumeSpec+: { scaleIO+: { secretRef+: { namespace: namespace } } } } } }, - }, - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Default is \\"xfs\\', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { spec+: { source+: { inlineVolumeSpec+: { scaleIO+: { fsType: fsType } } } } }, - '#withGateway':: d.fn(help='"The host address of the ScaleIO API Gateway."', args=[d.arg(name='gateway', type=d.T.string)]), - withGateway(gateway): { spec+: { source+: { inlineVolumeSpec+: { scaleIO+: { gateway: gateway } } } } }, - '#withProtectionDomain':: d.fn(help='"The name of the ScaleIO Protection Domain for the configured storage."', args=[d.arg(name='protectionDomain', type=d.T.string)]), - withProtectionDomain(protectionDomain): { spec+: { source+: { inlineVolumeSpec+: { scaleIO+: { protectionDomain: protectionDomain } } } } }, - '#withReadOnly':: d.fn(help='"Defaults to false (read/write). 
ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { spec+: { source+: { inlineVolumeSpec+: { scaleIO+: { readOnly: readOnly } } } } }, - '#withSslEnabled':: d.fn(help='"Flag to enable/disable SSL communication with Gateway, default false"', args=[d.arg(name='sslEnabled', type=d.T.boolean)]), - withSslEnabled(sslEnabled): { spec+: { source+: { inlineVolumeSpec+: { scaleIO+: { sslEnabled: sslEnabled } } } } }, - '#withStorageMode':: d.fn(help='"Indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. Default is ThinProvisioned."', args=[d.arg(name='storageMode', type=d.T.string)]), - withStorageMode(storageMode): { spec+: { source+: { inlineVolumeSpec+: { scaleIO+: { storageMode: storageMode } } } } }, - '#withStoragePool':: d.fn(help='"The ScaleIO Storage Pool associated with the protection domain."', args=[d.arg(name='storagePool', type=d.T.string)]), - withStoragePool(storagePool): { spec+: { source+: { inlineVolumeSpec+: { scaleIO+: { storagePool: storagePool } } } } }, - '#withSystem':: d.fn(help='"The name of the storage system as configured in ScaleIO."', args=[d.arg(name='system', type=d.T.string)]), - withSystem(system): { spec+: { source+: { inlineVolumeSpec+: { scaleIO+: { system: system } } } } }, - '#withVolumeName':: d.fn(help='"The name of a volume already created in the ScaleIO system that is associated with this volume source."', args=[d.arg(name='volumeName', type=d.T.string)]), - withVolumeName(volumeName): { spec+: { source+: { inlineVolumeSpec+: { scaleIO+: { volumeName: volumeName } } } } }, - }, - '#storageos':: d.obj(help='"Represents a StorageOS persistent volume resource."'), - storageos: { - '#secretRef':: d.obj(help='"ObjectReference contains enough information to let you inspect or modify the referred object."'), - secretRef: { - '#withApiVersion':: d.fn(help='"API version of the referent."', args=[d.arg(name='apiVersion', type=d.T.string)]), - withApiVersion(apiVersion): { spec+: { source+: { inlineVolumeSpec+: { storageos+: { secretRef+: { apiVersion: apiVersion } } } } } }, - '#withFieldPath':: d.fn(help='"If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. For example, if the object reference is to a container within a pod, this would take on a value like: \\"spec.containers{name}\\" (where \\"name\\" refers to the name of the container that triggered the event) or if no container name is specified \\"spec.containers[2]\\" (container with index 2 in this pod). This syntax is chosen only to have some well-defined way of referencing a part of an object."', args=[d.arg(name='fieldPath', type=d.T.string)]), - withFieldPath(fieldPath): { spec+: { source+: { inlineVolumeSpec+: { storageos+: { secretRef+: { fieldPath: fieldPath } } } } } }, - '#withKind':: d.fn(help='"Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds"', args=[d.arg(name='kind', type=d.T.string)]), - withKind(kind): { spec+: { source+: { inlineVolumeSpec+: { storageos+: { secretRef+: { kind: kind } } } } } }, - '#withName':: d.fn(help='"Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { spec+: { source+: { inlineVolumeSpec+: { storageos+: { secretRef+: { name: name } } } } } }, - '#withNamespace':: d.fn(help='"Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/"', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { spec+: { source+: { inlineVolumeSpec+: { storageos+: { secretRef+: { namespace: namespace } } } } } }, - '#withResourceVersion':: d.fn(help='"Specific resourceVersion to which this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), - withResourceVersion(resourceVersion): { spec+: { source+: { inlineVolumeSpec+: { storageos+: { secretRef+: { resourceVersion: resourceVersion } } } } } }, - '#withUid':: d.fn(help='"UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids"', args=[d.arg(name='uid', type=d.T.string)]), - withUid(uid): { spec+: { source+: { inlineVolumeSpec+: { storageos+: { secretRef+: { uid: uid } } } } } }, - }, - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { spec+: { source+: { inlineVolumeSpec+: { storageos+: { fsType: fsType } } } } }, - '#withReadOnly':: d.fn(help='"Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { spec+: { source+: { inlineVolumeSpec+: { storageos+: { readOnly: readOnly } } } } }, - '#withVolumeName':: d.fn(help='"VolumeName is the human-readable name of the StorageOS volume. Volume names are only unique within a namespace."', args=[d.arg(name='volumeName', type=d.T.string)]), - withVolumeName(volumeName): { spec+: { source+: { inlineVolumeSpec+: { storageos+: { volumeName: volumeName } } } } }, - '#withVolumeNamespace':: d.fn(help="\"VolumeNamespace specifies the scope of the volume within StorageOS. If no namespace is specified then the Pod's namespace will be used. This allows the Kubernetes name scoping to be mirrored within StorageOS for tighter integration. Set VolumeName to any name to override the default behaviour. Set to \\\"default\\\" if you are not using namespaces within StorageOS. Namespaces that do not pre-exist within StorageOS will be created.\"", args=[d.arg(name='volumeNamespace', type=d.T.string)]), - withVolumeNamespace(volumeNamespace): { spec+: { source+: { inlineVolumeSpec+: { storageos+: { volumeNamespace: volumeNamespace } } } } }, - }, - '#vsphereVolume':: d.obj(help='"Represents a vSphere volume resource."'), - vsphereVolume: { - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". 
Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { spec+: { source+: { inlineVolumeSpec+: { vsphereVolume+: { fsType: fsType } } } } }, - '#withStoragePolicyID':: d.fn(help='"Storage Policy Based Management (SPBM) profile ID associated with the StoragePolicyName."', args=[d.arg(name='storagePolicyID', type=d.T.string)]), - withStoragePolicyID(storagePolicyID): { spec+: { source+: { inlineVolumeSpec+: { vsphereVolume+: { storagePolicyID: storagePolicyID } } } } }, - '#withStoragePolicyName':: d.fn(help='"Storage Policy Based Management (SPBM) profile name."', args=[d.arg(name='storagePolicyName', type=d.T.string)]), - withStoragePolicyName(storagePolicyName): { spec+: { source+: { inlineVolumeSpec+: { vsphereVolume+: { storagePolicyName: storagePolicyName } } } } }, - '#withVolumePath':: d.fn(help='"Path that identifies vSphere volume vmdk"', args=[d.arg(name='volumePath', type=d.T.string)]), - withVolumePath(volumePath): { spec+: { source+: { inlineVolumeSpec+: { vsphereVolume+: { volumePath: volumePath } } } } }, - }, - '#withAccessModes':: d.fn(help='"AccessModes contains all ways the volume can be mounted. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes"', args=[d.arg(name='accessModes', type=d.T.array)]), - withAccessModes(accessModes): { spec+: { source+: { inlineVolumeSpec+: { accessModes: if std.isArray(v=accessModes) then accessModes else [accessModes] } } } }, - '#withAccessModesMixin':: d.fn(help='"AccessModes contains all ways the volume can be mounted. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='accessModes', type=d.T.array)]), - withAccessModesMixin(accessModes): { spec+: { source+: { inlineVolumeSpec+: { accessModes+: if std.isArray(v=accessModes) then accessModes else [accessModes] } } } }, - '#withCapacity':: d.fn(help="\"A description of the persistent volume's resources and capacity. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity\"", args=[d.arg(name='capacity', type=d.T.object)]), - withCapacity(capacity): { spec+: { source+: { inlineVolumeSpec+: { capacity: capacity } } } }, - '#withCapacityMixin':: d.fn(help="\"A description of the persistent volume's resources and capacity. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='capacity', type=d.T.object)]), - withCapacityMixin(capacity): { spec+: { source+: { inlineVolumeSpec+: { capacity+: capacity } } } }, - '#withMountOptions':: d.fn(help='"A list of mount options, e.g. [\\"ro\\", \\"soft\\"]. Not validated - mount will simply fail if one is invalid. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#mount-options"', args=[d.arg(name='mountOptions', type=d.T.array)]), - withMountOptions(mountOptions): { spec+: { source+: { inlineVolumeSpec+: { mountOptions: if std.isArray(v=mountOptions) then mountOptions else [mountOptions] } } } }, - '#withMountOptionsMixin':: d.fn(help='"A list of mount options, e.g. [\\"ro\\", \\"soft\\"]. Not validated - mount will simply fail if one is invalid. 
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#mount-options"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='mountOptions', type=d.T.array)]), - withMountOptionsMixin(mountOptions): { spec+: { source+: { inlineVolumeSpec+: { mountOptions+: if std.isArray(v=mountOptions) then mountOptions else [mountOptions] } } } }, - '#withPersistentVolumeReclaimPolicy':: d.fn(help='"What happens to a persistent volume when released from its claim. Valid options are Retain (default for manually created PersistentVolumes), Delete (default for dynamically provisioned PersistentVolumes), and Recycle (deprecated). Recycle must be supported by the volume plugin underlying this PersistentVolume. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#reclaiming"', args=[d.arg(name='persistentVolumeReclaimPolicy', type=d.T.string)]), - withPersistentVolumeReclaimPolicy(persistentVolumeReclaimPolicy): { spec+: { source+: { inlineVolumeSpec+: { persistentVolumeReclaimPolicy: persistentVolumeReclaimPolicy } } } }, - '#withStorageClassName':: d.fn(help='"Name of StorageClass to which this persistent volume belongs. Empty value means that this volume does not belong to any StorageClass."', args=[d.arg(name='storageClassName', type=d.T.string)]), - withStorageClassName(storageClassName): { spec+: { source+: { inlineVolumeSpec+: { storageClassName: storageClassName } } } }, - '#withVolumeMode':: d.fn(help='"volumeMode defines if a volume is intended to be used with a formatted filesystem or to remain in raw block state. Value of Filesystem is implied when not included in spec."', args=[d.arg(name='volumeMode', type=d.T.string)]), - withVolumeMode(volumeMode): { spec+: { source+: { inlineVolumeSpec+: { volumeMode: volumeMode } } } }, - }, - '#withPersistentVolumeName':: d.fn(help='"Name of the persistent volume to attach."', args=[d.arg(name='persistentVolumeName', type=d.T.string)]), - withPersistentVolumeName(persistentVolumeName): { spec+: { source+: { persistentVolumeName: persistentVolumeName } } }, - }, - '#withAttacher':: d.fn(help='"Attacher indicates the name of the volume driver that MUST handle this request. This is the name returned by GetPluginName()."', args=[d.arg(name='attacher', type=d.T.string)]), - withAttacher(attacher): { spec+: { attacher: attacher } }, - '#withNodeName':: d.fn(help='"The node that the volume should be attached to."', args=[d.arg(name='nodeName', type=d.T.string)]), - withNodeName(nodeName): { spec+: { nodeName: nodeName } }, - }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1alpha1/volumeAttachmentSpec.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1alpha1/volumeAttachmentSpec.libsonnet deleted file mode 100644 index 8b092052a56..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1alpha1/volumeAttachmentSpec.libsonnet +++ /dev/null @@ -1,426 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='volumeAttachmentSpec', url='', help='"VolumeAttachmentSpec is the specification of a VolumeAttachment request."'), - '#source':: d.obj(help='"VolumeAttachmentSource represents a volume that should be attached. Right now only PersistenVolumes can be attached via external attacher, in future we may allow also inline volumes in pods. 
Exactly one member can be set."'), - source: { - '#inlineVolumeSpec':: d.obj(help='"PersistentVolumeSpec is the specification of a persistent volume."'), - inlineVolumeSpec: { - '#awsElasticBlockStore':: d.obj(help='"Represents a Persistent Disk resource in AWS.\\n\\nAn AWS EBS disk must exist before mounting to a container. The disk must also be in the same AWS zone as the kubelet. An AWS EBS disk can only be mounted as read/write once. AWS EBS volumes support ownership management and SELinux relabeling."'), - awsElasticBlockStore: { - '#withFsType':: d.fn(help='"Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore"', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { source+: { inlineVolumeSpec+: { awsElasticBlockStore+: { fsType: fsType } } } }, - '#withPartition':: d.fn(help='"The partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \\"1\\". Similarly, the volume partition for /dev/sda is \\"0\\" (or you can leave the property empty)."', args=[d.arg(name='partition', type=d.T.integer)]), - withPartition(partition): { source+: { inlineVolumeSpec+: { awsElasticBlockStore+: { partition: partition } } } }, - '#withReadOnly':: d.fn(help='"Specify \\"true\\" to force and set the ReadOnly property in VolumeMounts to \\"true\\". If omitted, the default is \\"false\\". More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore"', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { source+: { inlineVolumeSpec+: { awsElasticBlockStore+: { readOnly: readOnly } } } }, - '#withVolumeID':: d.fn(help='"Unique ID of the persistent disk resource in AWS (Amazon EBS volume). More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore"', args=[d.arg(name='volumeID', type=d.T.string)]), - withVolumeID(volumeID): { source+: { inlineVolumeSpec+: { awsElasticBlockStore+: { volumeID: volumeID } } } }, - }, - '#azureDisk':: d.obj(help='"AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod."'), - azureDisk: { - '#withCachingMode':: d.fn(help='"Host Caching mode: None, Read Only, Read Write."', args=[d.arg(name='cachingMode', type=d.T.string)]), - withCachingMode(cachingMode): { source+: { inlineVolumeSpec+: { azureDisk+: { cachingMode: cachingMode } } } }, - '#withDiskName':: d.fn(help='"The Name of the data disk in the blob storage"', args=[d.arg(name='diskName', type=d.T.string)]), - withDiskName(diskName): { source+: { inlineVolumeSpec+: { azureDisk+: { diskName: diskName } } } }, - '#withDiskURI':: d.fn(help='"The URI the data disk in the blob storage"', args=[d.arg(name='diskURI', type=d.T.string)]), - withDiskURI(diskURI): { source+: { inlineVolumeSpec+: { azureDisk+: { diskURI: diskURI } } } }, - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". 
Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { source+: { inlineVolumeSpec+: { azureDisk+: { fsType: fsType } } } }, - '#withKind':: d.fn(help='"Expected values Shared: multiple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared"', args=[d.arg(name='kind', type=d.T.string)]), - withKind(kind): { source+: { inlineVolumeSpec+: { azureDisk+: { kind: kind } } } }, - '#withReadOnly':: d.fn(help='"Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { source+: { inlineVolumeSpec+: { azureDisk+: { readOnly: readOnly } } } }, - }, - '#azureFile':: d.obj(help='"AzureFile represents an Azure File Service mount on the host and bind mount to the pod."'), - azureFile: { - '#withReadOnly':: d.fn(help='"Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { source+: { inlineVolumeSpec+: { azureFile+: { readOnly: readOnly } } } }, - '#withSecretName':: d.fn(help='"the name of secret that contains Azure Storage Account Name and Key"', args=[d.arg(name='secretName', type=d.T.string)]), - withSecretName(secretName): { source+: { inlineVolumeSpec+: { azureFile+: { secretName: secretName } } } }, - '#withSecretNamespace':: d.fn(help='"the namespace of the secret that contains Azure Storage Account Name and Key default is the same as the Pod"', args=[d.arg(name='secretNamespace', type=d.T.string)]), - withSecretNamespace(secretNamespace): { source+: { inlineVolumeSpec+: { azureFile+: { secretNamespace: secretNamespace } } } }, - '#withShareName':: d.fn(help='"Share Name"', args=[d.arg(name='shareName', type=d.T.string)]), - withShareName(shareName): { source+: { inlineVolumeSpec+: { azureFile+: { shareName: shareName } } } }, - }, - '#cephfs':: d.obj(help='"Represents a Ceph Filesystem mount that lasts the lifetime of a pod Cephfs volumes do not support ownership management or SELinux relabeling."'), - cephfs: { - '#secretRef':: d.obj(help='"SecretReference represents a Secret Reference. 
It has enough information to retrieve secret in any namespace"'), - secretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { source+: { inlineVolumeSpec+: { cephfs+: { secretRef+: { name: name } } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { source+: { inlineVolumeSpec+: { cephfs+: { secretRef+: { namespace: namespace } } } } }, - }, - '#withMonitors':: d.fn(help='"Required: Monitors is a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='monitors', type=d.T.array)]), - withMonitors(monitors): { source+: { inlineVolumeSpec+: { cephfs+: { monitors: if std.isArray(v=monitors) then monitors else [monitors] } } } }, - '#withMonitorsMixin':: d.fn(help='"Required: Monitors is a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='monitors', type=d.T.array)]), - withMonitorsMixin(monitors): { source+: { inlineVolumeSpec+: { cephfs+: { monitors+: if std.isArray(v=monitors) then monitors else [monitors] } } } }, - '#withPath':: d.fn(help='"Optional: Used as the mounted root, rather than the full Ceph tree, default is /"', args=[d.arg(name='path', type=d.T.string)]), - withPath(path): { source+: { inlineVolumeSpec+: { cephfs+: { path: path } } } }, - '#withReadOnly':: d.fn(help='"Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { source+: { inlineVolumeSpec+: { cephfs+: { readOnly: readOnly } } } }, - '#withSecretFile':: d.fn(help='"Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='secretFile', type=d.T.string)]), - withSecretFile(secretFile): { source+: { inlineVolumeSpec+: { cephfs+: { secretFile: secretFile } } } }, - '#withUser':: d.fn(help='"Optional: User is the rados user name, default is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='user', type=d.T.string)]), - withUser(user): { source+: { inlineVolumeSpec+: { cephfs+: { user: user } } } }, - }, - '#cinder':: d.obj(help='"Represents a cinder volume resource in Openstack. A Cinder volume must exist before mounting to a container. The volume must also be in the same region as the kubelet. Cinder volumes support ownership management and SELinux relabeling."'), - cinder: { - '#secretRef':: d.obj(help='"SecretReference represents a Secret Reference. 
It has enough information to retrieve secret in any namespace"'), - secretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { source+: { inlineVolumeSpec+: { cinder+: { secretRef+: { name: name } } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { source+: { inlineVolumeSpec+: { cinder+: { secretRef+: { namespace: namespace } } } } }, - }, - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://examples.k8s.io/mysql-cinder-pd/README.md"', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { source+: { inlineVolumeSpec+: { cinder+: { fsType: fsType } } } }, - '#withReadOnly':: d.fn(help='"Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/mysql-cinder-pd/README.md"', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { source+: { inlineVolumeSpec+: { cinder+: { readOnly: readOnly } } } }, - '#withVolumeID':: d.fn(help='"volume id used to identify the volume in cinder. More info: https://examples.k8s.io/mysql-cinder-pd/README.md"', args=[d.arg(name='volumeID', type=d.T.string)]), - withVolumeID(volumeID): { source+: { inlineVolumeSpec+: { cinder+: { volumeID: volumeID } } } }, - }, - '#claimRef':: d.obj(help='"ObjectReference contains enough information to let you inspect or modify the referred object."'), - claimRef: { - '#withApiVersion':: d.fn(help='"API version of the referent."', args=[d.arg(name='apiVersion', type=d.T.string)]), - withApiVersion(apiVersion): { source+: { inlineVolumeSpec+: { claimRef+: { apiVersion: apiVersion } } } }, - '#withFieldPath':: d.fn(help='"If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. For example, if the object reference is to a container within a pod, this would take on a value like: \\"spec.containers{name}\\" (where \\"name\\" refers to the name of the container that triggered the event) or if no container name is specified \\"spec.containers[2]\\" (container with index 2 in this pod). This syntax is chosen only to have some well-defined way of referencing a part of an object."', args=[d.arg(name='fieldPath', type=d.T.string)]), - withFieldPath(fieldPath): { source+: { inlineVolumeSpec+: { claimRef+: { fieldPath: fieldPath } } } }, - '#withKind':: d.fn(help='"Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds"', args=[d.arg(name='kind', type=d.T.string)]), - withKind(kind): { source+: { inlineVolumeSpec+: { claimRef+: { kind: kind } } } }, - '#withName':: d.fn(help='"Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { source+: { inlineVolumeSpec+: { claimRef+: { name: name } } } }, - '#withNamespace':: d.fn(help='"Namespace of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/"', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { source+: { inlineVolumeSpec+: { claimRef+: { namespace: namespace } } } }, - '#withResourceVersion':: d.fn(help='"Specific resourceVersion to which this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), - withResourceVersion(resourceVersion): { source+: { inlineVolumeSpec+: { claimRef+: { resourceVersion: resourceVersion } } } }, - '#withUid':: d.fn(help='"UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids"', args=[d.arg(name='uid', type=d.T.string)]), - withUid(uid): { source+: { inlineVolumeSpec+: { claimRef+: { uid: uid } } } }, - }, - '#csi':: d.obj(help='"Represents storage that is managed by an external CSI volume driver (Beta feature)"'), - csi: { - '#controllerExpandSecretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), - controllerExpandSecretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { source+: { inlineVolumeSpec+: { csi+: { controllerExpandSecretRef+: { name: name } } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { source+: { inlineVolumeSpec+: { csi+: { controllerExpandSecretRef+: { namespace: namespace } } } } }, - }, - '#controllerPublishSecretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), - controllerPublishSecretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { source+: { inlineVolumeSpec+: { csi+: { controllerPublishSecretRef+: { name: name } } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { source+: { inlineVolumeSpec+: { csi+: { controllerPublishSecretRef+: { namespace: namespace } } } } }, - }, - '#nodePublishSecretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), - nodePublishSecretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { source+: { inlineVolumeSpec+: { csi+: { nodePublishSecretRef+: { name: name } } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { source+: { inlineVolumeSpec+: { csi+: { nodePublishSecretRef+: { namespace: namespace } } } } }, - }, - '#nodeStageSecretRef':: d.obj(help='"SecretReference represents a Secret Reference. 
It has enough information to retrieve secret in any namespace"'), - nodeStageSecretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { source+: { inlineVolumeSpec+: { csi+: { nodeStageSecretRef+: { name: name } } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { source+: { inlineVolumeSpec+: { csi+: { nodeStageSecretRef+: { namespace: namespace } } } } }, - }, - '#withDriver':: d.fn(help='"Driver is the name of the driver to use for this volume. Required."', args=[d.arg(name='driver', type=d.T.string)]), - withDriver(driver): { source+: { inlineVolumeSpec+: { csi+: { driver: driver } } } }, - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\"."', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { source+: { inlineVolumeSpec+: { csi+: { fsType: fsType } } } }, - '#withReadOnly':: d.fn(help='"Optional: The value to pass to ControllerPublishVolumeRequest. Defaults to false (read/write)."', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { source+: { inlineVolumeSpec+: { csi+: { readOnly: readOnly } } } }, - '#withVolumeAttributes':: d.fn(help='"Attributes of the volume to publish."', args=[d.arg(name='volumeAttributes', type=d.T.object)]), - withVolumeAttributes(volumeAttributes): { source+: { inlineVolumeSpec+: { csi+: { volumeAttributes: volumeAttributes } } } }, - '#withVolumeAttributesMixin':: d.fn(help='"Attributes of the volume to publish."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='volumeAttributes', type=d.T.object)]), - withVolumeAttributesMixin(volumeAttributes): { source+: { inlineVolumeSpec+: { csi+: { volumeAttributes+: volumeAttributes } } } }, - '#withVolumeHandle':: d.fn(help='"VolumeHandle is the unique volume name returned by the CSI volume plugin’s CreateVolume to refer to the volume on all subsequent calls. Required."', args=[d.arg(name='volumeHandle', type=d.T.string)]), - withVolumeHandle(volumeHandle): { source+: { inlineVolumeSpec+: { csi+: { volumeHandle: volumeHandle } } } }, - }, - '#fc':: d.obj(help='"Represents a Fibre Channel volume. Fibre Channel volumes can only be mounted as read/write once. Fibre Channel volumes support ownership management and SELinux relabeling."'), - fc: { - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { source+: { inlineVolumeSpec+: { fc+: { fsType: fsType } } } }, - '#withLun':: d.fn(help='"Optional: FC target lun number"', args=[d.arg(name='lun', type=d.T.integer)]), - withLun(lun): { source+: { inlineVolumeSpec+: { fc+: { lun: lun } } } }, - '#withReadOnly':: d.fn(help='"Optional: Defaults to false (read/write). 
ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { source+: { inlineVolumeSpec+: { fc+: { readOnly: readOnly } } } }, - '#withTargetWWNs':: d.fn(help='"Optional: FC target worldwide names (WWNs)"', args=[d.arg(name='targetWWNs', type=d.T.array)]), - withTargetWWNs(targetWWNs): { source+: { inlineVolumeSpec+: { fc+: { targetWWNs: if std.isArray(v=targetWWNs) then targetWWNs else [targetWWNs] } } } }, - '#withTargetWWNsMixin':: d.fn(help='"Optional: FC target worldwide names (WWNs)"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='targetWWNs', type=d.T.array)]), - withTargetWWNsMixin(targetWWNs): { source+: { inlineVolumeSpec+: { fc+: { targetWWNs+: if std.isArray(v=targetWWNs) then targetWWNs else [targetWWNs] } } } }, - '#withWwids':: d.fn(help='"Optional: FC volume world wide identifiers (wwids) Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously."', args=[d.arg(name='wwids', type=d.T.array)]), - withWwids(wwids): { source+: { inlineVolumeSpec+: { fc+: { wwids: if std.isArray(v=wwids) then wwids else [wwids] } } } }, - '#withWwidsMixin':: d.fn(help='"Optional: FC volume world wide identifiers (wwids) Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='wwids', type=d.T.array)]), - withWwidsMixin(wwids): { source+: { inlineVolumeSpec+: { fc+: { wwids+: if std.isArray(v=wwids) then wwids else [wwids] } } } }, - }, - '#flexVolume':: d.obj(help='"FlexPersistentVolumeSource represents a generic persistent volume resource that is provisioned/attached using an exec based plugin."'), - flexVolume: { - '#secretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), - secretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { source+: { inlineVolumeSpec+: { flexVolume+: { secretRef+: { name: name } } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { source+: { inlineVolumeSpec+: { flexVolume+: { secretRef+: { namespace: namespace } } } } }, - }, - '#withDriver':: d.fn(help='"Driver is the name of the driver to use for this volume."', args=[d.arg(name='driver', type=d.T.string)]), - withDriver(driver): { source+: { inlineVolumeSpec+: { flexVolume+: { driver: driver } } } }, - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". 
The default filesystem depends on FlexVolume script."', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { source+: { inlineVolumeSpec+: { flexVolume+: { fsType: fsType } } } }, - '#withOptions':: d.fn(help='"Optional: Extra command options if any."', args=[d.arg(name='options', type=d.T.object)]), - withOptions(options): { source+: { inlineVolumeSpec+: { flexVolume+: { options: options } } } }, - '#withOptionsMixin':: d.fn(help='"Optional: Extra command options if any."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='options', type=d.T.object)]), - withOptionsMixin(options): { source+: { inlineVolumeSpec+: { flexVolume+: { options+: options } } } }, - '#withReadOnly':: d.fn(help='"Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { source+: { inlineVolumeSpec+: { flexVolume+: { readOnly: readOnly } } } }, - }, - '#flocker':: d.obj(help='"Represents a Flocker volume mounted by the Flocker agent. One and only one of datasetName and datasetUUID should be set. Flocker volumes do not support ownership management or SELinux relabeling."'), - flocker: { - '#withDatasetName':: d.fn(help='"Name of the dataset stored as metadata -> name on the dataset for Flocker should be considered as deprecated"', args=[d.arg(name='datasetName', type=d.T.string)]), - withDatasetName(datasetName): { source+: { inlineVolumeSpec+: { flocker+: { datasetName: datasetName } } } }, - '#withDatasetUUID':: d.fn(help='"UUID of the dataset. This is unique identifier of a Flocker dataset"', args=[d.arg(name='datasetUUID', type=d.T.string)]), - withDatasetUUID(datasetUUID): { source+: { inlineVolumeSpec+: { flocker+: { datasetUUID: datasetUUID } } } }, - }, - '#gcePersistentDisk':: d.obj(help='"Represents a Persistent Disk resource in Google Compute Engine.\\n\\nA GCE PD must exist before mounting to a container. The disk must also be in the same GCE project and zone as the kubelet. A GCE PD can only be mounted as read/write once or read-only many times. GCE PDs support ownership management and SELinux relabeling."'), - gcePersistentDisk: { - '#withFsType':: d.fn(help='"Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { source+: { inlineVolumeSpec+: { gcePersistentDisk+: { fsType: fsType } } } }, - '#withPartition':: d.fn(help='"The partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \\"1\\". Similarly, the volume partition for /dev/sda is \\"0\\" (or you can leave the property empty). More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='partition', type=d.T.integer)]), - withPartition(partition): { source+: { inlineVolumeSpec+: { gcePersistentDisk+: { partition: partition } } } }, - '#withPdName':: d.fn(help='"Unique name of the PD resource in GCE. Used to identify the disk in GCE. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='pdName', type=d.T.string)]), - withPdName(pdName): { source+: { inlineVolumeSpec+: { gcePersistentDisk+: { pdName: pdName } } } }, - '#withReadOnly':: d.fn(help='"ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { source+: { inlineVolumeSpec+: { gcePersistentDisk+: { readOnly: readOnly } } } }, - }, - '#glusterfs':: d.obj(help='"Represents a Glusterfs mount that lasts the lifetime of a pod. Glusterfs volumes do not support ownership management or SELinux relabeling."'), - glusterfs: { - '#withEndpoints':: d.fn(help='"EndpointsName is the endpoint name that details Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='endpoints', type=d.T.string)]), - withEndpoints(endpoints): { source+: { inlineVolumeSpec+: { glusterfs+: { endpoints: endpoints } } } }, - '#withEndpointsNamespace':: d.fn(help='"EndpointsNamespace is the namespace that contains Glusterfs endpoint. If this field is empty, the EndpointNamespace defaults to the same namespace as the bound PVC. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='endpointsNamespace', type=d.T.string)]), - withEndpointsNamespace(endpointsNamespace): { source+: { inlineVolumeSpec+: { glusterfs+: { endpointsNamespace: endpointsNamespace } } } }, - '#withPath':: d.fn(help='"Path is the Glusterfs volume path. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='path', type=d.T.string)]), - withPath(path): { source+: { inlineVolumeSpec+: { glusterfs+: { path: path } } } }, - '#withReadOnly':: d.fn(help='"ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions. Defaults to false. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { source+: { inlineVolumeSpec+: { glusterfs+: { readOnly: readOnly } } } }, - }, - '#hostPath':: d.obj(help='"Represents a host path mapped into a pod. Host path volumes do not support ownership management or SELinux relabeling."'), - hostPath: { - '#withPath':: d.fn(help='"Path of the directory on the host. If the path is a symlink, it will follow the link to the real path. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath"', args=[d.arg(name='path', type=d.T.string)]), - withPath(path): { source+: { inlineVolumeSpec+: { hostPath+: { path: path } } } }, - '#withType':: d.fn(help='"Type for HostPath Volume Defaults to \\"\\" More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath"', args=[d.arg(name='type', type=d.T.string)]), - withType(type): { source+: { inlineVolumeSpec+: { hostPath+: { type: type } } } }, - }, - '#iscsi':: d.obj(help='"ISCSIPersistentVolumeSource represents an ISCSI disk. ISCSI volumes can only be mounted as read/write once. ISCSI volumes support ownership management and SELinux relabeling."'), - iscsi: { - '#secretRef':: d.obj(help='"SecretReference represents a Secret Reference. 
It has enough information to retrieve secret in any namespace"'), - secretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { source+: { inlineVolumeSpec+: { iscsi+: { secretRef+: { name: name } } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { source+: { inlineVolumeSpec+: { iscsi+: { secretRef+: { namespace: namespace } } } } }, - }, - '#withChapAuthDiscovery':: d.fn(help='"whether support iSCSI Discovery CHAP authentication"', args=[d.arg(name='chapAuthDiscovery', type=d.T.boolean)]), - withChapAuthDiscovery(chapAuthDiscovery): { source+: { inlineVolumeSpec+: { iscsi+: { chapAuthDiscovery: chapAuthDiscovery } } } }, - '#withChapAuthSession':: d.fn(help='"whether support iSCSI Session CHAP authentication"', args=[d.arg(name='chapAuthSession', type=d.T.boolean)]), - withChapAuthSession(chapAuthSession): { source+: { inlineVolumeSpec+: { iscsi+: { chapAuthSession: chapAuthSession } } } }, - '#withFsType':: d.fn(help='"Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi"', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { source+: { inlineVolumeSpec+: { iscsi+: { fsType: fsType } } } }, - '#withInitiatorName':: d.fn(help='"Custom iSCSI Initiator Name. If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface : will be created for the connection."', args=[d.arg(name='initiatorName', type=d.T.string)]), - withInitiatorName(initiatorName): { source+: { inlineVolumeSpec+: { iscsi+: { initiatorName: initiatorName } } } }, - '#withIqn':: d.fn(help='"Target iSCSI Qualified Name."', args=[d.arg(name='iqn', type=d.T.string)]), - withIqn(iqn): { source+: { inlineVolumeSpec+: { iscsi+: { iqn: iqn } } } }, - '#withIscsiInterface':: d.fn(help="\"iSCSI Interface Name that uses an iSCSI transport. Defaults to 'default' (tcp).\"", args=[d.arg(name='iscsiInterface', type=d.T.string)]), - withIscsiInterface(iscsiInterface): { source+: { inlineVolumeSpec+: { iscsi+: { iscsiInterface: iscsiInterface } } } }, - '#withLun':: d.fn(help='"iSCSI Target Lun number."', args=[d.arg(name='lun', type=d.T.integer)]), - withLun(lun): { source+: { inlineVolumeSpec+: { iscsi+: { lun: lun } } } }, - '#withPortals':: d.fn(help='"iSCSI Target Portal List. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260)."', args=[d.arg(name='portals', type=d.T.array)]), - withPortals(portals): { source+: { inlineVolumeSpec+: { iscsi+: { portals: if std.isArray(v=portals) then portals else [portals] } } } }, - '#withPortalsMixin':: d.fn(help='"iSCSI Target Portal List. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260)."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='portals', type=d.T.array)]), - withPortalsMixin(portals): { source+: { inlineVolumeSpec+: { iscsi+: { portals+: if std.isArray(v=portals) then portals else [portals] } } } }, - '#withReadOnly':: d.fn(help='"ReadOnly here will force the ReadOnly setting in VolumeMounts. 
Defaults to false."', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { source+: { inlineVolumeSpec+: { iscsi+: { readOnly: readOnly } } } }, - '#withTargetPortal':: d.fn(help='"iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260)."', args=[d.arg(name='targetPortal', type=d.T.string)]), - withTargetPortal(targetPortal): { source+: { inlineVolumeSpec+: { iscsi+: { targetPortal: targetPortal } } } }, - }, - '#local':: d.obj(help='"Local represents directly-attached storage with node affinity (Beta feature)"'), - 'local': { - '#withFsType':: d.fn(help='"Filesystem type to mount. It applies only when the Path is a block device. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". The default value is to auto-select a fileystem if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { source+: { inlineVolumeSpec+: { 'local'+: { fsType: fsType } } } }, - '#withPath':: d.fn(help='"The full path to the volume on the node. It can be either a directory or block device (disk, partition, ...)."', args=[d.arg(name='path', type=d.T.string)]), - withPath(path): { source+: { inlineVolumeSpec+: { 'local'+: { path: path } } } }, - }, - '#nfs':: d.obj(help='"Represents an NFS mount that lasts the lifetime of a pod. NFS volumes do not support ownership management or SELinux relabeling."'), - nfs: { - '#withPath':: d.fn(help='"Path that is exported by the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs"', args=[d.arg(name='path', type=d.T.string)]), - withPath(path): { source+: { inlineVolumeSpec+: { nfs+: { path: path } } } }, - '#withReadOnly':: d.fn(help='"ReadOnly here will force the NFS export to be mounted with read-only permissions. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs"', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { source+: { inlineVolumeSpec+: { nfs+: { readOnly: readOnly } } } }, - '#withServer':: d.fn(help='"Server is the hostname or IP address of the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs"', args=[d.arg(name='server', type=d.T.string)]), - withServer(server): { source+: { inlineVolumeSpec+: { nfs+: { server: server } } } }, - }, - '#nodeAffinity':: d.obj(help='"VolumeNodeAffinity defines constraints that limit what nodes this volume can be accessed from."'), - nodeAffinity: { - '#required':: d.obj(help='"A node selector represents the union of the results of one or more label queries over a set of nodes; that is, it represents the OR of the selectors represented by the node selector terms."'), - required: { - '#withNodeSelectorTerms':: d.fn(help='"Required. A list of node selector terms. The terms are ORed."', args=[d.arg(name='nodeSelectorTerms', type=d.T.array)]), - withNodeSelectorTerms(nodeSelectorTerms): { source+: { inlineVolumeSpec+: { nodeAffinity+: { required+: { nodeSelectorTerms: if std.isArray(v=nodeSelectorTerms) then nodeSelectorTerms else [nodeSelectorTerms] } } } } }, - '#withNodeSelectorTermsMixin':: d.fn(help='"Required. A list of node selector terms. 
The terms are ORed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='nodeSelectorTerms', type=d.T.array)]), - withNodeSelectorTermsMixin(nodeSelectorTerms): { source+: { inlineVolumeSpec+: { nodeAffinity+: { required+: { nodeSelectorTerms+: if std.isArray(v=nodeSelectorTerms) then nodeSelectorTerms else [nodeSelectorTerms] } } } } }, - }, - }, - '#photonPersistentDisk':: d.obj(help='"Represents a Photon Controller persistent disk resource."'), - photonPersistentDisk: { - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { source+: { inlineVolumeSpec+: { photonPersistentDisk+: { fsType: fsType } } } }, - '#withPdID':: d.fn(help='"ID that identifies Photon Controller persistent disk"', args=[d.arg(name='pdID', type=d.T.string)]), - withPdID(pdID): { source+: { inlineVolumeSpec+: { photonPersistentDisk+: { pdID: pdID } } } }, - }, - '#portworxVolume':: d.obj(help='"PortworxVolumeSource represents a Portworx volume resource."'), - portworxVolume: { - '#withFsType':: d.fn(help='"FSType represents the filesystem type to mount Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { source+: { inlineVolumeSpec+: { portworxVolume+: { fsType: fsType } } } }, - '#withReadOnly':: d.fn(help='"Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { source+: { inlineVolumeSpec+: { portworxVolume+: { readOnly: readOnly } } } }, - '#withVolumeID':: d.fn(help='"VolumeID uniquely identifies a Portworx volume"', args=[d.arg(name='volumeID', type=d.T.string)]), - withVolumeID(volumeID): { source+: { inlineVolumeSpec+: { portworxVolume+: { volumeID: volumeID } } } }, - }, - '#quobyte':: d.obj(help='"Represents a Quobyte mount that lasts the lifetime of a pod. Quobyte volumes do not support ownership management or SELinux relabeling."'), - quobyte: { - '#withGroup':: d.fn(help='"Group to map volume access to Default is no group"', args=[d.arg(name='group', type=d.T.string)]), - withGroup(group): { source+: { inlineVolumeSpec+: { quobyte+: { group: group } } } }, - '#withReadOnly':: d.fn(help='"ReadOnly here will force the Quobyte volume to be mounted with read-only permissions. 
Defaults to false."', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { source+: { inlineVolumeSpec+: { quobyte+: { readOnly: readOnly } } } }, - '#withRegistry':: d.fn(help='"Registry represents a single or multiple Quobyte Registry services specified as a string as host:port pair (multiple entries are separated with commas) which acts as the central registry for volumes"', args=[d.arg(name='registry', type=d.T.string)]), - withRegistry(registry): { source+: { inlineVolumeSpec+: { quobyte+: { registry: registry } } } }, - '#withTenant':: d.fn(help='"Tenant owning the given Quobyte volume in the Backend Used with dynamically provisioned Quobyte volumes, value is set by the plugin"', args=[d.arg(name='tenant', type=d.T.string)]), - withTenant(tenant): { source+: { inlineVolumeSpec+: { quobyte+: { tenant: tenant } } } }, - '#withUser':: d.fn(help='"User to map volume access to Defaults to serivceaccount user"', args=[d.arg(name='user', type=d.T.string)]), - withUser(user): { source+: { inlineVolumeSpec+: { quobyte+: { user: user } } } }, - '#withVolume':: d.fn(help='"Volume is a string that references an already created Quobyte volume by name."', args=[d.arg(name='volume', type=d.T.string)]), - withVolume(volume): { source+: { inlineVolumeSpec+: { quobyte+: { volume: volume } } } }, - }, - '#rbd':: d.obj(help='"Represents a Rados Block Device mount that lasts the lifetime of a pod. RBD volumes support ownership management and SELinux relabeling."'), - rbd: { - '#secretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), - secretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { source+: { inlineVolumeSpec+: { rbd+: { secretRef+: { name: name } } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { source+: { inlineVolumeSpec+: { rbd+: { secretRef+: { namespace: namespace } } } } }, - }, - '#withFsType':: d.fn(help='"Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd"', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { source+: { inlineVolumeSpec+: { rbd+: { fsType: fsType } } } }, - '#withImage':: d.fn(help='"The rados image name. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='image', type=d.T.string)]), - withImage(image): { source+: { inlineVolumeSpec+: { rbd+: { image: image } } } }, - '#withKeyring':: d.fn(help='"Keyring is the path to key ring for RBDUser. Default is /etc/ceph/keyring. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='keyring', type=d.T.string)]), - withKeyring(keyring): { source+: { inlineVolumeSpec+: { rbd+: { keyring: keyring } } } }, - '#withMonitors':: d.fn(help='"A collection of Ceph monitors. 
More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='monitors', type=d.T.array)]), - withMonitors(monitors): { source+: { inlineVolumeSpec+: { rbd+: { monitors: if std.isArray(v=monitors) then monitors else [monitors] } } } }, - '#withMonitorsMixin':: d.fn(help='"A collection of Ceph monitors. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='monitors', type=d.T.array)]), - withMonitorsMixin(monitors): { source+: { inlineVolumeSpec+: { rbd+: { monitors+: if std.isArray(v=monitors) then monitors else [monitors] } } } }, - '#withPool':: d.fn(help='"The rados pool name. Default is rbd. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='pool', type=d.T.string)]), - withPool(pool): { source+: { inlineVolumeSpec+: { rbd+: { pool: pool } } } }, - '#withReadOnly':: d.fn(help='"ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { source+: { inlineVolumeSpec+: { rbd+: { readOnly: readOnly } } } }, - '#withUser':: d.fn(help='"The rados user name. Default is admin. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='user', type=d.T.string)]), - withUser(user): { source+: { inlineVolumeSpec+: { rbd+: { user: user } } } }, - }, - '#scaleIO':: d.obj(help='"ScaleIOPersistentVolumeSource represents a persistent ScaleIO volume"'), - scaleIO: { - '#secretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), - secretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { source+: { inlineVolumeSpec+: { scaleIO+: { secretRef+: { name: name } } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { source+: { inlineVolumeSpec+: { scaleIO+: { secretRef+: { namespace: namespace } } } } }, - }, - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Default is \\"xfs\\', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { source+: { inlineVolumeSpec+: { scaleIO+: { fsType: fsType } } } }, - '#withGateway':: d.fn(help='"The host address of the ScaleIO API Gateway."', args=[d.arg(name='gateway', type=d.T.string)]), - withGateway(gateway): { source+: { inlineVolumeSpec+: { scaleIO+: { gateway: gateway } } } }, - '#withProtectionDomain':: d.fn(help='"The name of the ScaleIO Protection Domain for the configured storage."', args=[d.arg(name='protectionDomain', type=d.T.string)]), - withProtectionDomain(protectionDomain): { source+: { inlineVolumeSpec+: { scaleIO+: { protectionDomain: protectionDomain } } } }, - '#withReadOnly':: d.fn(help='"Defaults to false (read/write). 
ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { source+: { inlineVolumeSpec+: { scaleIO+: { readOnly: readOnly } } } }, - '#withSslEnabled':: d.fn(help='"Flag to enable/disable SSL communication with Gateway, default false"', args=[d.arg(name='sslEnabled', type=d.T.boolean)]), - withSslEnabled(sslEnabled): { source+: { inlineVolumeSpec+: { scaleIO+: { sslEnabled: sslEnabled } } } }, - '#withStorageMode':: d.fn(help='"Indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. Default is ThinProvisioned."', args=[d.arg(name='storageMode', type=d.T.string)]), - withStorageMode(storageMode): { source+: { inlineVolumeSpec+: { scaleIO+: { storageMode: storageMode } } } }, - '#withStoragePool':: d.fn(help='"The ScaleIO Storage Pool associated with the protection domain."', args=[d.arg(name='storagePool', type=d.T.string)]), - withStoragePool(storagePool): { source+: { inlineVolumeSpec+: { scaleIO+: { storagePool: storagePool } } } }, - '#withSystem':: d.fn(help='"The name of the storage system as configured in ScaleIO."', args=[d.arg(name='system', type=d.T.string)]), - withSystem(system): { source+: { inlineVolumeSpec+: { scaleIO+: { system: system } } } }, - '#withVolumeName':: d.fn(help='"The name of a volume already created in the ScaleIO system that is associated with this volume source."', args=[d.arg(name='volumeName', type=d.T.string)]), - withVolumeName(volumeName): { source+: { inlineVolumeSpec+: { scaleIO+: { volumeName: volumeName } } } }, - }, - '#storageos':: d.obj(help='"Represents a StorageOS persistent volume resource."'), - storageos: { - '#secretRef':: d.obj(help='"ObjectReference contains enough information to let you inspect or modify the referred object."'), - secretRef: { - '#withApiVersion':: d.fn(help='"API version of the referent."', args=[d.arg(name='apiVersion', type=d.T.string)]), - withApiVersion(apiVersion): { source+: { inlineVolumeSpec+: { storageos+: { secretRef+: { apiVersion: apiVersion } } } } }, - '#withFieldPath':: d.fn(help='"If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. For example, if the object reference is to a container within a pod, this would take on a value like: \\"spec.containers{name}\\" (where \\"name\\" refers to the name of the container that triggered the event) or if no container name is specified \\"spec.containers[2]\\" (container with index 2 in this pod). This syntax is chosen only to have some well-defined way of referencing a part of an object."', args=[d.arg(name='fieldPath', type=d.T.string)]), - withFieldPath(fieldPath): { source+: { inlineVolumeSpec+: { storageos+: { secretRef+: { fieldPath: fieldPath } } } } }, - '#withKind':: d.fn(help='"Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds"', args=[d.arg(name='kind', type=d.T.string)]), - withKind(kind): { source+: { inlineVolumeSpec+: { storageos+: { secretRef+: { kind: kind } } } } }, - '#withName':: d.fn(help='"Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { source+: { inlineVolumeSpec+: { storageos+: { secretRef+: { name: name } } } } }, - '#withNamespace':: d.fn(help='"Namespace of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/"', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { source+: { inlineVolumeSpec+: { storageos+: { secretRef+: { namespace: namespace } } } } }, - '#withResourceVersion':: d.fn(help='"Specific resourceVersion to which this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), - withResourceVersion(resourceVersion): { source+: { inlineVolumeSpec+: { storageos+: { secretRef+: { resourceVersion: resourceVersion } } } } }, - '#withUid':: d.fn(help='"UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids"', args=[d.arg(name='uid', type=d.T.string)]), - withUid(uid): { source+: { inlineVolumeSpec+: { storageos+: { secretRef+: { uid: uid } } } } }, - }, - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { source+: { inlineVolumeSpec+: { storageos+: { fsType: fsType } } } }, - '#withReadOnly':: d.fn(help='"Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { source+: { inlineVolumeSpec+: { storageos+: { readOnly: readOnly } } } }, - '#withVolumeName':: d.fn(help='"VolumeName is the human-readable name of the StorageOS volume. Volume names are only unique within a namespace."', args=[d.arg(name='volumeName', type=d.T.string)]), - withVolumeName(volumeName): { source+: { inlineVolumeSpec+: { storageos+: { volumeName: volumeName } } } }, - '#withVolumeNamespace':: d.fn(help="\"VolumeNamespace specifies the scope of the volume within StorageOS. If no namespace is specified then the Pod's namespace will be used. This allows the Kubernetes name scoping to be mirrored within StorageOS for tighter integration. Set VolumeName to any name to override the default behaviour. Set to \\\"default\\\" if you are not using namespaces within StorageOS. Namespaces that do not pre-exist within StorageOS will be created.\"", args=[d.arg(name='volumeNamespace', type=d.T.string)]), - withVolumeNamespace(volumeNamespace): { source+: { inlineVolumeSpec+: { storageos+: { volumeNamespace: volumeNamespace } } } }, - }, - '#vsphereVolume':: d.obj(help='"Represents a vSphere volume resource."'), - vsphereVolume: { - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". 
Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { source+: { inlineVolumeSpec+: { vsphereVolume+: { fsType: fsType } } } }, - '#withStoragePolicyID':: d.fn(help='"Storage Policy Based Management (SPBM) profile ID associated with the StoragePolicyName."', args=[d.arg(name='storagePolicyID', type=d.T.string)]), - withStoragePolicyID(storagePolicyID): { source+: { inlineVolumeSpec+: { vsphereVolume+: { storagePolicyID: storagePolicyID } } } }, - '#withStoragePolicyName':: d.fn(help='"Storage Policy Based Management (SPBM) profile name."', args=[d.arg(name='storagePolicyName', type=d.T.string)]), - withStoragePolicyName(storagePolicyName): { source+: { inlineVolumeSpec+: { vsphereVolume+: { storagePolicyName: storagePolicyName } } } }, - '#withVolumePath':: d.fn(help='"Path that identifies vSphere volume vmdk"', args=[d.arg(name='volumePath', type=d.T.string)]), - withVolumePath(volumePath): { source+: { inlineVolumeSpec+: { vsphereVolume+: { volumePath: volumePath } } } }, - }, - '#withAccessModes':: d.fn(help='"AccessModes contains all ways the volume can be mounted. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes"', args=[d.arg(name='accessModes', type=d.T.array)]), - withAccessModes(accessModes): { source+: { inlineVolumeSpec+: { accessModes: if std.isArray(v=accessModes) then accessModes else [accessModes] } } }, - '#withAccessModesMixin':: d.fn(help='"AccessModes contains all ways the volume can be mounted. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='accessModes', type=d.T.array)]), - withAccessModesMixin(accessModes): { source+: { inlineVolumeSpec+: { accessModes+: if std.isArray(v=accessModes) then accessModes else [accessModes] } } }, - '#withCapacity':: d.fn(help="\"A description of the persistent volume's resources and capacity. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity\"", args=[d.arg(name='capacity', type=d.T.object)]), - withCapacity(capacity): { source+: { inlineVolumeSpec+: { capacity: capacity } } }, - '#withCapacityMixin':: d.fn(help="\"A description of the persistent volume's resources and capacity. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='capacity', type=d.T.object)]), - withCapacityMixin(capacity): { source+: { inlineVolumeSpec+: { capacity+: capacity } } }, - '#withMountOptions':: d.fn(help='"A list of mount options, e.g. [\\"ro\\", \\"soft\\"]. Not validated - mount will simply fail if one is invalid. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#mount-options"', args=[d.arg(name='mountOptions', type=d.T.array)]), - withMountOptions(mountOptions): { source+: { inlineVolumeSpec+: { mountOptions: if std.isArray(v=mountOptions) then mountOptions else [mountOptions] } } }, - '#withMountOptionsMixin':: d.fn(help='"A list of mount options, e.g. [\\"ro\\", \\"soft\\"]. Not validated - mount will simply fail if one is invalid. 
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#mount-options"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='mountOptions', type=d.T.array)]), - withMountOptionsMixin(mountOptions): { source+: { inlineVolumeSpec+: { mountOptions+: if std.isArray(v=mountOptions) then mountOptions else [mountOptions] } } }, - '#withPersistentVolumeReclaimPolicy':: d.fn(help='"What happens to a persistent volume when released from its claim. Valid options are Retain (default for manually created PersistentVolumes), Delete (default for dynamically provisioned PersistentVolumes), and Recycle (deprecated). Recycle must be supported by the volume plugin underlying this PersistentVolume. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#reclaiming"', args=[d.arg(name='persistentVolumeReclaimPolicy', type=d.T.string)]), - withPersistentVolumeReclaimPolicy(persistentVolumeReclaimPolicy): { source+: { inlineVolumeSpec+: { persistentVolumeReclaimPolicy: persistentVolumeReclaimPolicy } } }, - '#withStorageClassName':: d.fn(help='"Name of StorageClass to which this persistent volume belongs. Empty value means that this volume does not belong to any StorageClass."', args=[d.arg(name='storageClassName', type=d.T.string)]), - withStorageClassName(storageClassName): { source+: { inlineVolumeSpec+: { storageClassName: storageClassName } } }, - '#withVolumeMode':: d.fn(help='"volumeMode defines if a volume is intended to be used with a formatted filesystem or to remain in raw block state. Value of Filesystem is implied when not included in spec."', args=[d.arg(name='volumeMode', type=d.T.string)]), - withVolumeMode(volumeMode): { source+: { inlineVolumeSpec+: { volumeMode: volumeMode } } }, - }, - '#withPersistentVolumeName':: d.fn(help='"Name of the persistent volume to attach."', args=[d.arg(name='persistentVolumeName', type=d.T.string)]), - withPersistentVolumeName(persistentVolumeName): { source+: { persistentVolumeName: persistentVolumeName } }, - }, - '#withAttacher':: d.fn(help='"Attacher indicates the name of the volume driver that MUST handle this request. This is the name returned by GetPluginName()."', args=[d.arg(name='attacher', type=d.T.string)]), - withAttacher(attacher): { attacher: attacher }, - '#withNodeName':: d.fn(help='"The node that the volume should be attached to."', args=[d.arg(name='nodeName', type=d.T.string)]), - withNodeName(nodeName): { nodeName: nodeName }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1alpha1/volumeAttachmentStatus.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1alpha1/volumeAttachmentStatus.libsonnet deleted file mode 100644 index 155e537a9c8..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1alpha1/volumeAttachmentStatus.libsonnet +++ /dev/null @@ -1,26 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='volumeAttachmentStatus', url='', help='"VolumeAttachmentStatus is the status of a VolumeAttachment request."'), - '#attachError':: d.obj(help='"VolumeError captures an error encountered during a volume operation."'), - attachError: { - '#withMessage':: d.fn(help='"String detailing the error encountered during Attach or Detach operation. 
This string maybe logged, so it should not contain sensitive information."', args=[d.arg(name='message', type=d.T.string)]), - withMessage(message): { attachError+: { message: message } }, - '#withTime':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='time', type=d.T.string)]), - withTime(time): { attachError+: { time: time } }, - }, - '#detachError':: d.obj(help='"VolumeError captures an error encountered during a volume operation."'), - detachError: { - '#withMessage':: d.fn(help='"String detailing the error encountered during Attach or Detach operation. This string maybe logged, so it should not contain sensitive information."', args=[d.arg(name='message', type=d.T.string)]), - withMessage(message): { detachError+: { message: message } }, - '#withTime':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='time', type=d.T.string)]), - withTime(time): { detachError+: { time: time } }, - }, - '#withAttached':: d.fn(help='"Indicates the volume is successfully attached. This field must only be set by the entity completing the attach operation, i.e. the external-attacher."', args=[d.arg(name='attached', type=d.T.boolean)]), - withAttached(attached): { attached: attached }, - '#withAttachmentMetadata':: d.fn(help='"Upon successful attach, this field is populated with any information returned by the attach operation that must be passed into subsequent WaitForAttach or Mount calls. This field must only be set by the entity completing the attach operation, i.e. the external-attacher."', args=[d.arg(name='attachmentMetadata', type=d.T.object)]), - withAttachmentMetadata(attachmentMetadata): { attachmentMetadata: attachmentMetadata }, - '#withAttachmentMetadataMixin':: d.fn(help='"Upon successful attach, this field is populated with any information returned by the attach operation that must be passed into subsequent WaitForAttach or Mount calls. This field must only be set by the entity completing the attach operation, i.e. the external-attacher."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='attachmentMetadata', type=d.T.object)]), - withAttachmentMetadataMixin(attachmentMetadata): { attachmentMetadata+: attachmentMetadata }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1alpha1/volumeError.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1alpha1/volumeError.libsonnet deleted file mode 100644 index f77069c446f..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1alpha1/volumeError.libsonnet +++ /dev/null @@ -1,10 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='volumeError', url='', help='"VolumeError captures an error encountered during a volume operation."'), - '#withMessage':: d.fn(help='"String detailing the error encountered during Attach or Detach operation. This string maybe logged, so it should not contain sensitive information."', args=[d.arg(name='message', type=d.T.string)]), - withMessage(message): { message: message }, - '#withTime':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. 
Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='time', type=d.T.string)]), - withTime(time): { time: time }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1beta1/csiDriver.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1beta1/csiDriver.libsonnet deleted file mode 100644 index 34a6fb0a69b..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1beta1/csiDriver.libsonnet +++ /dev/null @@ -1,77 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='csiDriver', url='', help='"CSIDriver captures information about a Container Storage Interface (CSI) volume driver deployed on the cluster. CSI drivers do not need to create the CSIDriver object directly. Instead they may use the cluster-driver-registrar sidecar container. When deployed with a CSI driver it automatically creates a CSIDriver object representing the driver. Kubernetes attach detach controller uses this object to determine whether attach is required. Kubelet uses this object to determine whether pod information needs to be passed on mount. CSIDriver objects are non-namespaced."'), - '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), - metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), - withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), - withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, - '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), - withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, - '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. 
Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), - withDeletionGracePeriodSeconds(deletionGracePeriodSeconds): { metadata+: { deletionGracePeriodSeconds: deletionGracePeriodSeconds } }, - '#withDeletionTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='deletionTimestamp', type=d.T.string)]), - withDeletionTimestamp(deletionTimestamp): { metadata+: { deletionTimestamp: deletionTimestamp } }, - '#withFinalizers':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. 
The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), - withGenerateName(generateName): { metadata+: { generateName: generateName } }, - '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), - withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), - withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), - withLabelsMixin(labels): { metadata+: { labels+: labels } }, - '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. 
More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { metadata+: { namespace: namespace } }, - '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, - '#withOwnerReferencesMixin':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, - '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), - withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), - withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. 
More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), - withUid(uid): { metadata+: { uid: uid } }, - }, - '#new':: d.fn(help='new returns an instance of CSIDriver', args=[d.arg(name='name', type=d.T.string)]), - new(name): { - apiVersion: 'storage.k8s.io/v1beta1', - kind: 'CSIDriver', - } + self.metadata.withName(name=name), - '#spec':: d.obj(help='"CSIDriverSpec is the specification of a CSIDriver."'), - spec: { - '#withAttachRequired':: d.fn(help='"attachRequired indicates this CSI volume driver requires an attach operation (because it implements the CSI ControllerPublishVolume() method), and that the Kubernetes attach detach controller should call the attach volume interface which checks the volumeattachment status and waits until the volume is attached before proceeding to mounting. The CSI external-attacher coordinates with CSI volume driver and updates the volumeattachment status when the attach operation is complete. If the CSIDriverRegistry feature gate is enabled and the value is specified to false, the attach operation will be skipped. Otherwise the attach operation will be called.\\n\\nThis field is immutable."', args=[d.arg(name='attachRequired', type=d.T.boolean)]), - withAttachRequired(attachRequired): { spec+: { attachRequired: attachRequired } }, - '#withFsGroupPolicy':: d.fn(help='"Defines if the underlying volume supports changing ownership and permission of the volume before being mounted. Refer to the specific FSGroupPolicy values for additional details. This field is alpha-level, and is only honored by servers that enable the CSIVolumeFSGroupPolicy feature gate.\\n\\nThis field is immutable."', args=[d.arg(name='fsGroupPolicy', type=d.T.string)]), - withFsGroupPolicy(fsGroupPolicy): { spec+: { fsGroupPolicy: fsGroupPolicy } }, - '#withPodInfoOnMount':: d.fn(help="\"If set to true, podInfoOnMount indicates this CSI volume driver requires additional pod information (like podName, podUID, etc.) during mount operations. If set to false, pod information will not be passed on mount. Default is false. The CSI driver specifies podInfoOnMount as part of driver deployment. If true, Kubelet will pass pod information as VolumeContext in the CSI NodePublishVolume() calls. The CSI driver is responsible for parsing and validating the information passed in as VolumeContext. The following VolumeConext will be passed if podInfoOnMount is set to true. This list might grow, but the prefix will be used. \\\"csi.storage.k8s.io/pod.name\\\": pod.Name \\\"csi.storage.k8s.io/pod.namespace\\\": pod.Namespace \\\"csi.storage.k8s.io/pod.uid\\\": string(pod.UID) \\\"csi.storage.k8s.io/ephemeral\\\": \\\"true\\\" if the volume is an ephemeral inline volume\\n defined by a CSIVolumeSource, otherwise \\\"false\\\"\\n\\n\\\"csi.storage.k8s.io/ephemeral\\\" is a new feature in Kubernetes 1.16. It is only required for drivers which support both the \\\"Persistent\\\" and \\\"Ephemeral\\\" VolumeLifecycleMode. Other drivers can leave pod info disabled and/or ignore this field. 
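// Illustrative sketch, not applied by this patch: how the `new()` constructor and
// the `spec.with*` builders above are typically combined by callers. The `k`
// import path and the `storage.v1beta1.csiDriver` lookup are assumptions about
// how main.libsonnet exposes these generated packages.
local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.21/main.libsonnet';
local csiDriver = k.storage.v1beta1.csiDriver;

csiDriver.new('example.csi.vendor.io')       // sets apiVersion, kind and metadata.name
+ csiDriver.spec.withAttachRequired(true)
+ csiDriver.spec.withPodInfoOnMount(false)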
As Kubernetes 1.15 doesn't support this field, drivers can only support one mode when deployed on such a cluster and the deployment determines which mode that is, for example via a command line parameter of the driver.\\n\\nThis field is immutable.\"", args=[d.arg(name='podInfoOnMount', type=d.T.boolean)]), - withPodInfoOnMount(podInfoOnMount): { spec+: { podInfoOnMount: podInfoOnMount } }, - '#withRequiresRepublish':: d.fn(help='"RequiresRepublish indicates the CSI driver wants `NodePublishVolume` being periodically called to reflect any possible change in the mounted volume. This field defaults to false.\\n\\nNote: After a successful initial NodePublishVolume call, subsequent calls to NodePublishVolume should only update the contents of the volume. New mount points will not be seen by a running container.\\n\\nThis is a beta feature and only available when the CSIServiceAccountToken feature is enabled."', args=[d.arg(name='requiresRepublish', type=d.T.boolean)]), - withRequiresRepublish(requiresRepublish): { spec+: { requiresRepublish: requiresRepublish } }, - '#withStorageCapacity':: d.fn(help='"If set to true, storageCapacity indicates that the CSI volume driver wants pod scheduling to consider the storage capacity that the driver deployment will report by creating CSIStorageCapacity objects with capacity information.\\n\\nThe check can be enabled immediately when deploying a driver. In that case, provisioning new volumes with late binding will pause until the driver deployment has published some suitable CSIStorageCapacity object.\\n\\nAlternatively, the driver can be deployed with the field unset or false and it can be flipped later when storage capacity information has been published.\\n\\nThis field is immutable.\\n\\nThis is a beta field and only available when the CSIStorageCapacity feature is enabled. The default is false."', args=[d.arg(name='storageCapacity', type=d.T.boolean)]), - withStorageCapacity(storageCapacity): { spec+: { storageCapacity: storageCapacity } }, - '#withTokenRequests':: d.fn(help="\"TokenRequests indicates the CSI driver needs pods' service account tokens it is mounting volume for to do necessary authentication. Kubelet will pass the tokens in VolumeContext in the CSI NodePublishVolume calls. The CSI driver should parse and validate the following VolumeContext: \\\"csi.storage.k8s.io/serviceAccount.tokens\\\": {\\n \\\"\u003caudience\u003e\\\": {\\n \\\"token\\\": \u003ctoken\u003e,\\n \\\"expirationTimestamp\\\": \u003cexpiration timestamp in RFC3339\u003e,\\n },\\n ...\\n}\\n\\nNote: Audience in each TokenRequest should be different and at most one token is empty string. To receive a new token after expiry, RequiresRepublish can be used to trigger NodePublishVolume periodically.\\n\\nThis is a beta feature and only available when the CSIServiceAccountToken feature is enabled.\"", args=[d.arg(name='tokenRequests', type=d.T.array)]), - withTokenRequests(tokenRequests): { spec+: { tokenRequests: if std.isArray(v=tokenRequests) then tokenRequests else [tokenRequests] } }, - '#withTokenRequestsMixin':: d.fn(help="\"TokenRequests indicates the CSI driver needs pods' service account tokens it is mounting volume for to do necessary authentication. Kubelet will pass the tokens in VolumeContext in the CSI NodePublishVolume calls. 
The CSI driver should parse and validate the following VolumeContext: \\\"csi.storage.k8s.io/serviceAccount.tokens\\\": {\\n \\\"\u003caudience\u003e\\\": {\\n \\\"token\\\": \u003ctoken\u003e,\\n \\\"expirationTimestamp\\\": \u003cexpiration timestamp in RFC3339\u003e,\\n },\\n ...\\n}\\n\\nNote: Audience in each TokenRequest should be different and at most one token is empty string. To receive a new token after expiry, RequiresRepublish can be used to trigger NodePublishVolume periodically.\\n\\nThis is a beta feature and only available when the CSIServiceAccountToken feature is enabled.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='tokenRequests', type=d.T.array)]), - withTokenRequestsMixin(tokenRequests): { spec+: { tokenRequests+: if std.isArray(v=tokenRequests) then tokenRequests else [tokenRequests] } }, - '#withVolumeLifecycleModes':: d.fn(help='"VolumeLifecycleModes defines what kind of volumes this CSI volume driver supports. The default if the list is empty is \\"Persistent\\", which is the usage defined by the CSI specification and implemented in Kubernetes via the usual PV/PVC mechanism. The other mode is \\"Ephemeral\\". In this mode, volumes are defined inline inside the pod spec with CSIVolumeSource and their lifecycle is tied to the lifecycle of that pod. A driver has to be aware of this because it is only going to get a NodePublishVolume call for such a volume. For more information about implementing this mode, see https://kubernetes-csi.github.io/docs/ephemeral-local-volumes.html A driver can support one or more of these modes and more modes may be added in the future.\\n\\nThis field is immutable."', args=[d.arg(name='volumeLifecycleModes', type=d.T.array)]), - withVolumeLifecycleModes(volumeLifecycleModes): { spec+: { volumeLifecycleModes: if std.isArray(v=volumeLifecycleModes) then volumeLifecycleModes else [volumeLifecycleModes] } }, - '#withVolumeLifecycleModesMixin':: d.fn(help='"VolumeLifecycleModes defines what kind of volumes this CSI volume driver supports. The default if the list is empty is \\"Persistent\\", which is the usage defined by the CSI specification and implemented in Kubernetes via the usual PV/PVC mechanism. The other mode is \\"Ephemeral\\". In this mode, volumes are defined inline inside the pod spec with CSIVolumeSource and their lifecycle is tied to the lifecycle of that pod. A driver has to be aware of this because it is only going to get a NodePublishVolume call for such a volume. 
For more information about implementing this mode, see https://kubernetes-csi.github.io/docs/ephemeral-local-volumes.html A driver can support one or more of these modes and more modes may be added in the future.\\n\\nThis field is immutable."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='volumeLifecycleModes', type=d.T.array)]), - withVolumeLifecycleModesMixin(volumeLifecycleModes): { spec+: { volumeLifecycleModes+: if std.isArray(v=volumeLifecycleModes) then volumeLifecycleModes else [volumeLifecycleModes] } }, - }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1beta1/csiDriverSpec.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1beta1/csiDriverSpec.libsonnet deleted file mode 100644 index 0618b0af909..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1beta1/csiDriverSpec.libsonnet +++ /dev/null @@ -1,24 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='csiDriverSpec', url='', help='"CSIDriverSpec is the specification of a CSIDriver."'), - '#withAttachRequired':: d.fn(help='"attachRequired indicates this CSI volume driver requires an attach operation (because it implements the CSI ControllerPublishVolume() method), and that the Kubernetes attach detach controller should call the attach volume interface which checks the volumeattachment status and waits until the volume is attached before proceeding to mounting. The CSI external-attacher coordinates with CSI volume driver and updates the volumeattachment status when the attach operation is complete. If the CSIDriverRegistry feature gate is enabled and the value is specified to false, the attach operation will be skipped. Otherwise the attach operation will be called.\\n\\nThis field is immutable."', args=[d.arg(name='attachRequired', type=d.T.boolean)]), - withAttachRequired(attachRequired): { attachRequired: attachRequired }, - '#withFsGroupPolicy':: d.fn(help='"Defines if the underlying volume supports changing ownership and permission of the volume before being mounted. Refer to the specific FSGroupPolicy values for additional details. This field is alpha-level, and is only honored by servers that enable the CSIVolumeFSGroupPolicy feature gate.\\n\\nThis field is immutable."', args=[d.arg(name='fsGroupPolicy', type=d.T.string)]), - withFsGroupPolicy(fsGroupPolicy): { fsGroupPolicy: fsGroupPolicy }, - '#withPodInfoOnMount':: d.fn(help="\"If set to true, podInfoOnMount indicates this CSI volume driver requires additional pod information (like podName, podUID, etc.) during mount operations. If set to false, pod information will not be passed on mount. Default is false. The CSI driver specifies podInfoOnMount as part of driver deployment. If true, Kubelet will pass pod information as VolumeContext in the CSI NodePublishVolume() calls. The CSI driver is responsible for parsing and validating the information passed in as VolumeContext. The following VolumeConext will be passed if podInfoOnMount is set to true. This list might grow, but the prefix will be used. 
\\\"csi.storage.k8s.io/pod.name\\\": pod.Name \\\"csi.storage.k8s.io/pod.namespace\\\": pod.Namespace \\\"csi.storage.k8s.io/pod.uid\\\": string(pod.UID) \\\"csi.storage.k8s.io/ephemeral\\\": \\\"true\\\" if the volume is an ephemeral inline volume\\n defined by a CSIVolumeSource, otherwise \\\"false\\\"\\n\\n\\\"csi.storage.k8s.io/ephemeral\\\" is a new feature in Kubernetes 1.16. It is only required for drivers which support both the \\\"Persistent\\\" and \\\"Ephemeral\\\" VolumeLifecycleMode. Other drivers can leave pod info disabled and/or ignore this field. As Kubernetes 1.15 doesn't support this field, drivers can only support one mode when deployed on such a cluster and the deployment determines which mode that is, for example via a command line parameter of the driver.\\n\\nThis field is immutable.\"", args=[d.arg(name='podInfoOnMount', type=d.T.boolean)]), - withPodInfoOnMount(podInfoOnMount): { podInfoOnMount: podInfoOnMount }, - '#withRequiresRepublish':: d.fn(help='"RequiresRepublish indicates the CSI driver wants `NodePublishVolume` being periodically called to reflect any possible change in the mounted volume. This field defaults to false.\\n\\nNote: After a successful initial NodePublishVolume call, subsequent calls to NodePublishVolume should only update the contents of the volume. New mount points will not be seen by a running container.\\n\\nThis is a beta feature and only available when the CSIServiceAccountToken feature is enabled."', args=[d.arg(name='requiresRepublish', type=d.T.boolean)]), - withRequiresRepublish(requiresRepublish): { requiresRepublish: requiresRepublish }, - '#withStorageCapacity':: d.fn(help='"If set to true, storageCapacity indicates that the CSI volume driver wants pod scheduling to consider the storage capacity that the driver deployment will report by creating CSIStorageCapacity objects with capacity information.\\n\\nThe check can be enabled immediately when deploying a driver. In that case, provisioning new volumes with late binding will pause until the driver deployment has published some suitable CSIStorageCapacity object.\\n\\nAlternatively, the driver can be deployed with the field unset or false and it can be flipped later when storage capacity information has been published.\\n\\nThis field is immutable.\\n\\nThis is a beta field and only available when the CSIStorageCapacity feature is enabled. The default is false."', args=[d.arg(name='storageCapacity', type=d.T.boolean)]), - withStorageCapacity(storageCapacity): { storageCapacity: storageCapacity }, - '#withTokenRequests':: d.fn(help="\"TokenRequests indicates the CSI driver needs pods' service account tokens it is mounting volume for to do necessary authentication. Kubelet will pass the tokens in VolumeContext in the CSI NodePublishVolume calls. The CSI driver should parse and validate the following VolumeContext: \\\"csi.storage.k8s.io/serviceAccount.tokens\\\": {\\n \\\"\u003caudience\u003e\\\": {\\n \\\"token\\\": \u003ctoken\u003e,\\n \\\"expirationTimestamp\\\": \u003cexpiration timestamp in RFC3339\u003e,\\n },\\n ...\\n}\\n\\nNote: Audience in each TokenRequest should be different and at most one token is empty string. 
To receive a new token after expiry, RequiresRepublish can be used to trigger NodePublishVolume periodically.\\n\\nThis is a beta feature and only available when the CSIServiceAccountToken feature is enabled.\"", args=[d.arg(name='tokenRequests', type=d.T.array)]), - withTokenRequests(tokenRequests): { tokenRequests: if std.isArray(v=tokenRequests) then tokenRequests else [tokenRequests] }, - '#withTokenRequestsMixin':: d.fn(help="\"TokenRequests indicates the CSI driver needs pods' service account tokens it is mounting volume for to do necessary authentication. Kubelet will pass the tokens in VolumeContext in the CSI NodePublishVolume calls. The CSI driver should parse and validate the following VolumeContext: \\\"csi.storage.k8s.io/serviceAccount.tokens\\\": {\\n \\\"\u003caudience\u003e\\\": {\\n \\\"token\\\": \u003ctoken\u003e,\\n \\\"expirationTimestamp\\\": \u003cexpiration timestamp in RFC3339\u003e,\\n },\\n ...\\n}\\n\\nNote: Audience in each TokenRequest should be different and at most one token is empty string. To receive a new token after expiry, RequiresRepublish can be used to trigger NodePublishVolume periodically.\\n\\nThis is a beta feature and only available when the CSIServiceAccountToken feature is enabled.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='tokenRequests', type=d.T.array)]), - withTokenRequestsMixin(tokenRequests): { tokenRequests+: if std.isArray(v=tokenRequests) then tokenRequests else [tokenRequests] }, - '#withVolumeLifecycleModes':: d.fn(help='"VolumeLifecycleModes defines what kind of volumes this CSI volume driver supports. The default if the list is empty is \\"Persistent\\", which is the usage defined by the CSI specification and implemented in Kubernetes via the usual PV/PVC mechanism. The other mode is \\"Ephemeral\\". In this mode, volumes are defined inline inside the pod spec with CSIVolumeSource and their lifecycle is tied to the lifecycle of that pod. A driver has to be aware of this because it is only going to get a NodePublishVolume call for such a volume. For more information about implementing this mode, see https://kubernetes-csi.github.io/docs/ephemeral-local-volumes.html A driver can support one or more of these modes and more modes may be added in the future.\\n\\nThis field is immutable."', args=[d.arg(name='volumeLifecycleModes', type=d.T.array)]), - withVolumeLifecycleModes(volumeLifecycleModes): { volumeLifecycleModes: if std.isArray(v=volumeLifecycleModes) then volumeLifecycleModes else [volumeLifecycleModes] }, - '#withVolumeLifecycleModesMixin':: d.fn(help='"VolumeLifecycleModes defines what kind of volumes this CSI volume driver supports. The default if the list is empty is \\"Persistent\\", which is the usage defined by the CSI specification and implemented in Kubernetes via the usual PV/PVC mechanism. The other mode is \\"Ephemeral\\". In this mode, volumes are defined inline inside the pod spec with CSIVolumeSource and their lifecycle is tied to the lifecycle of that pod. A driver has to be aware of this because it is only going to get a NodePublishVolume call for such a volume. 
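// Illustrative sketch, not applied by this patch: the paired `with*` / `with*Mixin`
// functions above follow a set-versus-append convention, and scalar arguments are
// wrapped into arrays by the std.isArray() guard shown in the generated bodies.
// The `k` import path and the `csiDriverSpec` lookup are assumptions.
local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.21/main.libsonnet';
local spec = k.storage.v1beta1.csiDriverSpec;

spec.withVolumeLifecycleModes('Persistent')          // { volumeLifecycleModes: ['Persistent'] }
+ spec.withVolumeLifecycleModesMixin(['Ephemeral'])  // appends => ['Persistent', 'Ephemeral']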
For more information about implementing this mode, see https://kubernetes-csi.github.io/docs/ephemeral-local-volumes.html A driver can support one or more of these modes and more modes may be added in the future.\\n\\nThis field is immutable."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='volumeLifecycleModes', type=d.T.array)]), - withVolumeLifecycleModesMixin(volumeLifecycleModes): { volumeLifecycleModes+: if std.isArray(v=volumeLifecycleModes) then volumeLifecycleModes else [volumeLifecycleModes] }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1beta1/csiNode.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1beta1/csiNode.libsonnet deleted file mode 100644 index 634ba66a6da..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1beta1/csiNode.libsonnet +++ /dev/null @@ -1,63 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='csiNode', url='', help="\"DEPRECATED - This group version of CSINode is deprecated by storage/v1/CSINode. See the release notes for more information. CSINode holds information about all CSI drivers installed on a node. CSI drivers do not need to create the CSINode object directly. As long as they use the node-driver-registrar sidecar container, the kubelet will automatically populate the CSINode object for the CSI driver as part of kubelet plugin registration. CSINode has the same name as a node. If the object is missing, it means either there are no CSI Drivers available on the node, or the Kubelet version is low enough that it doesn't create this object. CSINode has an OwnerReference that points to the corresponding node object.\""), - '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), - metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), - withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), - withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, - '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. 
Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), - withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, - '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), - withDeletionGracePeriodSeconds(deletionGracePeriodSeconds): { metadata+: { deletionGracePeriodSeconds: deletionGracePeriodSeconds } }, - '#withDeletionTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='deletionTimestamp', type=d.T.string)]), - withDeletionTimestamp(deletionTimestamp): { metadata+: { deletionTimestamp: deletionTimestamp } }, - '#withFinalizers':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. 
If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), - withGenerateName(generateName): { metadata+: { generateName: generateName } }, - '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), - withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), - withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), - withLabelsMixin(labels): { metadata+: { labels+: labels } }, - '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. 
Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { metadata+: { namespace: namespace } }, - '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, - '#withOwnerReferencesMixin':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, - '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), - withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), - withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. 
It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), - withUid(uid): { metadata+: { uid: uid } }, - }, - '#new':: d.fn(help='new returns an instance of CSINode', args=[d.arg(name='name', type=d.T.string)]), - new(name): { - apiVersion: 'storage.k8s.io/v1beta1', - kind: 'CSINode', - } + self.metadata.withName(name=name), - '#spec':: d.obj(help='"CSINodeSpec holds information about the specification of all CSI drivers installed on a node"'), - spec: { - '#withDrivers':: d.fn(help='"drivers is a list of information of all CSI Drivers existing on a node. If all drivers in the list are uninstalled, this can become empty."', args=[d.arg(name='drivers', type=d.T.array)]), - withDrivers(drivers): { spec+: { drivers: if std.isArray(v=drivers) then drivers else [drivers] } }, - '#withDriversMixin':: d.fn(help='"drivers is a list of information of all CSI Drivers existing on a node. If all drivers in the list are uninstalled, this can become empty."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='drivers', type=d.T.array)]), - withDriversMixin(drivers): { spec+: { drivers+: if std.isArray(v=drivers) then drivers else [drivers] } }, - }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1beta1/storageClass.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1beta1/storageClass.libsonnet deleted file mode 100644 index 3c1b88e164b..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1beta1/storageClass.libsonnet +++ /dev/null @@ -1,76 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='storageClass', url='', help='"StorageClass describes the parameters for a class of storage for which PersistentVolumes can be dynamically provisioned.\\n\\nStorageClasses are non-namespaced; the name of the storage class according to etcd is in ObjectMeta.Name."'), - '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), - metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), - withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), - withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. 
This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, - '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), - withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, - '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), - withDeletionGracePeriodSeconds(deletionGracePeriodSeconds): { metadata+: { deletionGracePeriodSeconds: deletionGracePeriodSeconds } }, - '#withDeletionTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='deletionTimestamp', type=d.T.string)]), - withDeletionTimestamp(deletionTimestamp): { metadata+: { deletionTimestamp: deletionTimestamp } }, - '#withFinalizers':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. 
Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), - withGenerateName(generateName): { metadata+: { generateName: generateName } }, - '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), - withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), - withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), - withLabelsMixin(labels): { metadata+: { labels+: labels } }, - '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. 
A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { metadata+: { namespace: namespace } }, - '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, - '#withOwnerReferencesMixin':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, - '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), - withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), - withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), - withUid(uid): { metadata+: { uid: uid } }, - }, - '#new':: d.fn(help='new returns an instance of StorageClass', args=[d.arg(name='name', type=d.T.string)]), - new(name): { - apiVersion: 'storage.k8s.io/v1beta1', - kind: 'StorageClass', - } + self.metadata.withName(name=name), - '#withAllowVolumeExpansion':: d.fn(help='"AllowVolumeExpansion shows whether the storage class allow volume expand"', args=[d.arg(name='allowVolumeExpansion', type=d.T.boolean)]), - withAllowVolumeExpansion(allowVolumeExpansion): { allowVolumeExpansion: allowVolumeExpansion }, - '#withAllowedTopologies':: d.fn(help='"Restrict the node topologies where volumes can be dynamically provisioned. Each volume plugin defines its own supported topology specifications. An empty TopologySelectorTerm list means there is no topology restriction. This field is only honored by servers that enable the VolumeScheduling feature."', args=[d.arg(name='allowedTopologies', type=d.T.array)]), - withAllowedTopologies(allowedTopologies): { allowedTopologies: if std.isArray(v=allowedTopologies) then allowedTopologies else [allowedTopologies] }, - '#withAllowedTopologiesMixin':: d.fn(help='"Restrict the node topologies where volumes can be dynamically provisioned. Each volume plugin defines its own supported topology specifications. An empty TopologySelectorTerm list means there is no topology restriction. This field is only honored by servers that enable the VolumeScheduling feature."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='allowedTopologies', type=d.T.array)]), - withAllowedTopologiesMixin(allowedTopologies): { allowedTopologies+: if std.isArray(v=allowedTopologies) then allowedTopologies else [allowedTopologies] }, - '#withMountOptions':: d.fn(help='"Dynamically provisioned PersistentVolumes of this storage class are created with these mountOptions, e.g. [\\"ro\\", \\"soft\\"]. Not validated - mount of the PVs will simply fail if one is invalid."', args=[d.arg(name='mountOptions', type=d.T.array)]), - withMountOptions(mountOptions): { mountOptions: if std.isArray(v=mountOptions) then mountOptions else [mountOptions] }, - '#withMountOptionsMixin':: d.fn(help='"Dynamically provisioned PersistentVolumes of this storage class are created with these mountOptions, e.g. [\\"ro\\", \\"soft\\"]. 
Not validated - mount of the PVs will simply fail if one is invalid."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='mountOptions', type=d.T.array)]), - withMountOptionsMixin(mountOptions): { mountOptions+: if std.isArray(v=mountOptions) then mountOptions else [mountOptions] }, - '#withParameters':: d.fn(help='"Parameters holds the parameters for the provisioner that should create volumes of this storage class."', args=[d.arg(name='parameters', type=d.T.object)]), - withParameters(parameters): { parameters: parameters }, - '#withParametersMixin':: d.fn(help='"Parameters holds the parameters for the provisioner that should create volumes of this storage class."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='parameters', type=d.T.object)]), - withParametersMixin(parameters): { parameters+: parameters }, - '#withProvisioner':: d.fn(help='"Provisioner indicates the type of the provisioner."', args=[d.arg(name='provisioner', type=d.T.string)]), - withProvisioner(provisioner): { provisioner: provisioner }, - '#withReclaimPolicy':: d.fn(help='"Dynamically provisioned PersistentVolumes of this storage class are created with this reclaimPolicy. Defaults to Delete."', args=[d.arg(name='reclaimPolicy', type=d.T.string)]), - withReclaimPolicy(reclaimPolicy): { reclaimPolicy: reclaimPolicy }, - '#withVolumeBindingMode':: d.fn(help='"VolumeBindingMode indicates how PersistentVolumeClaims should be provisioned and bound. When unset, VolumeBindingImmediate is used. This field is only honored by servers that enable the VolumeScheduling feature."', args=[d.arg(name='volumeBindingMode', type=d.T.string)]), - withVolumeBindingMode(volumeBindingMode): { volumeBindingMode: volumeBindingMode }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1beta1/tokenRequest.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1beta1/tokenRequest.libsonnet deleted file mode 100644 index 50628cb06f6..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1beta1/tokenRequest.libsonnet +++ /dev/null @@ -1,10 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='tokenRequest', url='', help='"TokenRequest contains parameters of a service account token."'), - '#withAudience':: d.fn(help='"Audience is the intended audience of the token in \\"TokenRequestSpec\\". It will default to the audiences of kube apiserver."', args=[d.arg(name='audience', type=d.T.string)]), - withAudience(audience): { audience: audience }, - '#withExpirationSeconds':: d.fn(help='"ExpirationSeconds is the duration of validity of the token in \\"TokenRequestSpec\\". 
It has the same default value of \\"ExpirationSeconds\\" in \\"TokenRequestSpec\\', args=[d.arg(name='expirationSeconds', type=d.T.integer)]), - withExpirationSeconds(expirationSeconds): { expirationSeconds: expirationSeconds }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1beta1/volumeAttachment.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1beta1/volumeAttachment.libsonnet deleted file mode 100644 index 5dbd7b70227..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1beta1/volumeAttachment.libsonnet +++ /dev/null @@ -1,479 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='volumeAttachment', url='', help='"VolumeAttachment captures the intent to attach or detach the specified volume to/from the specified node.\\n\\nVolumeAttachment objects are non-namespaced."'), - '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), - metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), - withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), - withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, - '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), - withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, - '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), - withDeletionGracePeriodSeconds(deletionGracePeriodSeconds): { metadata+: { deletionGracePeriodSeconds: deletionGracePeriodSeconds } }, - '#withDeletionTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. 
Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='deletionTimestamp', type=d.T.string)]), - withDeletionTimestamp(deletionTimestamp): { metadata+: { deletionTimestamp: deletionTimestamp } }, - '#withFinalizers':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), - withGenerateName(generateName): { metadata+: { generateName: generateName } }, - '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), - withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), - withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), - withLabelsMixin(labels): { metadata+: { labels+: labels } }, - '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. 
More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { metadata+: { namespace: namespace } }, - '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, - '#withOwnerReferencesMixin':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, - '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), - withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), - withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), - withUid(uid): { metadata+: { uid: uid } }, - }, - '#new':: d.fn(help='new returns an instance of VolumeAttachment', args=[d.arg(name='name', type=d.T.string)]), - new(name): { - apiVersion: 'storage.k8s.io/v1beta1', - kind: 'VolumeAttachment', - } + self.metadata.withName(name=name), - '#spec':: d.obj(help='"VolumeAttachmentSpec is the specification of a VolumeAttachment request."'), - spec: { - '#source':: d.obj(help='"VolumeAttachmentSource represents a volume that should be attached. 
Right now only PersistenVolumes can be attached via external attacher, in future we may allow also inline volumes in pods. Exactly one member can be set."'), - source: { - '#inlineVolumeSpec':: d.obj(help='"PersistentVolumeSpec is the specification of a persistent volume."'), - inlineVolumeSpec: { - '#awsElasticBlockStore':: d.obj(help='"Represents a Persistent Disk resource in AWS.\\n\\nAn AWS EBS disk must exist before mounting to a container. The disk must also be in the same AWS zone as the kubelet. An AWS EBS disk can only be mounted as read/write once. AWS EBS volumes support ownership management and SELinux relabeling."'), - awsElasticBlockStore: { - '#withFsType':: d.fn(help='"Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore"', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { spec+: { source+: { inlineVolumeSpec+: { awsElasticBlockStore+: { fsType: fsType } } } } }, - '#withPartition':: d.fn(help='"The partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \\"1\\". Similarly, the volume partition for /dev/sda is \\"0\\" (or you can leave the property empty)."', args=[d.arg(name='partition', type=d.T.integer)]), - withPartition(partition): { spec+: { source+: { inlineVolumeSpec+: { awsElasticBlockStore+: { partition: partition } } } } }, - '#withReadOnly':: d.fn(help='"Specify \\"true\\" to force and set the ReadOnly property in VolumeMounts to \\"true\\". If omitted, the default is \\"false\\". More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore"', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { spec+: { source+: { inlineVolumeSpec+: { awsElasticBlockStore+: { readOnly: readOnly } } } } }, - '#withVolumeID':: d.fn(help='"Unique ID of the persistent disk resource in AWS (Amazon EBS volume). More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore"', args=[d.arg(name='volumeID', type=d.T.string)]), - withVolumeID(volumeID): { spec+: { source+: { inlineVolumeSpec+: { awsElasticBlockStore+: { volumeID: volumeID } } } } }, - }, - '#azureDisk':: d.obj(help='"AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod."'), - azureDisk: { - '#withCachingMode':: d.fn(help='"Host Caching mode: None, Read Only, Read Write."', args=[d.arg(name='cachingMode', type=d.T.string)]), - withCachingMode(cachingMode): { spec+: { source+: { inlineVolumeSpec+: { azureDisk+: { cachingMode: cachingMode } } } } }, - '#withDiskName':: d.fn(help='"The Name of the data disk in the blob storage"', args=[d.arg(name='diskName', type=d.T.string)]), - withDiskName(diskName): { spec+: { source+: { inlineVolumeSpec+: { azureDisk+: { diskName: diskName } } } } }, - '#withDiskURI':: d.fn(help='"The URI the data disk in the blob storage"', args=[d.arg(name='diskURI', type=d.T.string)]), - withDiskURI(diskURI): { spec+: { source+: { inlineVolumeSpec+: { azureDisk+: { diskURI: diskURI } } } } }, - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". 
Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { spec+: { source+: { inlineVolumeSpec+: { azureDisk+: { fsType: fsType } } } } }, - '#withKind':: d.fn(help='"Expected values Shared: multiple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared"', args=[d.arg(name='kind', type=d.T.string)]), - withKind(kind): { spec+: { source+: { inlineVolumeSpec+: { azureDisk+: { kind: kind } } } } }, - '#withReadOnly':: d.fn(help='"Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { spec+: { source+: { inlineVolumeSpec+: { azureDisk+: { readOnly: readOnly } } } } }, - }, - '#azureFile':: d.obj(help='"AzureFile represents an Azure File Service mount on the host and bind mount to the pod."'), - azureFile: { - '#withReadOnly':: d.fn(help='"Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { spec+: { source+: { inlineVolumeSpec+: { azureFile+: { readOnly: readOnly } } } } }, - '#withSecretName':: d.fn(help='"the name of secret that contains Azure Storage Account Name and Key"', args=[d.arg(name='secretName', type=d.T.string)]), - withSecretName(secretName): { spec+: { source+: { inlineVolumeSpec+: { azureFile+: { secretName: secretName } } } } }, - '#withSecretNamespace':: d.fn(help='"the namespace of the secret that contains Azure Storage Account Name and Key default is the same as the Pod"', args=[d.arg(name='secretNamespace', type=d.T.string)]), - withSecretNamespace(secretNamespace): { spec+: { source+: { inlineVolumeSpec+: { azureFile+: { secretNamespace: secretNamespace } } } } }, - '#withShareName':: d.fn(help='"Share Name"', args=[d.arg(name='shareName', type=d.T.string)]), - withShareName(shareName): { spec+: { source+: { inlineVolumeSpec+: { azureFile+: { shareName: shareName } } } } }, - }, - '#cephfs':: d.obj(help='"Represents a Ceph Filesystem mount that lasts the lifetime of a pod Cephfs volumes do not support ownership management or SELinux relabeling."'), - cephfs: { - '#secretRef':: d.obj(help='"SecretReference represents a Secret Reference. 
It has enough information to retrieve secret in any namespace"'), - secretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { spec+: { source+: { inlineVolumeSpec+: { cephfs+: { secretRef+: { name: name } } } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { spec+: { source+: { inlineVolumeSpec+: { cephfs+: { secretRef+: { namespace: namespace } } } } } }, - }, - '#withMonitors':: d.fn(help='"Required: Monitors is a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='monitors', type=d.T.array)]), - withMonitors(monitors): { spec+: { source+: { inlineVolumeSpec+: { cephfs+: { monitors: if std.isArray(v=monitors) then monitors else [monitors] } } } } }, - '#withMonitorsMixin':: d.fn(help='"Required: Monitors is a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='monitors', type=d.T.array)]), - withMonitorsMixin(monitors): { spec+: { source+: { inlineVolumeSpec+: { cephfs+: { monitors+: if std.isArray(v=monitors) then monitors else [monitors] } } } } }, - '#withPath':: d.fn(help='"Optional: Used as the mounted root, rather than the full Ceph tree, default is /"', args=[d.arg(name='path', type=d.T.string)]), - withPath(path): { spec+: { source+: { inlineVolumeSpec+: { cephfs+: { path: path } } } } }, - '#withReadOnly':: d.fn(help='"Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { spec+: { source+: { inlineVolumeSpec+: { cephfs+: { readOnly: readOnly } } } } }, - '#withSecretFile':: d.fn(help='"Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='secretFile', type=d.T.string)]), - withSecretFile(secretFile): { spec+: { source+: { inlineVolumeSpec+: { cephfs+: { secretFile: secretFile } } } } }, - '#withUser':: d.fn(help='"Optional: User is the rados user name, default is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='user', type=d.T.string)]), - withUser(user): { spec+: { source+: { inlineVolumeSpec+: { cephfs+: { user: user } } } } }, - }, - '#cinder':: d.obj(help='"Represents a cinder volume resource in Openstack. A Cinder volume must exist before mounting to a container. The volume must also be in the same region as the kubelet. Cinder volumes support ownership management and SELinux relabeling."'), - cinder: { - '#secretRef':: d.obj(help='"SecretReference represents a Secret Reference. 
It has enough information to retrieve secret in any namespace"'), - secretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { spec+: { source+: { inlineVolumeSpec+: { cinder+: { secretRef+: { name: name } } } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { spec+: { source+: { inlineVolumeSpec+: { cinder+: { secretRef+: { namespace: namespace } } } } } }, - }, - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://examples.k8s.io/mysql-cinder-pd/README.md"', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { spec+: { source+: { inlineVolumeSpec+: { cinder+: { fsType: fsType } } } } }, - '#withReadOnly':: d.fn(help='"Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/mysql-cinder-pd/README.md"', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { spec+: { source+: { inlineVolumeSpec+: { cinder+: { readOnly: readOnly } } } } }, - '#withVolumeID':: d.fn(help='"volume id used to identify the volume in cinder. More info: https://examples.k8s.io/mysql-cinder-pd/README.md"', args=[d.arg(name='volumeID', type=d.T.string)]), - withVolumeID(volumeID): { spec+: { source+: { inlineVolumeSpec+: { cinder+: { volumeID: volumeID } } } } }, - }, - '#claimRef':: d.obj(help='"ObjectReference contains enough information to let you inspect or modify the referred object."'), - claimRef: { - '#withApiVersion':: d.fn(help='"API version of the referent."', args=[d.arg(name='apiVersion', type=d.T.string)]), - withApiVersion(apiVersion): { spec+: { source+: { inlineVolumeSpec+: { claimRef+: { apiVersion: apiVersion } } } } }, - '#withFieldPath':: d.fn(help='"If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. For example, if the object reference is to a container within a pod, this would take on a value like: \\"spec.containers{name}\\" (where \\"name\\" refers to the name of the container that triggered the event) or if no container name is specified \\"spec.containers[2]\\" (container with index 2 in this pod). This syntax is chosen only to have some well-defined way of referencing a part of an object."', args=[d.arg(name='fieldPath', type=d.T.string)]), - withFieldPath(fieldPath): { spec+: { source+: { inlineVolumeSpec+: { claimRef+: { fieldPath: fieldPath } } } } }, - '#withKind':: d.fn(help='"Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds"', args=[d.arg(name='kind', type=d.T.string)]), - withKind(kind): { spec+: { source+: { inlineVolumeSpec+: { claimRef+: { kind: kind } } } } }, - '#withName':: d.fn(help='"Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { spec+: { source+: { inlineVolumeSpec+: { claimRef+: { name: name } } } } }, - '#withNamespace':: d.fn(help='"Namespace of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/"', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { spec+: { source+: { inlineVolumeSpec+: { claimRef+: { namespace: namespace } } } } }, - '#withResourceVersion':: d.fn(help='"Specific resourceVersion to which this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), - withResourceVersion(resourceVersion): { spec+: { source+: { inlineVolumeSpec+: { claimRef+: { resourceVersion: resourceVersion } } } } }, - '#withUid':: d.fn(help='"UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids"', args=[d.arg(name='uid', type=d.T.string)]), - withUid(uid): { spec+: { source+: { inlineVolumeSpec+: { claimRef+: { uid: uid } } } } }, - }, - '#csi':: d.obj(help='"Represents storage that is managed by an external CSI volume driver (Beta feature)"'), - csi: { - '#controllerExpandSecretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), - controllerExpandSecretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { spec+: { source+: { inlineVolumeSpec+: { csi+: { controllerExpandSecretRef+: { name: name } } } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { spec+: { source+: { inlineVolumeSpec+: { csi+: { controllerExpandSecretRef+: { namespace: namespace } } } } } }, - }, - '#controllerPublishSecretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), - controllerPublishSecretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { spec+: { source+: { inlineVolumeSpec+: { csi+: { controllerPublishSecretRef+: { name: name } } } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { spec+: { source+: { inlineVolumeSpec+: { csi+: { controllerPublishSecretRef+: { namespace: namespace } } } } } }, - }, - '#nodePublishSecretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), - nodePublishSecretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { spec+: { source+: { inlineVolumeSpec+: { csi+: { nodePublishSecretRef+: { name: name } } } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { spec+: { source+: { inlineVolumeSpec+: { csi+: { nodePublishSecretRef+: { namespace: namespace } } } } } }, - }, - '#nodeStageSecretRef':: d.obj(help='"SecretReference represents a Secret Reference. 
It has enough information to retrieve secret in any namespace"'), - nodeStageSecretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { spec+: { source+: { inlineVolumeSpec+: { csi+: { nodeStageSecretRef+: { name: name } } } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { spec+: { source+: { inlineVolumeSpec+: { csi+: { nodeStageSecretRef+: { namespace: namespace } } } } } }, - }, - '#withDriver':: d.fn(help='"Driver is the name of the driver to use for this volume. Required."', args=[d.arg(name='driver', type=d.T.string)]), - withDriver(driver): { spec+: { source+: { inlineVolumeSpec+: { csi+: { driver: driver } } } } }, - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\"."', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { spec+: { source+: { inlineVolumeSpec+: { csi+: { fsType: fsType } } } } }, - '#withReadOnly':: d.fn(help='"Optional: The value to pass to ControllerPublishVolumeRequest. Defaults to false (read/write)."', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { spec+: { source+: { inlineVolumeSpec+: { csi+: { readOnly: readOnly } } } } }, - '#withVolumeAttributes':: d.fn(help='"Attributes of the volume to publish."', args=[d.arg(name='volumeAttributes', type=d.T.object)]), - withVolumeAttributes(volumeAttributes): { spec+: { source+: { inlineVolumeSpec+: { csi+: { volumeAttributes: volumeAttributes } } } } }, - '#withVolumeAttributesMixin':: d.fn(help='"Attributes of the volume to publish."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='volumeAttributes', type=d.T.object)]), - withVolumeAttributesMixin(volumeAttributes): { spec+: { source+: { inlineVolumeSpec+: { csi+: { volumeAttributes+: volumeAttributes } } } } }, - '#withVolumeHandle':: d.fn(help='"VolumeHandle is the unique volume name returned by the CSI volume plugin’s CreateVolume to refer to the volume on all subsequent calls. Required."', args=[d.arg(name='volumeHandle', type=d.T.string)]), - withVolumeHandle(volumeHandle): { spec+: { source+: { inlineVolumeSpec+: { csi+: { volumeHandle: volumeHandle } } } } }, - }, - '#fc':: d.obj(help='"Represents a Fibre Channel volume. Fibre Channel volumes can only be mounted as read/write once. Fibre Channel volumes support ownership management and SELinux relabeling."'), - fc: { - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { spec+: { source+: { inlineVolumeSpec+: { fc+: { fsType: fsType } } } } }, - '#withLun':: d.fn(help='"Optional: FC target lun number"', args=[d.arg(name='lun', type=d.T.integer)]), - withLun(lun): { spec+: { source+: { inlineVolumeSpec+: { fc+: { lun: lun } } } } }, - '#withReadOnly':: d.fn(help='"Optional: Defaults to false (read/write). 
ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { spec+: { source+: { inlineVolumeSpec+: { fc+: { readOnly: readOnly } } } } }, - '#withTargetWWNs':: d.fn(help='"Optional: FC target worldwide names (WWNs)"', args=[d.arg(name='targetWWNs', type=d.T.array)]), - withTargetWWNs(targetWWNs): { spec+: { source+: { inlineVolumeSpec+: { fc+: { targetWWNs: if std.isArray(v=targetWWNs) then targetWWNs else [targetWWNs] } } } } }, - '#withTargetWWNsMixin':: d.fn(help='"Optional: FC target worldwide names (WWNs)"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='targetWWNs', type=d.T.array)]), - withTargetWWNsMixin(targetWWNs): { spec+: { source+: { inlineVolumeSpec+: { fc+: { targetWWNs+: if std.isArray(v=targetWWNs) then targetWWNs else [targetWWNs] } } } } }, - '#withWwids':: d.fn(help='"Optional: FC volume world wide identifiers (wwids) Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously."', args=[d.arg(name='wwids', type=d.T.array)]), - withWwids(wwids): { spec+: { source+: { inlineVolumeSpec+: { fc+: { wwids: if std.isArray(v=wwids) then wwids else [wwids] } } } } }, - '#withWwidsMixin':: d.fn(help='"Optional: FC volume world wide identifiers (wwids) Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='wwids', type=d.T.array)]), - withWwidsMixin(wwids): { spec+: { source+: { inlineVolumeSpec+: { fc+: { wwids+: if std.isArray(v=wwids) then wwids else [wwids] } } } } }, - }, - '#flexVolume':: d.obj(help='"FlexPersistentVolumeSource represents a generic persistent volume resource that is provisioned/attached using an exec based plugin."'), - flexVolume: { - '#secretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), - secretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { spec+: { source+: { inlineVolumeSpec+: { flexVolume+: { secretRef+: { name: name } } } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { spec+: { source+: { inlineVolumeSpec+: { flexVolume+: { secretRef+: { namespace: namespace } } } } } }, - }, - '#withDriver':: d.fn(help='"Driver is the name of the driver to use for this volume."', args=[d.arg(name='driver', type=d.T.string)]), - withDriver(driver): { spec+: { source+: { inlineVolumeSpec+: { flexVolume+: { driver: driver } } } } }, - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". 
The default filesystem depends on FlexVolume script."', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { spec+: { source+: { inlineVolumeSpec+: { flexVolume+: { fsType: fsType } } } } }, - '#withOptions':: d.fn(help='"Optional: Extra command options if any."', args=[d.arg(name='options', type=d.T.object)]), - withOptions(options): { spec+: { source+: { inlineVolumeSpec+: { flexVolume+: { options: options } } } } }, - '#withOptionsMixin':: d.fn(help='"Optional: Extra command options if any."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='options', type=d.T.object)]), - withOptionsMixin(options): { spec+: { source+: { inlineVolumeSpec+: { flexVolume+: { options+: options } } } } }, - '#withReadOnly':: d.fn(help='"Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { spec+: { source+: { inlineVolumeSpec+: { flexVolume+: { readOnly: readOnly } } } } }, - }, - '#flocker':: d.obj(help='"Represents a Flocker volume mounted by the Flocker agent. One and only one of datasetName and datasetUUID should be set. Flocker volumes do not support ownership management or SELinux relabeling."'), - flocker: { - '#withDatasetName':: d.fn(help='"Name of the dataset stored as metadata -> name on the dataset for Flocker should be considered as deprecated"', args=[d.arg(name='datasetName', type=d.T.string)]), - withDatasetName(datasetName): { spec+: { source+: { inlineVolumeSpec+: { flocker+: { datasetName: datasetName } } } } }, - '#withDatasetUUID':: d.fn(help='"UUID of the dataset. This is unique identifier of a Flocker dataset"', args=[d.arg(name='datasetUUID', type=d.T.string)]), - withDatasetUUID(datasetUUID): { spec+: { source+: { inlineVolumeSpec+: { flocker+: { datasetUUID: datasetUUID } } } } }, - }, - '#gcePersistentDisk':: d.obj(help='"Represents a Persistent Disk resource in Google Compute Engine.\\n\\nA GCE PD must exist before mounting to a container. The disk must also be in the same GCE project and zone as the kubelet. A GCE PD can only be mounted as read/write once or read-only many times. GCE PDs support ownership management and SELinux relabeling."'), - gcePersistentDisk: { - '#withFsType':: d.fn(help='"Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { spec+: { source+: { inlineVolumeSpec+: { gcePersistentDisk+: { fsType: fsType } } } } }, - '#withPartition':: d.fn(help='"The partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \\"1\\". Similarly, the volume partition for /dev/sda is \\"0\\" (or you can leave the property empty). More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='partition', type=d.T.integer)]), - withPartition(partition): { spec+: { source+: { inlineVolumeSpec+: { gcePersistentDisk+: { partition: partition } } } } }, - '#withPdName':: d.fn(help='"Unique name of the PD resource in GCE. Used to identify the disk in GCE. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='pdName', type=d.T.string)]), - withPdName(pdName): { spec+: { source+: { inlineVolumeSpec+: { gcePersistentDisk+: { pdName: pdName } } } } }, - '#withReadOnly':: d.fn(help='"ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { spec+: { source+: { inlineVolumeSpec+: { gcePersistentDisk+: { readOnly: readOnly } } } } }, - }, - '#glusterfs':: d.obj(help='"Represents a Glusterfs mount that lasts the lifetime of a pod. Glusterfs volumes do not support ownership management or SELinux relabeling."'), - glusterfs: { - '#withEndpoints':: d.fn(help='"EndpointsName is the endpoint name that details Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='endpoints', type=d.T.string)]), - withEndpoints(endpoints): { spec+: { source+: { inlineVolumeSpec+: { glusterfs+: { endpoints: endpoints } } } } }, - '#withEndpointsNamespace':: d.fn(help='"EndpointsNamespace is the namespace that contains Glusterfs endpoint. If this field is empty, the EndpointNamespace defaults to the same namespace as the bound PVC. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='endpointsNamespace', type=d.T.string)]), - withEndpointsNamespace(endpointsNamespace): { spec+: { source+: { inlineVolumeSpec+: { glusterfs+: { endpointsNamespace: endpointsNamespace } } } } }, - '#withPath':: d.fn(help='"Path is the Glusterfs volume path. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='path', type=d.T.string)]), - withPath(path): { spec+: { source+: { inlineVolumeSpec+: { glusterfs+: { path: path } } } } }, - '#withReadOnly':: d.fn(help='"ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions. Defaults to false. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { spec+: { source+: { inlineVolumeSpec+: { glusterfs+: { readOnly: readOnly } } } } }, - }, - '#hostPath':: d.obj(help='"Represents a host path mapped into a pod. Host path volumes do not support ownership management or SELinux relabeling."'), - hostPath: { - '#withPath':: d.fn(help='"Path of the directory on the host. If the path is a symlink, it will follow the link to the real path. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath"', args=[d.arg(name='path', type=d.T.string)]), - withPath(path): { spec+: { source+: { inlineVolumeSpec+: { hostPath+: { path: path } } } } }, - '#withType':: d.fn(help='"Type for HostPath Volume Defaults to \\"\\" More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath"', args=[d.arg(name='type', type=d.T.string)]), - withType(type): { spec+: { source+: { inlineVolumeSpec+: { hostPath+: { type: type } } } } }, - }, - '#iscsi':: d.obj(help='"ISCSIPersistentVolumeSource represents an ISCSI disk. ISCSI volumes can only be mounted as read/write once. ISCSI volumes support ownership management and SELinux relabeling."'), - iscsi: { - '#secretRef':: d.obj(help='"SecretReference represents a Secret Reference. 
It has enough information to retrieve secret in any namespace"'), - secretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { spec+: { source+: { inlineVolumeSpec+: { iscsi+: { secretRef+: { name: name } } } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { spec+: { source+: { inlineVolumeSpec+: { iscsi+: { secretRef+: { namespace: namespace } } } } } }, - }, - '#withChapAuthDiscovery':: d.fn(help='"whether support iSCSI Discovery CHAP authentication"', args=[d.arg(name='chapAuthDiscovery', type=d.T.boolean)]), - withChapAuthDiscovery(chapAuthDiscovery): { spec+: { source+: { inlineVolumeSpec+: { iscsi+: { chapAuthDiscovery: chapAuthDiscovery } } } } }, - '#withChapAuthSession':: d.fn(help='"whether support iSCSI Session CHAP authentication"', args=[d.arg(name='chapAuthSession', type=d.T.boolean)]), - withChapAuthSession(chapAuthSession): { spec+: { source+: { inlineVolumeSpec+: { iscsi+: { chapAuthSession: chapAuthSession } } } } }, - '#withFsType':: d.fn(help='"Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi"', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { spec+: { source+: { inlineVolumeSpec+: { iscsi+: { fsType: fsType } } } } }, - '#withInitiatorName':: d.fn(help='"Custom iSCSI Initiator Name. If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface : will be created for the connection."', args=[d.arg(name='initiatorName', type=d.T.string)]), - withInitiatorName(initiatorName): { spec+: { source+: { inlineVolumeSpec+: { iscsi+: { initiatorName: initiatorName } } } } }, - '#withIqn':: d.fn(help='"Target iSCSI Qualified Name."', args=[d.arg(name='iqn', type=d.T.string)]), - withIqn(iqn): { spec+: { source+: { inlineVolumeSpec+: { iscsi+: { iqn: iqn } } } } }, - '#withIscsiInterface':: d.fn(help="\"iSCSI Interface Name that uses an iSCSI transport. Defaults to 'default' (tcp).\"", args=[d.arg(name='iscsiInterface', type=d.T.string)]), - withIscsiInterface(iscsiInterface): { spec+: { source+: { inlineVolumeSpec+: { iscsi+: { iscsiInterface: iscsiInterface } } } } }, - '#withLun':: d.fn(help='"iSCSI Target Lun number."', args=[d.arg(name='lun', type=d.T.integer)]), - withLun(lun): { spec+: { source+: { inlineVolumeSpec+: { iscsi+: { lun: lun } } } } }, - '#withPortals':: d.fn(help='"iSCSI Target Portal List. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260)."', args=[d.arg(name='portals', type=d.T.array)]), - withPortals(portals): { spec+: { source+: { inlineVolumeSpec+: { iscsi+: { portals: if std.isArray(v=portals) then portals else [portals] } } } } }, - '#withPortalsMixin':: d.fn(help='"iSCSI Target Portal List. 
The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260)."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='portals', type=d.T.array)]), - withPortalsMixin(portals): { spec+: { source+: { inlineVolumeSpec+: { iscsi+: { portals+: if std.isArray(v=portals) then portals else [portals] } } } } }, - '#withReadOnly':: d.fn(help='"ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false."', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { spec+: { source+: { inlineVolumeSpec+: { iscsi+: { readOnly: readOnly } } } } }, - '#withTargetPortal':: d.fn(help='"iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260)."', args=[d.arg(name='targetPortal', type=d.T.string)]), - withTargetPortal(targetPortal): { spec+: { source+: { inlineVolumeSpec+: { iscsi+: { targetPortal: targetPortal } } } } }, - }, - '#local':: d.obj(help='"Local represents directly-attached storage with node affinity (Beta feature)"'), - 'local': { - '#withFsType':: d.fn(help='"Filesystem type to mount. It applies only when the Path is a block device. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". The default value is to auto-select a fileystem if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { spec+: { source+: { inlineVolumeSpec+: { 'local'+: { fsType: fsType } } } } }, - '#withPath':: d.fn(help='"The full path to the volume on the node. It can be either a directory or block device (disk, partition, ...)."', args=[d.arg(name='path', type=d.T.string)]), - withPath(path): { spec+: { source+: { inlineVolumeSpec+: { 'local'+: { path: path } } } } }, - }, - '#nfs':: d.obj(help='"Represents an NFS mount that lasts the lifetime of a pod. NFS volumes do not support ownership management or SELinux relabeling."'), - nfs: { - '#withPath':: d.fn(help='"Path that is exported by the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs"', args=[d.arg(name='path', type=d.T.string)]), - withPath(path): { spec+: { source+: { inlineVolumeSpec+: { nfs+: { path: path } } } } }, - '#withReadOnly':: d.fn(help='"ReadOnly here will force the NFS export to be mounted with read-only permissions. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs"', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { spec+: { source+: { inlineVolumeSpec+: { nfs+: { readOnly: readOnly } } } } }, - '#withServer':: d.fn(help='"Server is the hostname or IP address of the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs"', args=[d.arg(name='server', type=d.T.string)]), - withServer(server): { spec+: { source+: { inlineVolumeSpec+: { nfs+: { server: server } } } } }, - }, - '#nodeAffinity':: d.obj(help='"VolumeNodeAffinity defines constraints that limit what nodes this volume can be accessed from."'), - nodeAffinity: { - '#required':: d.obj(help='"A node selector represents the union of the results of one or more label queries over a set of nodes; that is, it represents the OR of the selectors represented by the node selector terms."'), - required: { - '#withNodeSelectorTerms':: d.fn(help='"Required. A list of node selector terms. 
The terms are ORed."', args=[d.arg(name='nodeSelectorTerms', type=d.T.array)]), - withNodeSelectorTerms(nodeSelectorTerms): { spec+: { source+: { inlineVolumeSpec+: { nodeAffinity+: { required+: { nodeSelectorTerms: if std.isArray(v=nodeSelectorTerms) then nodeSelectorTerms else [nodeSelectorTerms] } } } } } }, - '#withNodeSelectorTermsMixin':: d.fn(help='"Required. A list of node selector terms. The terms are ORed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='nodeSelectorTerms', type=d.T.array)]), - withNodeSelectorTermsMixin(nodeSelectorTerms): { spec+: { source+: { inlineVolumeSpec+: { nodeAffinity+: { required+: { nodeSelectorTerms+: if std.isArray(v=nodeSelectorTerms) then nodeSelectorTerms else [nodeSelectorTerms] } } } } } }, - }, - }, - '#photonPersistentDisk':: d.obj(help='"Represents a Photon Controller persistent disk resource."'), - photonPersistentDisk: { - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { spec+: { source+: { inlineVolumeSpec+: { photonPersistentDisk+: { fsType: fsType } } } } }, - '#withPdID':: d.fn(help='"ID that identifies Photon Controller persistent disk"', args=[d.arg(name='pdID', type=d.T.string)]), - withPdID(pdID): { spec+: { source+: { inlineVolumeSpec+: { photonPersistentDisk+: { pdID: pdID } } } } }, - }, - '#portworxVolume':: d.obj(help='"PortworxVolumeSource represents a Portworx volume resource."'), - portworxVolume: { - '#withFsType':: d.fn(help='"FSType represents the filesystem type to mount Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { spec+: { source+: { inlineVolumeSpec+: { portworxVolume+: { fsType: fsType } } } } }, - '#withReadOnly':: d.fn(help='"Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { spec+: { source+: { inlineVolumeSpec+: { portworxVolume+: { readOnly: readOnly } } } } }, - '#withVolumeID':: d.fn(help='"VolumeID uniquely identifies a Portworx volume"', args=[d.arg(name='volumeID', type=d.T.string)]), - withVolumeID(volumeID): { spec+: { source+: { inlineVolumeSpec+: { portworxVolume+: { volumeID: volumeID } } } } }, - }, - '#quobyte':: d.obj(help='"Represents a Quobyte mount that lasts the lifetime of a pod. Quobyte volumes do not support ownership management or SELinux relabeling."'), - quobyte: { - '#withGroup':: d.fn(help='"Group to map volume access to Default is no group"', args=[d.arg(name='group', type=d.T.string)]), - withGroup(group): { spec+: { source+: { inlineVolumeSpec+: { quobyte+: { group: group } } } } }, - '#withReadOnly':: d.fn(help='"ReadOnly here will force the Quobyte volume to be mounted with read-only permissions. 
Defaults to false."', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { spec+: { source+: { inlineVolumeSpec+: { quobyte+: { readOnly: readOnly } } } } }, - '#withRegistry':: d.fn(help='"Registry represents a single or multiple Quobyte Registry services specified as a string as host:port pair (multiple entries are separated with commas) which acts as the central registry for volumes"', args=[d.arg(name='registry', type=d.T.string)]), - withRegistry(registry): { spec+: { source+: { inlineVolumeSpec+: { quobyte+: { registry: registry } } } } }, - '#withTenant':: d.fn(help='"Tenant owning the given Quobyte volume in the Backend Used with dynamically provisioned Quobyte volumes, value is set by the plugin"', args=[d.arg(name='tenant', type=d.T.string)]), - withTenant(tenant): { spec+: { source+: { inlineVolumeSpec+: { quobyte+: { tenant: tenant } } } } }, - '#withUser':: d.fn(help='"User to map volume access to Defaults to serivceaccount user"', args=[d.arg(name='user', type=d.T.string)]), - withUser(user): { spec+: { source+: { inlineVolumeSpec+: { quobyte+: { user: user } } } } }, - '#withVolume':: d.fn(help='"Volume is a string that references an already created Quobyte volume by name."', args=[d.arg(name='volume', type=d.T.string)]), - withVolume(volume): { spec+: { source+: { inlineVolumeSpec+: { quobyte+: { volume: volume } } } } }, - }, - '#rbd':: d.obj(help='"Represents a Rados Block Device mount that lasts the lifetime of a pod. RBD volumes support ownership management and SELinux relabeling."'), - rbd: { - '#secretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), - secretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { spec+: { source+: { inlineVolumeSpec+: { rbd+: { secretRef+: { name: name } } } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { spec+: { source+: { inlineVolumeSpec+: { rbd+: { secretRef+: { namespace: namespace } } } } } }, - }, - '#withFsType':: d.fn(help='"Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd"', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { spec+: { source+: { inlineVolumeSpec+: { rbd+: { fsType: fsType } } } } }, - '#withImage':: d.fn(help='"The rados image name. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='image', type=d.T.string)]), - withImage(image): { spec+: { source+: { inlineVolumeSpec+: { rbd+: { image: image } } } } }, - '#withKeyring':: d.fn(help='"Keyring is the path to key ring for RBDUser. Default is /etc/ceph/keyring. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='keyring', type=d.T.string)]), - withKeyring(keyring): { spec+: { source+: { inlineVolumeSpec+: { rbd+: { keyring: keyring } } } } }, - '#withMonitors':: d.fn(help='"A collection of Ceph monitors. 
More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='monitors', type=d.T.array)]), - withMonitors(monitors): { spec+: { source+: { inlineVolumeSpec+: { rbd+: { monitors: if std.isArray(v=monitors) then monitors else [monitors] } } } } }, - '#withMonitorsMixin':: d.fn(help='"A collection of Ceph monitors. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='monitors', type=d.T.array)]), - withMonitorsMixin(monitors): { spec+: { source+: { inlineVolumeSpec+: { rbd+: { monitors+: if std.isArray(v=monitors) then monitors else [monitors] } } } } }, - '#withPool':: d.fn(help='"The rados pool name. Default is rbd. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='pool', type=d.T.string)]), - withPool(pool): { spec+: { source+: { inlineVolumeSpec+: { rbd+: { pool: pool } } } } }, - '#withReadOnly':: d.fn(help='"ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { spec+: { source+: { inlineVolumeSpec+: { rbd+: { readOnly: readOnly } } } } }, - '#withUser':: d.fn(help='"The rados user name. Default is admin. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='user', type=d.T.string)]), - withUser(user): { spec+: { source+: { inlineVolumeSpec+: { rbd+: { user: user } } } } }, - }, - '#scaleIO':: d.obj(help='"ScaleIOPersistentVolumeSource represents a persistent ScaleIO volume"'), - scaleIO: { - '#secretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), - secretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { spec+: { source+: { inlineVolumeSpec+: { scaleIO+: { secretRef+: { name: name } } } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { spec+: { source+: { inlineVolumeSpec+: { scaleIO+: { secretRef+: { namespace: namespace } } } } } }, - }, - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Default is \\"xfs\\', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { spec+: { source+: { inlineVolumeSpec+: { scaleIO+: { fsType: fsType } } } } }, - '#withGateway':: d.fn(help='"The host address of the ScaleIO API Gateway."', args=[d.arg(name='gateway', type=d.T.string)]), - withGateway(gateway): { spec+: { source+: { inlineVolumeSpec+: { scaleIO+: { gateway: gateway } } } } }, - '#withProtectionDomain':: d.fn(help='"The name of the ScaleIO Protection Domain for the configured storage."', args=[d.arg(name='protectionDomain', type=d.T.string)]), - withProtectionDomain(protectionDomain): { spec+: { source+: { inlineVolumeSpec+: { scaleIO+: { protectionDomain: protectionDomain } } } } }, - '#withReadOnly':: d.fn(help='"Defaults to false (read/write). 
ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { spec+: { source+: { inlineVolumeSpec+: { scaleIO+: { readOnly: readOnly } } } } }, - '#withSslEnabled':: d.fn(help='"Flag to enable/disable SSL communication with Gateway, default false"', args=[d.arg(name='sslEnabled', type=d.T.boolean)]), - withSslEnabled(sslEnabled): { spec+: { source+: { inlineVolumeSpec+: { scaleIO+: { sslEnabled: sslEnabled } } } } }, - '#withStorageMode':: d.fn(help='"Indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. Default is ThinProvisioned."', args=[d.arg(name='storageMode', type=d.T.string)]), - withStorageMode(storageMode): { spec+: { source+: { inlineVolumeSpec+: { scaleIO+: { storageMode: storageMode } } } } }, - '#withStoragePool':: d.fn(help='"The ScaleIO Storage Pool associated with the protection domain."', args=[d.arg(name='storagePool', type=d.T.string)]), - withStoragePool(storagePool): { spec+: { source+: { inlineVolumeSpec+: { scaleIO+: { storagePool: storagePool } } } } }, - '#withSystem':: d.fn(help='"The name of the storage system as configured in ScaleIO."', args=[d.arg(name='system', type=d.T.string)]), - withSystem(system): { spec+: { source+: { inlineVolumeSpec+: { scaleIO+: { system: system } } } } }, - '#withVolumeName':: d.fn(help='"The name of a volume already created in the ScaleIO system that is associated with this volume source."', args=[d.arg(name='volumeName', type=d.T.string)]), - withVolumeName(volumeName): { spec+: { source+: { inlineVolumeSpec+: { scaleIO+: { volumeName: volumeName } } } } }, - }, - '#storageos':: d.obj(help='"Represents a StorageOS persistent volume resource."'), - storageos: { - '#secretRef':: d.obj(help='"ObjectReference contains enough information to let you inspect or modify the referred object."'), - secretRef: { - '#withApiVersion':: d.fn(help='"API version of the referent."', args=[d.arg(name='apiVersion', type=d.T.string)]), - withApiVersion(apiVersion): { spec+: { source+: { inlineVolumeSpec+: { storageos+: { secretRef+: { apiVersion: apiVersion } } } } } }, - '#withFieldPath':: d.fn(help='"If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. For example, if the object reference is to a container within a pod, this would take on a value like: \\"spec.containers{name}\\" (where \\"name\\" refers to the name of the container that triggered the event) or if no container name is specified \\"spec.containers[2]\\" (container with index 2 in this pod). This syntax is chosen only to have some well-defined way of referencing a part of an object."', args=[d.arg(name='fieldPath', type=d.T.string)]), - withFieldPath(fieldPath): { spec+: { source+: { inlineVolumeSpec+: { storageos+: { secretRef+: { fieldPath: fieldPath } } } } } }, - '#withKind':: d.fn(help='"Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds"', args=[d.arg(name='kind', type=d.T.string)]), - withKind(kind): { spec+: { source+: { inlineVolumeSpec+: { storageos+: { secretRef+: { kind: kind } } } } } }, - '#withName':: d.fn(help='"Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { spec+: { source+: { inlineVolumeSpec+: { storageos+: { secretRef+: { name: name } } } } } }, - '#withNamespace':: d.fn(help='"Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/"', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { spec+: { source+: { inlineVolumeSpec+: { storageos+: { secretRef+: { namespace: namespace } } } } } }, - '#withResourceVersion':: d.fn(help='"Specific resourceVersion to which this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), - withResourceVersion(resourceVersion): { spec+: { source+: { inlineVolumeSpec+: { storageos+: { secretRef+: { resourceVersion: resourceVersion } } } } } }, - '#withUid':: d.fn(help='"UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids"', args=[d.arg(name='uid', type=d.T.string)]), - withUid(uid): { spec+: { source+: { inlineVolumeSpec+: { storageos+: { secretRef+: { uid: uid } } } } } }, - }, - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { spec+: { source+: { inlineVolumeSpec+: { storageos+: { fsType: fsType } } } } }, - '#withReadOnly':: d.fn(help='"Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { spec+: { source+: { inlineVolumeSpec+: { storageos+: { readOnly: readOnly } } } } }, - '#withVolumeName':: d.fn(help='"VolumeName is the human-readable name of the StorageOS volume. Volume names are only unique within a namespace."', args=[d.arg(name='volumeName', type=d.T.string)]), - withVolumeName(volumeName): { spec+: { source+: { inlineVolumeSpec+: { storageos+: { volumeName: volumeName } } } } }, - '#withVolumeNamespace':: d.fn(help="\"VolumeNamespace specifies the scope of the volume within StorageOS. If no namespace is specified then the Pod's namespace will be used. This allows the Kubernetes name scoping to be mirrored within StorageOS for tighter integration. Set VolumeName to any name to override the default behaviour. Set to \\\"default\\\" if you are not using namespaces within StorageOS. Namespaces that do not pre-exist within StorageOS will be created.\"", args=[d.arg(name='volumeNamespace', type=d.T.string)]), - withVolumeNamespace(volumeNamespace): { spec+: { source+: { inlineVolumeSpec+: { storageos+: { volumeNamespace: volumeNamespace } } } } }, - }, - '#vsphereVolume':: d.obj(help='"Represents a vSphere volume resource."'), - vsphereVolume: { - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". 
Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { spec+: { source+: { inlineVolumeSpec+: { vsphereVolume+: { fsType: fsType } } } } }, - '#withStoragePolicyID':: d.fn(help='"Storage Policy Based Management (SPBM) profile ID associated with the StoragePolicyName."', args=[d.arg(name='storagePolicyID', type=d.T.string)]), - withStoragePolicyID(storagePolicyID): { spec+: { source+: { inlineVolumeSpec+: { vsphereVolume+: { storagePolicyID: storagePolicyID } } } } }, - '#withStoragePolicyName':: d.fn(help='"Storage Policy Based Management (SPBM) profile name."', args=[d.arg(name='storagePolicyName', type=d.T.string)]), - withStoragePolicyName(storagePolicyName): { spec+: { source+: { inlineVolumeSpec+: { vsphereVolume+: { storagePolicyName: storagePolicyName } } } } }, - '#withVolumePath':: d.fn(help='"Path that identifies vSphere volume vmdk"', args=[d.arg(name='volumePath', type=d.T.string)]), - withVolumePath(volumePath): { spec+: { source+: { inlineVolumeSpec+: { vsphereVolume+: { volumePath: volumePath } } } } }, - }, - '#withAccessModes':: d.fn(help='"AccessModes contains all ways the volume can be mounted. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes"', args=[d.arg(name='accessModes', type=d.T.array)]), - withAccessModes(accessModes): { spec+: { source+: { inlineVolumeSpec+: { accessModes: if std.isArray(v=accessModes) then accessModes else [accessModes] } } } }, - '#withAccessModesMixin':: d.fn(help='"AccessModes contains all ways the volume can be mounted. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='accessModes', type=d.T.array)]), - withAccessModesMixin(accessModes): { spec+: { source+: { inlineVolumeSpec+: { accessModes+: if std.isArray(v=accessModes) then accessModes else [accessModes] } } } }, - '#withCapacity':: d.fn(help="\"A description of the persistent volume's resources and capacity. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity\"", args=[d.arg(name='capacity', type=d.T.object)]), - withCapacity(capacity): { spec+: { source+: { inlineVolumeSpec+: { capacity: capacity } } } }, - '#withCapacityMixin':: d.fn(help="\"A description of the persistent volume's resources and capacity. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='capacity', type=d.T.object)]), - withCapacityMixin(capacity): { spec+: { source+: { inlineVolumeSpec+: { capacity+: capacity } } } }, - '#withMountOptions':: d.fn(help='"A list of mount options, e.g. [\\"ro\\", \\"soft\\"]. Not validated - mount will simply fail if one is invalid. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#mount-options"', args=[d.arg(name='mountOptions', type=d.T.array)]), - withMountOptions(mountOptions): { spec+: { source+: { inlineVolumeSpec+: { mountOptions: if std.isArray(v=mountOptions) then mountOptions else [mountOptions] } } } }, - '#withMountOptionsMixin':: d.fn(help='"A list of mount options, e.g. [\\"ro\\", \\"soft\\"]. Not validated - mount will simply fail if one is invalid. 
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#mount-options"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='mountOptions', type=d.T.array)]), - withMountOptionsMixin(mountOptions): { spec+: { source+: { inlineVolumeSpec+: { mountOptions+: if std.isArray(v=mountOptions) then mountOptions else [mountOptions] } } } }, - '#withPersistentVolumeReclaimPolicy':: d.fn(help='"What happens to a persistent volume when released from its claim. Valid options are Retain (default for manually created PersistentVolumes), Delete (default for dynamically provisioned PersistentVolumes), and Recycle (deprecated). Recycle must be supported by the volume plugin underlying this PersistentVolume. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#reclaiming"', args=[d.arg(name='persistentVolumeReclaimPolicy', type=d.T.string)]), - withPersistentVolumeReclaimPolicy(persistentVolumeReclaimPolicy): { spec+: { source+: { inlineVolumeSpec+: { persistentVolumeReclaimPolicy: persistentVolumeReclaimPolicy } } } }, - '#withStorageClassName':: d.fn(help='"Name of StorageClass to which this persistent volume belongs. Empty value means that this volume does not belong to any StorageClass."', args=[d.arg(name='storageClassName', type=d.T.string)]), - withStorageClassName(storageClassName): { spec+: { source+: { inlineVolumeSpec+: { storageClassName: storageClassName } } } }, - '#withVolumeMode':: d.fn(help='"volumeMode defines if a volume is intended to be used with a formatted filesystem or to remain in raw block state. Value of Filesystem is implied when not included in spec."', args=[d.arg(name='volumeMode', type=d.T.string)]), - withVolumeMode(volumeMode): { spec+: { source+: { inlineVolumeSpec+: { volumeMode: volumeMode } } } }, - }, - '#withPersistentVolumeName':: d.fn(help='"Name of the persistent volume to attach."', args=[d.arg(name='persistentVolumeName', type=d.T.string)]), - withPersistentVolumeName(persistentVolumeName): { spec+: { source+: { persistentVolumeName: persistentVolumeName } } }, - }, - '#withAttacher':: d.fn(help='"Attacher indicates the name of the volume driver that MUST handle this request. This is the name returned by GetPluginName()."', args=[d.arg(name='attacher', type=d.T.string)]), - withAttacher(attacher): { spec+: { attacher: attacher } }, - '#withNodeName':: d.fn(help='"The node that the volume should be attached to."', args=[d.arg(name='nodeName', type=d.T.string)]), - withNodeName(nodeName): { spec+: { nodeName: nodeName } }, - }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1beta1/volumeAttachmentStatus.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1beta1/volumeAttachmentStatus.libsonnet deleted file mode 100644 index 638334f0b9d..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1beta1/volumeAttachmentStatus.libsonnet +++ /dev/null @@ -1,26 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='volumeAttachmentStatus', url='', help='"VolumeAttachmentStatus is the status of a VolumeAttachment request."'), - '#attachError':: d.obj(help='"VolumeError captures an error encountered during a volume operation."'), - attachError: { - '#withMessage':: d.fn(help='"String detailing the error encountered during Attach or Detach operation. 
This string may be logged, so it should not contain sensitive information."', args=[d.arg(name='message', type=d.T.string)]), - withMessage(message): { attachError+: { message: message } }, - '#withTime':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='time', type=d.T.string)]), - withTime(time): { attachError+: { time: time } }, - }, - '#detachError':: d.obj(help='"VolumeError captures an error encountered during a volume operation."'), - detachError: { - '#withMessage':: d.fn(help='"String detailing the error encountered during Attach or Detach operation. This string may be logged, so it should not contain sensitive information."', args=[d.arg(name='message', type=d.T.string)]), - withMessage(message): { detachError+: { message: message } }, - '#withTime':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='time', type=d.T.string)]), - withTime(time): { detachError+: { time: time } }, - }, - '#withAttached':: d.fn(help='"Indicates the volume is successfully attached. This field must only be set by the entity completing the attach operation, i.e. the external-attacher."', args=[d.arg(name='attached', type=d.T.boolean)]), - withAttached(attached): { attached: attached }, - '#withAttachmentMetadata':: d.fn(help='"Upon successful attach, this field is populated with any information returned by the attach operation that must be passed into subsequent WaitForAttach or Mount calls. This field must only be set by the entity completing the attach operation, i.e. the external-attacher."', args=[d.arg(name='attachmentMetadata', type=d.T.object)]), - withAttachmentMetadata(attachmentMetadata): { attachmentMetadata: attachmentMetadata }, - '#withAttachmentMetadataMixin':: d.fn(help='"Upon successful attach, this field is populated with any information returned by the attach operation that must be passed into subsequent WaitForAttach or Mount calls. This field must only be set by the entity completing the attach operation, i.e. the external-attacher."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='attachmentMetadata', type=d.T.object)]), - withAttachmentMetadataMixin(attachmentMetadata): { attachmentMetadata+: attachmentMetadata }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1beta1/volumeError.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1beta1/volumeError.libsonnet deleted file mode 100644 index 87f1e4e18b8..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1beta1/volumeError.libsonnet +++ /dev/null @@ -1,10 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='volumeError', url='', help='"VolumeError captures an error encountered during a volume operation."'), - '#withMessage':: d.fn(help='"String detailing the error encountered during Attach or Detach operation. This string may be logged, so it should not contain sensitive information."', args=[d.arg(name='message', type=d.T.string)]), - withMessage(message): { message: message }, - '#withTime':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. 
Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='time', type=d.T.string)]), - withTime(time): { time: time }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1beta1/volumeNodeResources.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1beta1/volumeNodeResources.libsonnet deleted file mode 100644 index 0281968fcfe..00000000000 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1beta1/volumeNodeResources.libsonnet +++ /dev/null @@ -1,8 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='volumeNodeResources', url='', help='"VolumeNodeResources is a set of resource limits for scheduling of volumes."'), - '#withCount':: d.fn(help='"Maximum number of unique volumes managed by the CSI driver that can be used on a node. A volume that is both attached and mounted on a node is considered to be used once, not twice. The same rule applies for a unique volume that is shared among multiple pods on the same node. If this field is nil, then the supported number of volumes on this node is unbounded."', args=[d.arg(name='count', type=d.T.integer)]), - withCount(count): { count: count }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_custom/apps.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_custom/apps.libsonnet similarity index 93% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_custom/apps.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_custom/apps.libsonnet index 27a4b615704..1586fa73fd2 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_custom/apps.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_custom/apps.libsonnet @@ -1,3 +1,4 @@ +local gen = import '../gen.libsonnet'; local d = import 'doc-util/main.libsonnet'; local patch = { @@ -33,7 +34,7 @@ local patch = { ):: local labels = { name: name } + podLabels; super.new(name) - + super.spec.withReplicas(replicas) + + (if replicas == null then {} else super.spec.withReplicas(replicas)) + super.spec.template.spec.withContainers(containers) + super.spec.template.metadata.withLabels(labels) + super.spec.selector.withMatchLabels(labels), @@ -72,12 +73,7 @@ local patch = { }; { - extensions+: { // old, remove asap - v1beta1+: patch, - }, apps+: { v1+: patch, - v1beta1+: patch, - v1beta2+: patch, }, } diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_custom/autoscaling.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_custom/autoscaling.libsonnet similarity index 94% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_custom/autoscaling.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_custom/autoscaling.libsonnet index bcd223b4038..d48005cfad7 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_custom/autoscaling.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_custom/autoscaling.libsonnet @@ -27,7 +27,5 @@ local patch = { autoscaling+: { v1+: patch, v2+: patch, - v2beta1+: patch, - v2beta2+: patch, }, } diff --git 
a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_custom/batch.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_custom/batch.libsonnet similarity index 92% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_custom/batch.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_custom/batch.libsonnet index 7004a7357d9..3b39ad76806 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_custom/batch.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_custom/batch.libsonnet @@ -22,7 +22,5 @@ local patch = { { batch+: { v1+: patch, - v1beta1+: patch, - v2alpha1+: patch, }, } diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_custom/core.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_custom/core.libsonnet similarity index 100% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_custom/core.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_custom/core.libsonnet diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_custom/list.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_custom/list.libsonnet similarity index 100% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_custom/list.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_custom/list.libsonnet diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_custom/mapContainers.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_custom/mapContainers.libsonnet similarity index 85% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_custom/mapContainers.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_custom/mapContainers.libsonnet index 5797c1f8d24..bba2659bb6b 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_custom/mapContainers.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_custom/mapContainers.libsonnet @@ -27,7 +27,7 @@ local patch = { }, '#mapContainersWithName': d.fn('`mapContainersWithName` is like `mapContainers`, but only applies to those containers in the `names` array', - [d.arg('names', d.T.array), d.arg('f', d.T.func)]), + [d.arg('names', d.T.array), d.arg('f', d.T.func)]), mapContainersWithName(names, f, includeInitContainers=false):: local nameSet = if std.type(names) == 'array' then std.set(names) else std.set([names]); local inNameSet(name) = std.length(std.setInter(nameSet, std.set([name]))) > 0; @@ -56,24 +56,25 @@ local cronPatch = patch { }; { - core+: { v1+: { - pod+: patch, - podTemplate+: patch, - replicationController+: patch, - } }, + core+: { + v1+: { + pod+: patch, + podTemplate+: patch, + replicationController+: patch, + }, + }, batch+: { v1+: { job+: patch, cronJob+: cronPatch, }, - v1beta1+: { - cronJob+: cronPatch, + }, + apps+: { + v1+: { + daemonSet+: patch, + deployment+: patch, + replicaSet+: patch, + statefulSet+: patch, }, }, - apps+: { v1+: { - daemonSet+: patch, - deployment+: patch, - replicaSet+: patch, - statefulSet+: patch, - } }, } diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_custom/rbac.libsonnet 
b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_custom/rbac.libsonnet similarity index 95% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_custom/rbac.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_custom/rbac.libsonnet index 69327fb6116..ab3d1ab04c4 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_custom/rbac.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_custom/rbac.libsonnet @@ -37,7 +37,5 @@ local patch = { { rbac+: { v1+: patch, - v1alpha1+: patch, - v1beta1+: patch, }, } diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_custom/volumeMounts.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_custom/volumeMounts.libsonnet similarity index 98% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_custom/volumeMounts.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_custom/volumeMounts.libsonnet index 349fa010f99..9c911e8e14d 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_custom/volumeMounts.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_custom/volumeMounts.libsonnet @@ -85,7 +85,7 @@ local d = import 'doc-util/main.libsonnet'; else {} ); local volumeMixins = [volume.fromConfigMap(name, name) + volumeMixin]; - local annotations = {['%s-hash' % name]: hash}; + local annotations = { ['%s-hash' % name]: hash }; super.mapContainers(addMount, includeInitContainers=includeInitContainers) + if std.objectHas(super.spec, 'template') @@ -311,14 +311,13 @@ local d = import 'doc-util/main.libsonnet'; job+: patch, cronJob+: patch, }, - v1beta1+: { - cronJob+: patch, + }, + apps+: { + v1+: { + daemonSet+: patch, + deployment+: patch, + replicaSet+: patch, + statefulSet+: patch, }, }, - apps+: { v1+: { - daemonSet+: patch, - deployment+: patch, - replicaSet+: patch, - statefulSet+: patch, - } }, } diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/admissionregistration/main.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/main.libsonnet similarity index 80% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/admissionregistration/main.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/main.libsonnet index 5c9c107733f..a64ea12d58a 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/admissionregistration/main.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/main.libsonnet @@ -2,5 +2,6 @@ local d = (import 'doc-util/main.libsonnet'), '#':: d.pkg(name='admissionregistration', url='', help=''), v1: (import 'v1/main.libsonnet'), + v1alpha1: (import 'v1alpha1/main.libsonnet'), v1beta1: (import 'v1beta1/main.libsonnet'), } diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/admissionregistration/v1/main.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1/main.libsonnet similarity index 91% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/admissionregistration/v1/main.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1/main.libsonnet index 
c98e455e884..3d4dad1ab2d 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/admissionregistration/v1/main.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1/main.libsonnet @@ -1,6 +1,7 @@ { local d = (import 'doc-util/main.libsonnet'), '#':: d.pkg(name='v1', url='', help=''), + matchCondition: (import 'matchCondition.libsonnet'), mutatingWebhook: (import 'mutatingWebhook.libsonnet'), mutatingWebhookConfiguration: (import 'mutatingWebhookConfiguration.libsonnet'), ruleWithOperations: (import 'ruleWithOperations.libsonnet'), diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1/matchCondition.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1/matchCondition.libsonnet new file mode 100644 index 00000000000..d28c501da10 --- /dev/null +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1/matchCondition.libsonnet @@ -0,0 +1,10 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='matchCondition', url='', help='"MatchCondition represents a condition which must by fulfilled for a request to be sent to a webhook."'), + '#withExpression':: d.fn(help="\"Expression represents the expression which will be evaluated by CEL. Must evaluate to bool. CEL expressions have access to the contents of the AdmissionRequest and Authorizer, organized into CEL variables:\\n\\n'object' - The object from the incoming request. The value is null for DELETE requests. 'oldObject' - The existing object. The value is null for CREATE requests. 'request' - Attributes of the admission request(/pkg/apis/admission/types.go#AdmissionRequest). 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request.\\n See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz\\n'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the\\n request resource.\\nDocumentation on CEL: https://kubernetes.io/docs/reference/using-api/cel/\\n\\nRequired.\"", args=[d.arg(name='expression', type=d.T.string)]), + withExpression(expression): { expression: expression }, + '#withName':: d.fn(help="\"Name is an identifier for this match condition, used for strategic merging of MatchConditions, as well as providing an identifier for logging purposes. A good name should be descriptive of the associated expression. Name must be a qualified name consisting of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character (e.g. 'MyName', or 'my.name', or '123-abc', regex used for validation is '([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]') with an optional DNS subdomain prefix and '/' (e.g. 
'example.com/MyName')\\n\\nRequired.\"", args=[d.arg(name='name', type=d.T.string)]), + withName(name): { name: name }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/admissionregistration/v1/mutatingWebhook.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1/mutatingWebhook.libsonnet similarity index 86% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/admissionregistration/v1/mutatingWebhook.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1/mutatingWebhook.libsonnet index 1822f48700b..6bf8f54c1f9 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/admissionregistration/v1/mutatingWebhook.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1/mutatingWebhook.libsonnet @@ -47,6 +47,10 @@ withAdmissionReviewVersionsMixin(admissionReviewVersions): { admissionReviewVersions+: if std.isArray(v=admissionReviewVersions) then admissionReviewVersions else [admissionReviewVersions] }, '#withFailurePolicy':: d.fn(help='"FailurePolicy defines how unrecognized errors from the admission endpoint are handled - allowed values are Ignore or Fail. Defaults to Fail."', args=[d.arg(name='failurePolicy', type=d.T.string)]), withFailurePolicy(failurePolicy): { failurePolicy: failurePolicy }, + '#withMatchConditions':: d.fn(help='"MatchConditions is a list of conditions that must be met for a request to be sent to this webhook. Match conditions filter requests that have already been matched by the rules, namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests. There are a maximum of 64 match conditions allowed.\\n\\nThe exact matching logic is (in order):\\n 1. If ANY matchCondition evaluates to FALSE, the webhook is skipped.\\n 2. If ALL matchConditions evaluate to TRUE, the webhook is called.\\n 3. If any matchCondition evaluates to an error (but none are FALSE):\\n - If failurePolicy=Fail, reject the request\\n - If failurePolicy=Ignore, the error is ignored and the webhook is skipped\\n\\nThis is a beta feature and managed by the AdmissionWebhookMatchConditions feature gate."', args=[d.arg(name='matchConditions', type=d.T.array)]), + withMatchConditions(matchConditions): { matchConditions: if std.isArray(v=matchConditions) then matchConditions else [matchConditions] }, + '#withMatchConditionsMixin':: d.fn(help='"MatchConditions is a list of conditions that must be met for a request to be sent to this webhook. Match conditions filter requests that have already been matched by the rules, namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests. There are a maximum of 64 match conditions allowed.\\n\\nThe exact matching logic is (in order):\\n 1. If ANY matchCondition evaluates to FALSE, the webhook is skipped.\\n 2. If ALL matchConditions evaluate to TRUE, the webhook is called.\\n 3. 
If any matchCondition evaluates to an error (but none are FALSE):\\n - If failurePolicy=Fail, reject the request\\n - If failurePolicy=Ignore, the error is ignored and the webhook is skipped\\n\\nThis is a beta feature and managed by the AdmissionWebhookMatchConditions feature gate."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchConditions', type=d.T.array)]), + withMatchConditionsMixin(matchConditions): { matchConditions+: if std.isArray(v=matchConditions) then matchConditions else [matchConditions] }, '#withMatchPolicy':: d.fn(help='"matchPolicy defines how the \\"rules\\" list is used to match incoming requests. Allowed values are \\"Exact\\" or \\"Equivalent\\".\\n\\n- Exact: match a request only if it exactly matches a specified rule. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, but \\"rules\\" only included `apiGroups:[\\"apps\\"], apiVersions:[\\"v1\\"], resources: [\\"deployments\\"]`, a request to apps/v1beta1 or extensions/v1beta1 would not be sent to the webhook.\\n\\n- Equivalent: match a request if modifies a resource listed in rules, even via another API group or version. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, and \\"rules\\" only included `apiGroups:[\\"apps\\"], apiVersions:[\\"v1\\"], resources: [\\"deployments\\"]`, a request to apps/v1beta1 or extensions/v1beta1 would be converted to apps/v1 and sent to the webhook.\\n\\nDefaults to \\"Equivalent\\', args=[d.arg(name='matchPolicy', type=d.T.string)]), withMatchPolicy(matchPolicy): { matchPolicy: matchPolicy }, '#withName':: d.fn(help='"The name of the admission webhook. Name should be fully qualified, e.g., imagepolicy.kubernetes.io, where \\"imagepolicy\\" is the name of the webhook, and kubernetes.io is the name of the organization. Required."', args=[d.arg(name='name', type=d.T.string)]), diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/admissionregistration/v1/mutatingWebhookConfiguration.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1/mutatingWebhookConfiguration.libsonnet similarity index 84% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/admissionregistration/v1/mutatingWebhookConfiguration.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1/mutatingWebhookConfiguration.libsonnet index d8375bfd0e7..b3108c6fa60 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/admissionregistration/v1/mutatingWebhookConfiguration.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1/mutatingWebhookConfiguration.libsonnet @@ -3,12 +3,10 @@ '#':: d.pkg(name='mutatingWebhookConfiguration', url='', help='"MutatingWebhookConfiguration describes the configuration of and admission webhook that accept or reject and may change the object."'), '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. 
More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -19,21 +17,21 @@ withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. 
If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { metadata+: { generateName: generateName } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { metadata+: { labels+: labels } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. 
Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { metadata+: { namespace: namespace } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, @@ -41,9 +39,9 @@ withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. 
Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { metadata+: { uid: uid } }, }, '#new':: d.fn(help='new returns an instance of MutatingWebhookConfiguration', args=[d.arg(name='name', type=d.T.string)]), diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/admissionregistration/v1/ruleWithOperations.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1/ruleWithOperations.libsonnet similarity index 100% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/admissionregistration/v1/ruleWithOperations.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1/ruleWithOperations.libsonnet diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/admissionregistration/v1/serviceReference.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1/serviceReference.libsonnet similarity index 100% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/admissionregistration/v1/serviceReference.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1/serviceReference.libsonnet diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/admissionregistration/v1/validatingWebhook.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1/validatingWebhook.libsonnet similarity index 85% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/admissionregistration/v1/validatingWebhook.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1/validatingWebhook.libsonnet index ae3795634c8..911443e0935 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/admissionregistration/v1/validatingWebhook.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1/validatingWebhook.libsonnet @@ -47,6 +47,10 @@ withAdmissionReviewVersionsMixin(admissionReviewVersions): { admissionReviewVersions+: if std.isArray(v=admissionReviewVersions) then admissionReviewVersions else [admissionReviewVersions] }, '#withFailurePolicy':: d.fn(help='"FailurePolicy defines how unrecognized errors from the admission endpoint are handled - allowed values are Ignore or Fail. Defaults to Fail."', args=[d.arg(name='failurePolicy', type=d.T.string)]), withFailurePolicy(failurePolicy): { failurePolicy: failurePolicy }, + '#withMatchConditions':: d.fn(help='"MatchConditions is a list of conditions that must be met for a request to be sent to this webhook. Match conditions filter requests that have already been matched by the rules, namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests. There are a maximum of 64 match conditions allowed.\\n\\nThe exact matching logic is (in order):\\n 1. If ANY matchCondition evaluates to FALSE, the webhook is skipped.\\n 2. If ALL matchConditions evaluate to TRUE, the webhook is called.\\n 3. 
If any matchCondition evaluates to an error (but none are FALSE):\\n - If failurePolicy=Fail, reject the request\\n - If failurePolicy=Ignore, the error is ignored and the webhook is skipped\\n\\nThis is a beta feature and managed by the AdmissionWebhookMatchConditions feature gate."', args=[d.arg(name='matchConditions', type=d.T.array)]), + withMatchConditions(matchConditions): { matchConditions: if std.isArray(v=matchConditions) then matchConditions else [matchConditions] }, + '#withMatchConditionsMixin':: d.fn(help='"MatchConditions is a list of conditions that must be met for a request to be sent to this webhook. Match conditions filter requests that have already been matched by the rules, namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests. There are a maximum of 64 match conditions allowed.\\n\\nThe exact matching logic is (in order):\\n 1. If ANY matchCondition evaluates to FALSE, the webhook is skipped.\\n 2. If ALL matchConditions evaluate to TRUE, the webhook is called.\\n 3. If any matchCondition evaluates to an error (but none are FALSE):\\n - If failurePolicy=Fail, reject the request\\n - If failurePolicy=Ignore, the error is ignored and the webhook is skipped\\n\\nThis is a beta feature and managed by the AdmissionWebhookMatchConditions feature gate."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchConditions', type=d.T.array)]), + withMatchConditionsMixin(matchConditions): { matchConditions+: if std.isArray(v=matchConditions) then matchConditions else [matchConditions] }, '#withMatchPolicy':: d.fn(help='"matchPolicy defines how the \\"rules\\" list is used to match incoming requests. Allowed values are \\"Exact\\" or \\"Equivalent\\".\\n\\n- Exact: match a request only if it exactly matches a specified rule. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, but \\"rules\\" only included `apiGroups:[\\"apps\\"], apiVersions:[\\"v1\\"], resources: [\\"deployments\\"]`, a request to apps/v1beta1 or extensions/v1beta1 would not be sent to the webhook.\\n\\n- Equivalent: match a request if modifies a resource listed in rules, even via another API group or version. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, and \\"rules\\" only included `apiGroups:[\\"apps\\"], apiVersions:[\\"v1\\"], resources: [\\"deployments\\"]`, a request to apps/v1beta1 or extensions/v1beta1 would be converted to apps/v1 and sent to the webhook.\\n\\nDefaults to \\"Equivalent\\', args=[d.arg(name='matchPolicy', type=d.T.string)]), withMatchPolicy(matchPolicy): { matchPolicy: matchPolicy }, '#withName':: d.fn(help='"The name of the admission webhook. Name should be fully qualified, e.g., imagepolicy.kubernetes.io, where \\"imagepolicy\\" is the name of the webhook, and kubernetes.io is the name of the organization. 
Required."', args=[d.arg(name='name', type=d.T.string)]), diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/admissionregistration/v1/validatingWebhookConfiguration.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1/validatingWebhookConfiguration.libsonnet similarity index 84% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/admissionregistration/v1/validatingWebhookConfiguration.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1/validatingWebhookConfiguration.libsonnet index 3583a79bd8a..4814f4616f5 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/admissionregistration/v1/validatingWebhookConfiguration.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1/validatingWebhookConfiguration.libsonnet @@ -3,12 +3,10 @@ '#':: d.pkg(name='validatingWebhookConfiguration', url='', help='"ValidatingWebhookConfiguration describes the configuration of and admission webhook that accept or reject and object without changing it."'), '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. 
This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -19,21 +17,21 @@ withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. 
This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { metadata+: { generateName: generateName } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { metadata+: { labels+: labels } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". 
The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { metadata+: { namespace: namespace } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, @@ -41,9 +39,9 @@ withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. 
They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { metadata+: { uid: uid } }, }, '#new':: d.fn(help='new returns an instance of ValidatingWebhookConfiguration', args=[d.arg(name='name', type=d.T.string)]), diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/admissionregistration/v1/webhookClientConfig.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1/webhookClientConfig.libsonnet similarity index 100% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/admissionregistration/v1/webhookClientConfig.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1/webhookClientConfig.libsonnet diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1alpha1/auditAnnotation.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1alpha1/auditAnnotation.libsonnet new file mode 100644 index 00000000000..96c7387270c --- /dev/null +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1alpha1/auditAnnotation.libsonnet @@ -0,0 +1,10 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='auditAnnotation', url='', help='"AuditAnnotation describes how to produce an audit annotation for an API request."'), + '#withKey':: d.fn(help='"key specifies the audit annotation key. The audit annotation keys of a ValidatingAdmissionPolicy must be unique. 
The key must be a qualified name ([A-Za-z0-9][-A-Za-z0-9_.]*) no more than 63 bytes in length.\\n\\nThe key is combined with the resource name of the ValidatingAdmissionPolicy to construct an audit annotation key: \\"{ValidatingAdmissionPolicy name}/{key}\\".\\n\\nIf an admission webhook uses the same resource name as this ValidatingAdmissionPolicy and the same audit annotation key, the annotation key will be identical. In this case, the first annotation written with the key will be included in the audit event and all subsequent annotations with the same key will be discarded.\\n\\nRequired."', args=[d.arg(name='key', type=d.T.string)]), + withKey(key): { key: key }, + '#withValueExpression':: d.fn(help='"valueExpression represents the expression which is evaluated by CEL to produce an audit annotation value. The expression must evaluate to either a string or null value. If the expression evaluates to a string, the audit annotation is included with the string value. If the expression evaluates to null or empty string the audit annotation will be omitted. The valueExpression may be no longer than 5kb in length. If the result of the valueExpression is more than 10kb in length, it will be truncated to 10kb.\\n\\nIf multiple ValidatingAdmissionPolicyBinding resources match an API request, then the valueExpression will be evaluated for each binding. All unique values produced by the valueExpressions will be joined together in a comma-separated list.\\n\\nRequired."', args=[d.arg(name='valueExpression', type=d.T.string)]), + withValueExpression(valueExpression): { valueExpression: valueExpression }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1alpha1/expressionWarning.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1alpha1/expressionWarning.libsonnet new file mode 100644 index 00000000000..895ad9dcdcb --- /dev/null +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1alpha1/expressionWarning.libsonnet @@ -0,0 +1,10 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='expressionWarning', url='', help='"ExpressionWarning is a warning information that targets a specific expression."'), + '#withFieldRef':: d.fn(help='"The path to the field that refers the expression. For example, the reference to the expression of the first item of validations is \\"spec.validations[0].expression\\', args=[d.arg(name='fieldRef', type=d.T.string)]), + withFieldRef(fieldRef): { fieldRef: fieldRef }, + '#withWarning':: d.fn(help='"The content of type checking information in a human-readable form. 
Each line of the warning contains the type that the expression is checked against, followed by the type check error from the compiler."', args=[d.arg(name='warning', type=d.T.string)]), + withWarning(warning): { warning: warning }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1alpha1/main.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1alpha1/main.libsonnet new file mode 100644 index 00000000000..cd3fb220d90 --- /dev/null +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1alpha1/main.libsonnet @@ -0,0 +1,19 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='v1alpha1', url='', help=''), + auditAnnotation: (import 'auditAnnotation.libsonnet'), + expressionWarning: (import 'expressionWarning.libsonnet'), + matchCondition: (import 'matchCondition.libsonnet'), + matchResources: (import 'matchResources.libsonnet'), + namedRuleWithOperations: (import 'namedRuleWithOperations.libsonnet'), + paramKind: (import 'paramKind.libsonnet'), + paramRef: (import 'paramRef.libsonnet'), + typeChecking: (import 'typeChecking.libsonnet'), + validatingAdmissionPolicy: (import 'validatingAdmissionPolicy.libsonnet'), + validatingAdmissionPolicyBinding: (import 'validatingAdmissionPolicyBinding.libsonnet'), + validatingAdmissionPolicyBindingSpec: (import 'validatingAdmissionPolicyBindingSpec.libsonnet'), + validatingAdmissionPolicySpec: (import 'validatingAdmissionPolicySpec.libsonnet'), + validatingAdmissionPolicyStatus: (import 'validatingAdmissionPolicyStatus.libsonnet'), + validation: (import 'validation.libsonnet'), + variable: (import 'variable.libsonnet'), +} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1alpha1/matchCondition.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1alpha1/matchCondition.libsonnet new file mode 100644 index 00000000000..3873071158c --- /dev/null +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1alpha1/matchCondition.libsonnet @@ -0,0 +1,10 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='matchCondition', url='', help=''), + '#withExpression':: d.fn(help="\"Expression represents the expression which will be evaluated by CEL. Must evaluate to bool. CEL expressions have access to the contents of the AdmissionRequest and Authorizer, organized into CEL variables:\\n\\n'object' - The object from the incoming request. The value is null for DELETE requests. 'oldObject' - The existing object. The value is null for CREATE requests. 'request' - Attributes of the admission request(/pkg/apis/admission/types.go#AdmissionRequest). 'authorizer' - A CEL Authorizer. 
May be used to perform authorization checks for the principal (user or service account) of the request.\\n See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz\\n'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the\\n request resource.\\nDocumentation on CEL: https://kubernetes.io/docs/reference/using-api/cel/\\n\\nRequired.\"", args=[d.arg(name='expression', type=d.T.string)]), + withExpression(expression): { expression: expression }, + '#withName':: d.fn(help="\"Name is an identifier for this match condition, used for strategic merging of MatchConditions, as well as providing an identifier for logging purposes. A good name should be descriptive of the associated expression. Name must be a qualified name consisting of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character (e.g. 'MyName', or 'my.name', or '123-abc', regex used for validation is '([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]') with an optional DNS subdomain prefix and '/' (e.g. 'example.com/MyName')\\n\\nRequired.\"", args=[d.arg(name='name', type=d.T.string)]), + withName(name): { name: name }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1alpha1/matchResources.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1alpha1/matchResources.libsonnet new file mode 100644 index 00000000000..0215d0c956c --- /dev/null +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1alpha1/matchResources.libsonnet @@ -0,0 +1,38 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='matchResources', url='', help='"MatchResources decides whether to run the admission control policy on an object based on whether it meets the match criteria. The exclude rules take precedence over include rules (if a resource matches both, it is excluded)"'), + '#namespaceSelector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), + namespaceSelector: { + '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressions(matchExpressions): { namespaceSelector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } }, + '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressionsMixin(matchExpressions): { namespaceSelector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } }, + '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". 
The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabels(matchLabels): { namespaceSelector+: { matchLabels: matchLabels } }, + '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabelsMixin(matchLabels): { namespaceSelector+: { matchLabels+: matchLabels } }, + }, + '#objectSelector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), + objectSelector: { + '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressions(matchExpressions): { objectSelector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } }, + '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressionsMixin(matchExpressions): { objectSelector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } }, + '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabels(matchLabels): { objectSelector+: { matchLabels: matchLabels } }, + '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabelsMixin(matchLabels): { objectSelector+: { matchLabels+: matchLabels } }, + }, + '#withExcludeResourceRules':: d.fn(help='"ExcludeResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy should not care about. The exclude rules take precedence over include rules (if a resource matches both, it is excluded)"', args=[d.arg(name='excludeResourceRules', type=d.T.array)]), + withExcludeResourceRules(excludeResourceRules): { excludeResourceRules: if std.isArray(v=excludeResourceRules) then excludeResourceRules else [excludeResourceRules] }, + '#withExcludeResourceRulesMixin':: d.fn(help='"ExcludeResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy should not care about. 
The exclude rules take precedence over include rules (if a resource matches both, it is excluded)"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='excludeResourceRules', type=d.T.array)]), + withExcludeResourceRulesMixin(excludeResourceRules): { excludeResourceRules+: if std.isArray(v=excludeResourceRules) then excludeResourceRules else [excludeResourceRules] }, + '#withMatchPolicy':: d.fn(help='"matchPolicy defines how the \\"MatchResources\\" list is used to match incoming requests. Allowed values are \\"Exact\\" or \\"Equivalent\\".\\n\\n- Exact: match a request only if it exactly matches a specified rule. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, but \\"rules\\" only included `apiGroups:[\\"apps\\"], apiVersions:[\\"v1\\"], resources: [\\"deployments\\"]`, a request to apps/v1beta1 or extensions/v1beta1 would not be sent to the ValidatingAdmissionPolicy.\\n\\n- Equivalent: match a request if modifies a resource listed in rules, even via another API group or version. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, and \\"rules\\" only included `apiGroups:[\\"apps\\"], apiVersions:[\\"v1\\"], resources: [\\"deployments\\"]`, a request to apps/v1beta1 or extensions/v1beta1 would be converted to apps/v1 and sent to the ValidatingAdmissionPolicy.\\n\\nDefaults to \\"Equivalent\\', args=[d.arg(name='matchPolicy', type=d.T.string)]), + withMatchPolicy(matchPolicy): { matchPolicy: matchPolicy }, + '#withResourceRules':: d.fn(help='"ResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy matches. The policy cares about an operation if it matches _any_ Rule."', args=[d.arg(name='resourceRules', type=d.T.array)]), + withResourceRules(resourceRules): { resourceRules: if std.isArray(v=resourceRules) then resourceRules else [resourceRules] }, + '#withResourceRulesMixin':: d.fn(help='"ResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy matches. 
The policy cares about an operation if it matches _any_ Rule."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='resourceRules', type=d.T.array)]), + withResourceRulesMixin(resourceRules): { resourceRules+: if std.isArray(v=resourceRules) then resourceRules else [resourceRules] }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/admissionregistration/v1/ruleWithOperations.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1alpha1/namedRuleWithOperations.libsonnet similarity index 83% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/admissionregistration/v1/ruleWithOperations.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1alpha1/namedRuleWithOperations.libsonnet index d50b9c57c0c..a1726a57081 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/admissionregistration/v1/ruleWithOperations.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1alpha1/namedRuleWithOperations.libsonnet @@ -1,6 +1,6 @@ { local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='ruleWithOperations', url='', help='"RuleWithOperations is a tuple of Operations and Resources. It is recommended to make sure that all the tuple expansions are valid."'), + '#':: d.pkg(name='namedRuleWithOperations', url='', help='"NamedRuleWithOperations is a tuple of Operations and Resources with ResourceNames."'), '#withApiGroups':: d.fn(help="\"APIGroups is the API groups the resources belong to. '*' is all groups. If '*' is present, the length of the slice must be one. Required.\"", args=[d.arg(name='apiGroups', type=d.T.array)]), withApiGroups(apiGroups): { apiGroups: if std.isArray(v=apiGroups) then apiGroups else [apiGroups] }, '#withApiGroupsMixin':: d.fn(help="\"APIGroups is the API groups the resources belong to. '*' is all groups. If '*' is present, the length of the slice must be one. Required.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='apiGroups', type=d.T.array)]), @@ -13,6 +13,10 @@ withOperations(operations): { operations: if std.isArray(v=operations) then operations else [operations] }, '#withOperationsMixin':: d.fn(help="\"Operations is the operations the admission hook cares about - CREATE, UPDATE, DELETE, CONNECT or * for all of those operations and any future admission operations that are added. If '*' is present, the length of the slice must be one. Required.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='operations', type=d.T.array)]), withOperationsMixin(operations): { operations+: if std.isArray(v=operations) then operations else [operations] }, + '#withResourceNames':: d.fn(help='"ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed."', args=[d.arg(name='resourceNames', type=d.T.array)]), + withResourceNames(resourceNames): { resourceNames: if std.isArray(v=resourceNames) then resourceNames else [resourceNames] }, + '#withResourceNamesMixin':: d.fn(help='"ResourceNames is an optional white list of names that the rule applies to. 
An empty set means that everything is allowed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='resourceNames', type=d.T.array)]), + withResourceNamesMixin(resourceNames): { resourceNames+: if std.isArray(v=resourceNames) then resourceNames else [resourceNames] }, '#withResources':: d.fn(help="\"Resources is a list of resources this rule applies to.\\n\\nFor example: 'pods' means pods. 'pods/log' means the log subresource of pods. '*' means all resources, but not subresources. 'pods/*' means all subresources of pods. '*/scale' means all scale subresources. '*/*' means all resources and their subresources.\\n\\nIf wildcard is present, the validation rule will ensure resources do not overlap with each other.\\n\\nDepending on the enclosing object, subresources might not be allowed. Required.\"", args=[d.arg(name='resources', type=d.T.array)]), withResources(resources): { resources: if std.isArray(v=resources) then resources else [resources] }, '#withResourcesMixin':: d.fn(help="\"Resources is a list of resources this rule applies to.\\n\\nFor example: 'pods' means pods. 'pods/log' means the log subresource of pods. '*' means all resources, but not subresources. 'pods/*' means all subresources of pods. '*/scale' means all scale subresources. '*/*' means all resources and their subresources.\\n\\nIf wildcard is present, the validation rule will ensure resources do not overlap with each other.\\n\\nDepending on the enclosing object, subresources might not be allowed. Required.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='resources', type=d.T.array)]), diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1alpha1/paramKind.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1alpha1/paramKind.libsonnet new file mode 100644 index 00000000000..11a349422a7 --- /dev/null +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1alpha1/paramKind.libsonnet @@ -0,0 +1,8 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='paramKind', url='', help='"ParamKind is a tuple of Group Kind and Version."'), + '#withKind':: d.fn(help='"Kind is the API kind the resources belong to. 
Required."', args=[d.arg(name='kind', type=d.T.string)]), + withKind(kind): { kind: kind }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1/podDisruptionBudgetSpec.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1alpha1/paramRef.libsonnet similarity index 51% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1/podDisruptionBudgetSpec.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1alpha1/paramRef.libsonnet index 84917662145..9781cdd7115 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1/podDisruptionBudgetSpec.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1alpha1/paramRef.libsonnet @@ -1,6 +1,6 @@ { local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='podDisruptionBudgetSpec', url='', help='"PodDisruptionBudgetSpec is a description of a PodDisruptionBudget."'), + '#':: d.pkg(name='paramRef', url='', help='"ParamRef describes how to locate the params to be used as input to expressions of rules applied by a policy binding."'), '#selector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), selector: { '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), @@ -12,10 +12,12 @@ '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), withMatchLabelsMixin(matchLabels): { selector+: { matchLabels+: matchLabels } }, }, - '#withMaxUnavailable':: d.fn(help='"IntOrString is a type that can hold an int32 or a string. When used in JSON or YAML marshalling and unmarshalling, it produces or consumes the inner type. This allows you to have, for example, a JSON field that can accept a name or number."', args=[d.arg(name='maxUnavailable', type=d.T.string)]), - withMaxUnavailable(maxUnavailable): { maxUnavailable: maxUnavailable }, - '#withMinAvailable':: d.fn(help='"IntOrString is a type that can hold an int32 or a string. When used in JSON or YAML marshalling and unmarshalling, it produces or consumes the inner type. This allows you to have, for example, a JSON field that can accept a name or number."', args=[d.arg(name='minAvailable', type=d.T.string)]), - withMinAvailable(minAvailable): { minAvailable: minAvailable }, + '#withName':: d.fn(help='"`name` is the name of the resource being referenced.\\n\\n`name` and `selector` are mutually exclusive properties. If one is set, the other must be unset."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { name: name }, + '#withNamespace':: d.fn(help='"namespace is the namespace of the referenced resource. Allows limiting the search for params to a specific namespace. 
Applies to both `name` and `selector` fields.\\n\\nA per-namespace parameter may be used by specifying a namespace-scoped `paramKind` in the policy and leaving this field empty.\\n\\n- If `paramKind` is cluster-scoped, this field MUST be unset. Setting this field results in a configuration error.\\n\\n- If `paramKind` is namespace-scoped, the namespace of the object being evaluated for admission will be used when this field is left unset. Take care that if this is left empty the binding must not match any cluster-scoped resources, which will result in an error."', args=[d.arg(name='namespace', type=d.T.string)]), + withNamespace(namespace): { namespace: namespace }, + '#withParameterNotFoundAction':: d.fn(help='"`parameterNotFoundAction` controls the behavior of the binding when the resource exists, and name or selector is valid, but there are no parameters matched by the binding. If the value is set to `Allow`, then no matched parameters will be treated as successful validation by the binding. If set to `Deny`, then no matched parameters will be subject to the `failurePolicy` of the policy.\\n\\nAllowed values are `Allow` or `Deny` Default to `Deny`"', args=[d.arg(name='parameterNotFoundAction', type=d.T.string)]), + withParameterNotFoundAction(parameterNotFoundAction): { parameterNotFoundAction: parameterNotFoundAction }, '#mixin': 'ignore', mixin: self, } diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1alpha1/typeChecking.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1alpha1/typeChecking.libsonnet new file mode 100644 index 00000000000..1af60568c08 --- /dev/null +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1alpha1/typeChecking.libsonnet @@ -0,0 +1,10 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='typeChecking', url='', help='"TypeChecking contains results of type checking the expressions in the ValidatingAdmissionPolicy"'), + '#withExpressionWarnings':: d.fn(help='"The type checking warnings for each expression."', args=[d.arg(name='expressionWarnings', type=d.T.array)]), + withExpressionWarnings(expressionWarnings): { expressionWarnings: if std.isArray(v=expressionWarnings) then expressionWarnings else [expressionWarnings] }, + '#withExpressionWarningsMixin':: d.fn(help='"The type checking warnings for each expression."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='expressionWarnings', type=d.T.array)]), + withExpressionWarningsMixin(expressionWarnings): { expressionWarnings+: if std.isArray(v=expressionWarnings) then expressionWarnings else [expressionWarnings] }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1alpha1/validatingAdmissionPolicy.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1alpha1/validatingAdmissionPolicy.libsonnet new file mode 100644 index 00000000000..a1b1a4fe407 --- /dev/null +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1alpha1/validatingAdmissionPolicy.libsonnet @@ -0,0 +1,117 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='validatingAdmissionPolicy', url='', help='"ValidatingAdmissionPolicy describes the definition of an admission validation policy that accepts or rejects an object without changing it."'), + 
'#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), + metadata: { + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + withAnnotations(annotations): { metadata+: { annotations: annotations } }, + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, + '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), + withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, + '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), + withDeletionGracePeriodSeconds(deletionGracePeriodSeconds): { metadata+: { deletionGracePeriodSeconds: deletionGracePeriodSeconds } }, + '#withDeletionTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='deletionTimestamp', type=d.T.string)]), + withDeletionTimestamp(deletionTimestamp): { metadata+: { deletionTimestamp: deletionTimestamp } }, + '#withFinalizers':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. 
Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."', args=[d.arg(name='finalizers', type=d.T.array)]), + withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, + '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), + withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + withGenerateName(generateName): { metadata+: { generateName: generateName } }, + '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), + withGeneration(generation): { metadata+: { generation: generation } }, + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), + withLabels(labels): { metadata+: { labels: labels } }, + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + withLabelsMixin(labels): { metadata+: { labels+: labels } }, + '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), + withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, + '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), + withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { metadata+: { name: name } }, + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + withNamespace(namespace): { metadata+: { namespace: namespace } }, + '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), + withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, + '#withOwnerReferencesMixin':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. 
If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ownerReferences', type=d.T.array)]), + withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, + '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), + withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), + withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), + withUid(uid): { metadata+: { uid: uid } }, + }, + '#new':: d.fn(help='new returns an instance of ValidatingAdmissionPolicy', args=[d.arg(name='name', type=d.T.string)]), + new(name): { + apiVersion: 'admissionregistration.k8s.io/v1alpha1', + kind: 'ValidatingAdmissionPolicy', + } + self.metadata.withName(name=name), + '#spec':: d.obj(help='"ValidatingAdmissionPolicySpec is the specification of the desired behavior of the AdmissionPolicy."'), + spec: { + '#matchConstraints':: d.obj(help='"MatchResources decides whether to run the admission control policy on an object based on whether it meets the match criteria. The exclude rules take precedence over include rules (if a resource matches both, it is excluded)"'), + matchConstraints: { + '#namespaceSelector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), + namespaceSelector: { + '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressions(matchExpressions): { spec+: { matchConstraints+: { namespaceSelector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } } }, + '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. 
The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressionsMixin(matchExpressions): { spec+: { matchConstraints+: { namespaceSelector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } } }, + '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabels(matchLabels): { spec+: { matchConstraints+: { namespaceSelector+: { matchLabels: matchLabels } } } }, + '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabelsMixin(matchLabels): { spec+: { matchConstraints+: { namespaceSelector+: { matchLabels+: matchLabels } } } }, + }, + '#objectSelector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), + objectSelector: { + '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressions(matchExpressions): { spec+: { matchConstraints+: { objectSelector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } } }, + '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressionsMixin(matchExpressions): { spec+: { matchConstraints+: { objectSelector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } } }, + '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabels(matchLabels): { spec+: { matchConstraints+: { objectSelector+: { matchLabels: matchLabels } } } }, + '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". 
The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabelsMixin(matchLabels): { spec+: { matchConstraints+: { objectSelector+: { matchLabels+: matchLabels } } } }, + }, + '#withExcludeResourceRules':: d.fn(help='"ExcludeResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy should not care about. The exclude rules take precedence over include rules (if a resource matches both, it is excluded)"', args=[d.arg(name='excludeResourceRules', type=d.T.array)]), + withExcludeResourceRules(excludeResourceRules): { spec+: { matchConstraints+: { excludeResourceRules: if std.isArray(v=excludeResourceRules) then excludeResourceRules else [excludeResourceRules] } } }, + '#withExcludeResourceRulesMixin':: d.fn(help='"ExcludeResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy should not care about. The exclude rules take precedence over include rules (if a resource matches both, it is excluded)"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='excludeResourceRules', type=d.T.array)]), + withExcludeResourceRulesMixin(excludeResourceRules): { spec+: { matchConstraints+: { excludeResourceRules+: if std.isArray(v=excludeResourceRules) then excludeResourceRules else [excludeResourceRules] } } }, + '#withMatchPolicy':: d.fn(help='"matchPolicy defines how the \\"MatchResources\\" list is used to match incoming requests. Allowed values are \\"Exact\\" or \\"Equivalent\\".\\n\\n- Exact: match a request only if it exactly matches a specified rule. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, but \\"rules\\" only included `apiGroups:[\\"apps\\"], apiVersions:[\\"v1\\"], resources: [\\"deployments\\"]`, a request to apps/v1beta1 or extensions/v1beta1 would not be sent to the ValidatingAdmissionPolicy.\\n\\n- Equivalent: match a request if modifies a resource listed in rules, even via another API group or version. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, and \\"rules\\" only included `apiGroups:[\\"apps\\"], apiVersions:[\\"v1\\"], resources: [\\"deployments\\"]`, a request to apps/v1beta1 or extensions/v1beta1 would be converted to apps/v1 and sent to the ValidatingAdmissionPolicy.\\n\\nDefaults to \\"Equivalent\\', args=[d.arg(name='matchPolicy', type=d.T.string)]), + withMatchPolicy(matchPolicy): { spec+: { matchConstraints+: { matchPolicy: matchPolicy } } }, + '#withResourceRules':: d.fn(help='"ResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy matches. The policy cares about an operation if it matches _any_ Rule."', args=[d.arg(name='resourceRules', type=d.T.array)]), + withResourceRules(resourceRules): { spec+: { matchConstraints+: { resourceRules: if std.isArray(v=resourceRules) then resourceRules else [resourceRules] } } }, + '#withResourceRulesMixin':: d.fn(help='"ResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy matches. 
The policy cares about an operation if it matches _any_ Rule."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='resourceRules', type=d.T.array)]), + withResourceRulesMixin(resourceRules): { spec+: { matchConstraints+: { resourceRules+: if std.isArray(v=resourceRules) then resourceRules else [resourceRules] } } }, + }, + '#paramKind':: d.obj(help='"ParamKind is a tuple of Group Kind and Version."'), + paramKind: { + '#withApiVersion':: d.fn(help='"APIVersion is the API group version the resources belong to. In format of \\"group/version\\". Required."', args=[d.arg(name='apiVersion', type=d.T.string)]), + withApiVersion(apiVersion): { spec+: { paramKind+: { apiVersion: apiVersion } } }, + '#withKind':: d.fn(help='"Kind is the API kind the resources belong to. Required."', args=[d.arg(name='kind', type=d.T.string)]), + withKind(kind): { spec+: { paramKind+: { kind: kind } } }, + }, + '#withAuditAnnotations':: d.fn(help='"auditAnnotations contains CEL expressions which are used to produce audit annotations for the audit event of the API request. validations and auditAnnotations may not both be empty; a least one of validations or auditAnnotations is required."', args=[d.arg(name='auditAnnotations', type=d.T.array)]), + withAuditAnnotations(auditAnnotations): { spec+: { auditAnnotations: if std.isArray(v=auditAnnotations) then auditAnnotations else [auditAnnotations] } }, + '#withAuditAnnotationsMixin':: d.fn(help='"auditAnnotations contains CEL expressions which are used to produce audit annotations for the audit event of the API request. validations and auditAnnotations may not both be empty; a least one of validations or auditAnnotations is required."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='auditAnnotations', type=d.T.array)]), + withAuditAnnotationsMixin(auditAnnotations): { spec+: { auditAnnotations+: if std.isArray(v=auditAnnotations) then auditAnnotations else [auditAnnotations] } }, + '#withFailurePolicy':: d.fn(help='"failurePolicy defines how to handle failures for the admission policy. Failures can occur from CEL expression parse errors, type check errors, runtime errors and invalid or mis-configured policy definitions or bindings.\\n\\nA policy is invalid if spec.paramKind refers to a non-existent Kind. A binding is invalid if spec.paramRef.name refers to a non-existent resource.\\n\\nfailurePolicy does not define how validations that evaluate to false are handled.\\n\\nWhen failurePolicy is set to Fail, ValidatingAdmissionPolicyBinding validationActions define how failures are enforced.\\n\\nAllowed values are Ignore or Fail. Defaults to Fail."', args=[d.arg(name='failurePolicy', type=d.T.string)]), + withFailurePolicy(failurePolicy): { spec+: { failurePolicy: failurePolicy } }, + '#withMatchConditions':: d.fn(help='"MatchConditions is a list of conditions that must be met for a request to be validated. Match conditions filter requests that have already been matched by the rules, namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests. There are a maximum of 64 match conditions allowed.\\n\\nIf a parameter object is provided, it can be accessed via the `params` handle in the same manner as validation expressions.\\n\\nThe exact matching logic is (in order):\\n 1. If ANY matchCondition evaluates to FALSE, the policy is skipped.\\n 2. If ALL matchConditions evaluate to TRUE, the policy is evaluated.\\n 3. 
If any matchCondition evaluates to an error (but none are FALSE):\\n - If failurePolicy=Fail, reject the request\\n - If failurePolicy=Ignore, the policy is skipped"', args=[d.arg(name='matchConditions', type=d.T.array)]), + withMatchConditions(matchConditions): { spec+: { matchConditions: if std.isArray(v=matchConditions) then matchConditions else [matchConditions] } }, + '#withMatchConditionsMixin':: d.fn(help='"MatchConditions is a list of conditions that must be met for a request to be validated. Match conditions filter requests that have already been matched by the rules, namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests. There are a maximum of 64 match conditions allowed.\\n\\nIf a parameter object is provided, it can be accessed via the `params` handle in the same manner as validation expressions.\\n\\nThe exact matching logic is (in order):\\n 1. If ANY matchCondition evaluates to FALSE, the policy is skipped.\\n 2. If ALL matchConditions evaluate to TRUE, the policy is evaluated.\\n 3. If any matchCondition evaluates to an error (but none are FALSE):\\n - If failurePolicy=Fail, reject the request\\n - If failurePolicy=Ignore, the policy is skipped"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchConditions', type=d.T.array)]), + withMatchConditionsMixin(matchConditions): { spec+: { matchConditions+: if std.isArray(v=matchConditions) then matchConditions else [matchConditions] } }, + '#withValidations':: d.fn(help='"Validations contain CEL expressions which is used to apply the validation. Validations and AuditAnnotations may not both be empty; a minimum of one Validations or AuditAnnotations is required."', args=[d.arg(name='validations', type=d.T.array)]), + withValidations(validations): { spec+: { validations: if std.isArray(v=validations) then validations else [validations] } }, + '#withValidationsMixin':: d.fn(help='"Validations contain CEL expressions which is used to apply the validation. Validations and AuditAnnotations may not both be empty; a minimum of one Validations or AuditAnnotations is required."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='validations', type=d.T.array)]), + withValidationsMixin(validations): { spec+: { validations+: if std.isArray(v=validations) then validations else [validations] } }, + '#withVariables':: d.fn(help='"Variables contain definitions of variables that can be used in composition of other expressions. Each variable is defined as a named CEL expression. The variables defined here will be available under `variables` in other expressions of the policy except MatchConditions because MatchConditions are evaluated before the rest of the policy.\\n\\nThe expression of a variable can refer to other variables defined earlier in the list but not those after. Thus, Variables must be sorted by the order of first appearance and acyclic."', args=[d.arg(name='variables', type=d.T.array)]), + withVariables(variables): { spec+: { variables: if std.isArray(v=variables) then variables else [variables] } }, + '#withVariablesMixin':: d.fn(help='"Variables contain definitions of variables that can be used in composition of other expressions. Each variable is defined as a named CEL expression. 
The variables defined here will be available under `variables` in other expressions of the policy except MatchConditions because MatchConditions are evaluated before the rest of the policy.\\n\\nThe expression of a variable can refer to other variables defined earlier in the list but not those after. Thus, Variables must be sorted by the order of first appearance and acyclic."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='variables', type=d.T.array)]), + withVariablesMixin(variables): { spec+: { variables+: if std.isArray(v=variables) then variables else [variables] } }, + }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1alpha1/validatingAdmissionPolicyBinding.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1alpha1/validatingAdmissionPolicyBinding.libsonnet new file mode 100644 index 00000000000..3a6143e95a8 --- /dev/null +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1alpha1/validatingAdmissionPolicyBinding.libsonnet @@ -0,0 +1,118 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='validatingAdmissionPolicyBinding', url='', help="\"ValidatingAdmissionPolicyBinding binds the ValidatingAdmissionPolicy with paramerized resources. ValidatingAdmissionPolicyBinding and parameter CRDs together define how cluster administrators configure policies for clusters.\\n\\nFor a given admission request, each binding will cause its policy to be evaluated N times, where N is 1 for policies/bindings that don't use params, otherwise N is the number of parameters selected by the binding.\\n\\nThe CEL expressions of a policy must have a computed CEL cost below the maximum CEL budget. Each evaluation of the policy is given an independent CEL cost budget. Adding/removing policies, bindings, or params can not affect whether a given (policy, binding, param) combination is within its own CEL budget.\""), + '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), + metadata: { + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + withAnnotations(annotations): { metadata+: { annotations: annotations } }, + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, + '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. 
Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), + withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, + '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), + withDeletionGracePeriodSeconds(deletionGracePeriodSeconds): { metadata+: { deletionGracePeriodSeconds: deletionGracePeriodSeconds } }, + '#withDeletionTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='deletionTimestamp', type=d.T.string)]), + withDeletionTimestamp(deletionTimestamp): { metadata+: { deletionTimestamp: deletionTimestamp } }, + '#withFinalizers':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."', args=[d.arg(name='finalizers', type=d.T.array)]), + withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, + '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), + withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. 
If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + withGenerateName(generateName): { metadata+: { generateName: generateName } }, + '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), + withGeneration(generation): { metadata+: { generation: generation } }, + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), + withLabels(labels): { metadata+: { labels: labels } }, + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + withLabelsMixin(labels): { metadata+: { labels+: labels } }, + '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), + withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, + '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), + withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { metadata+: { name: name } }, + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + withNamespace(namespace): { metadata+: { namespace: namespace } }, + '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), + withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, + '#withOwnerReferencesMixin':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ownerReferences', type=d.T.array)]), + withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, + '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), + withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), + withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), + withUid(uid): { metadata+: { uid: uid } }, + }, + '#new':: d.fn(help='new returns an instance of ValidatingAdmissionPolicyBinding', args=[d.arg(name='name', type=d.T.string)]), + new(name): { + apiVersion: 'admissionregistration.k8s.io/v1alpha1', + kind: 'ValidatingAdmissionPolicyBinding', + } + self.metadata.withName(name=name), + '#spec':: d.obj(help='"ValidatingAdmissionPolicyBindingSpec is the specification of the ValidatingAdmissionPolicyBinding."'), + spec: { + '#matchResources':: d.obj(help='"MatchResources decides whether to run the admission control policy on an object based on whether it meets the match criteria. The exclude rules take precedence over include rules (if a resource matches both, it is excluded)"'), + matchResources: { + '#namespaceSelector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), + namespaceSelector: { + '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressions(matchExpressions): { spec+: { matchResources+: { namespaceSelector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } } }, + '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressionsMixin(matchExpressions): { spec+: { matchResources+: { namespaceSelector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } } }, + '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabels(matchLabels): { spec+: { matchResources+: { namespaceSelector+: { matchLabels: matchLabels } } } }, + '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabelsMixin(matchLabels): { spec+: { matchResources+: { namespaceSelector+: { matchLabels+: matchLabels } } } }, + }, + '#objectSelector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), + objectSelector: { + '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. 
The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressions(matchExpressions): { spec+: { matchResources+: { objectSelector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } } }, + '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressionsMixin(matchExpressions): { spec+: { matchResources+: { objectSelector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } } }, + '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabels(matchLabels): { spec+: { matchResources+: { objectSelector+: { matchLabels: matchLabels } } } }, + '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabelsMixin(matchLabels): { spec+: { matchResources+: { objectSelector+: { matchLabels+: matchLabels } } } }, + }, + '#withExcludeResourceRules':: d.fn(help='"ExcludeResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy should not care about. The exclude rules take precedence over include rules (if a resource matches both, it is excluded)"', args=[d.arg(name='excludeResourceRules', type=d.T.array)]), + withExcludeResourceRules(excludeResourceRules): { spec+: { matchResources+: { excludeResourceRules: if std.isArray(v=excludeResourceRules) then excludeResourceRules else [excludeResourceRules] } } }, + '#withExcludeResourceRulesMixin':: d.fn(help='"ExcludeResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy should not care about. The exclude rules take precedence over include rules (if a resource matches both, it is excluded)"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='excludeResourceRules', type=d.T.array)]), + withExcludeResourceRulesMixin(excludeResourceRules): { spec+: { matchResources+: { excludeResourceRules+: if std.isArray(v=excludeResourceRules) then excludeResourceRules else [excludeResourceRules] } } }, + '#withMatchPolicy':: d.fn(help='"matchPolicy defines how the \\"MatchResources\\" list is used to match incoming requests. Allowed values are \\"Exact\\" or \\"Equivalent\\".\\n\\n- Exact: match a request only if it exactly matches a specified rule. 
For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, but \\"rules\\" only included `apiGroups:[\\"apps\\"], apiVersions:[\\"v1\\"], resources: [\\"deployments\\"]`, a request to apps/v1beta1 or extensions/v1beta1 would not be sent to the ValidatingAdmissionPolicy.\\n\\n- Equivalent: match a request if modifies a resource listed in rules, even via another API group or version. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, and \\"rules\\" only included `apiGroups:[\\"apps\\"], apiVersions:[\\"v1\\"], resources: [\\"deployments\\"]`, a request to apps/v1beta1 or extensions/v1beta1 would be converted to apps/v1 and sent to the ValidatingAdmissionPolicy.\\n\\nDefaults to \\"Equivalent\\', args=[d.arg(name='matchPolicy', type=d.T.string)]), + withMatchPolicy(matchPolicy): { spec+: { matchResources+: { matchPolicy: matchPolicy } } }, + '#withResourceRules':: d.fn(help='"ResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy matches. The policy cares about an operation if it matches _any_ Rule."', args=[d.arg(name='resourceRules', type=d.T.array)]), + withResourceRules(resourceRules): { spec+: { matchResources+: { resourceRules: if std.isArray(v=resourceRules) then resourceRules else [resourceRules] } } }, + '#withResourceRulesMixin':: d.fn(help='"ResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy matches. The policy cares about an operation if it matches _any_ Rule."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='resourceRules', type=d.T.array)]), + withResourceRulesMixin(resourceRules): { spec+: { matchResources+: { resourceRules+: if std.isArray(v=resourceRules) then resourceRules else [resourceRules] } } }, + }, + '#paramRef':: d.obj(help='"ParamRef describes how to locate the params to be used as input to expressions of rules applied by a policy binding."'), + paramRef: { + '#selector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), + selector: { + '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressions(matchExpressions): { spec+: { paramRef+: { selector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } } }, + '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressionsMixin(matchExpressions): { spec+: { paramRef+: { selector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } } }, + '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". 
The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabels(matchLabels): { spec+: { paramRef+: { selector+: { matchLabels: matchLabels } } } }, + '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabelsMixin(matchLabels): { spec+: { paramRef+: { selector+: { matchLabels+: matchLabels } } } }, + }, + '#withName':: d.fn(help='"`name` is the name of the resource being referenced.\\n\\n`name` and `selector` are mutually exclusive properties. If one is set, the other must be unset."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { spec+: { paramRef+: { name: name } } }, + '#withNamespace':: d.fn(help='"namespace is the namespace of the referenced resource. Allows limiting the search for params to a specific namespace. Applies to both `name` and `selector` fields.\\n\\nA per-namespace parameter may be used by specifying a namespace-scoped `paramKind` in the policy and leaving this field empty.\\n\\n- If `paramKind` is cluster-scoped, this field MUST be unset. Setting this field results in a configuration error.\\n\\n- If `paramKind` is namespace-scoped, the namespace of the object being evaluated for admission will be used when this field is left unset. Take care that if this is left empty the binding must not match any cluster-scoped resources, which will result in an error."', args=[d.arg(name='namespace', type=d.T.string)]), + withNamespace(namespace): { spec+: { paramRef+: { namespace: namespace } } }, + '#withParameterNotFoundAction':: d.fn(help='"`parameterNotFoundAction` controls the behavior of the binding when the resource exists, and name or selector is valid, but there are no parameters matched by the binding. If the value is set to `Allow`, then no matched parameters will be treated as successful validation by the binding. If set to `Deny`, then no matched parameters will be subject to the `failurePolicy` of the policy.\\n\\nAllowed values are `Allow` or `Deny` Default to `Deny`"', args=[d.arg(name='parameterNotFoundAction', type=d.T.string)]), + withParameterNotFoundAction(parameterNotFoundAction): { spec+: { paramRef+: { parameterNotFoundAction: parameterNotFoundAction } } }, + }, + '#withPolicyName':: d.fn(help='"PolicyName references a ValidatingAdmissionPolicy name which the ValidatingAdmissionPolicyBinding binds to. If the referenced resource does not exist, this binding is considered invalid and will be ignored Required."', args=[d.arg(name='policyName', type=d.T.string)]), + withPolicyName(policyName): { spec+: { policyName: policyName } }, + '#withValidationActions':: d.fn(help="\"validationActions declares how Validations of the referenced ValidatingAdmissionPolicy are enforced. If a validation evaluates to false it is always enforced according to these actions.\\n\\nFailures defined by the ValidatingAdmissionPolicy's FailurePolicy are enforced according to these actions only if the FailurePolicy is set to Fail, otherwise the failures are ignored. This includes compilation errors, runtime errors and misconfigurations of the policy.\\n\\nvalidationActions is declared as a set of action values. Order does not matter. 
validationActions may not contain duplicates of the same action.\\n\\nThe supported actions values are:\\n\\n\\\"Deny\\\" specifies that a validation failure results in a denied request.\\n\\n\\\"Warn\\\" specifies that a validation failure is reported to the request client in HTTP Warning headers, with a warning code of 299. Warnings can be sent both for allowed or denied admission responses.\\n\\n\\\"Audit\\\" specifies that a validation failure is included in the published audit event for the request. The audit event will contain a `validation.policy.admission.k8s.io/validation_failure` audit annotation with a value containing the details of the validation failures, formatted as a JSON list of objects, each with the following fields: - message: The validation failure message string - policy: The resource name of the ValidatingAdmissionPolicy - binding: The resource name of the ValidatingAdmissionPolicyBinding - expressionIndex: The index of the failed validations in the ValidatingAdmissionPolicy - validationActions: The enforcement actions enacted for the validation failure Example audit annotation: `\\\"validation.policy.admission.k8s.io/validation_failure\\\": \\\"[{\\\"message\\\": \\\"Invalid value\\\", {\\\"policy\\\": \\\"policy.example.com\\\", {\\\"binding\\\": \\\"policybinding.example.com\\\", {\\\"expressionIndex\\\": \\\"1\\\", {\\\"validationActions\\\": [\\\"Audit\\\"]}]\\\"`\\n\\nClients should expect to handle additional values by ignoring any values not recognized.\\n\\n\\\"Deny\\\" and \\\"Warn\\\" may not be used together since this combination needlessly duplicates the validation failure both in the API response body and the HTTP warning headers.\\n\\nRequired.\"", args=[d.arg(name='validationActions', type=d.T.array)]), + withValidationActions(validationActions): { spec+: { validationActions: if std.isArray(v=validationActions) then validationActions else [validationActions] } }, + '#withValidationActionsMixin':: d.fn(help="\"validationActions declares how Validations of the referenced ValidatingAdmissionPolicy are enforced. If a validation evaluates to false it is always enforced according to these actions.\\n\\nFailures defined by the ValidatingAdmissionPolicy's FailurePolicy are enforced according to these actions only if the FailurePolicy is set to Fail, otherwise the failures are ignored. This includes compilation errors, runtime errors and misconfigurations of the policy.\\n\\nvalidationActions is declared as a set of action values. Order does not matter. validationActions may not contain duplicates of the same action.\\n\\nThe supported actions values are:\\n\\n\\\"Deny\\\" specifies that a validation failure results in a denied request.\\n\\n\\\"Warn\\\" specifies that a validation failure is reported to the request client in HTTP Warning headers, with a warning code of 299. Warnings can be sent both for allowed or denied admission responses.\\n\\n\\\"Audit\\\" specifies that a validation failure is included in the published audit event for the request. 
The audit event will contain a `validation.policy.admission.k8s.io/validation_failure` audit annotation with a value containing the details of the validation failures, formatted as a JSON list of objects, each with the following fields: - message: The validation failure message string - policy: The resource name of the ValidatingAdmissionPolicy - binding: The resource name of the ValidatingAdmissionPolicyBinding - expressionIndex: The index of the failed validations in the ValidatingAdmissionPolicy - validationActions: The enforcement actions enacted for the validation failure Example audit annotation: `\\\"validation.policy.admission.k8s.io/validation_failure\\\": \\\"[{\\\"message\\\": \\\"Invalid value\\\", {\\\"policy\\\": \\\"policy.example.com\\\", {\\\"binding\\\": \\\"policybinding.example.com\\\", {\\\"expressionIndex\\\": \\\"1\\\", {\\\"validationActions\\\": [\\\"Audit\\\"]}]\\\"`\\n\\nClients should expect to handle additional values by ignoring any values not recognized.\\n\\n\\\"Deny\\\" and \\\"Warn\\\" may not be used together since this combination needlessly duplicates the validation failure both in the API response body and the HTTP warning headers.\\n\\nRequired.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='validationActions', type=d.T.array)]), + withValidationActionsMixin(validationActions): { spec+: { validationActions+: if std.isArray(v=validationActions) then validationActions else [validationActions] } }, + }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1alpha1/validatingAdmissionPolicyBindingSpec.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1alpha1/validatingAdmissionPolicyBindingSpec.libsonnet new file mode 100644 index 00000000000..c08ef317d61 --- /dev/null +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1alpha1/validatingAdmissionPolicyBindingSpec.libsonnet @@ -0,0 +1,67 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='validatingAdmissionPolicyBindingSpec', url='', help='"ValidatingAdmissionPolicyBindingSpec is the specification of the ValidatingAdmissionPolicyBinding."'), + '#matchResources':: d.obj(help='"MatchResources decides whether to run the admission control policy on an object based on whether it meets the match criteria. The exclude rules take precedence over include rules (if a resource matches both, it is excluded)"'), + matchResources: { + '#namespaceSelector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), + namespaceSelector: { + '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressions(matchExpressions): { matchResources+: { namespaceSelector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } }, + '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. 
The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressionsMixin(matchExpressions): { matchResources+: { namespaceSelector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } }, + '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabels(matchLabels): { matchResources+: { namespaceSelector+: { matchLabels: matchLabels } } }, + '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabelsMixin(matchLabels): { matchResources+: { namespaceSelector+: { matchLabels+: matchLabels } } }, + }, + '#objectSelector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), + objectSelector: { + '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressions(matchExpressions): { matchResources+: { objectSelector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } }, + '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressionsMixin(matchExpressions): { matchResources+: { objectSelector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } }, + '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabels(matchLabels): { matchResources+: { objectSelector+: { matchLabels: matchLabels } } }, + '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". 
The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabelsMixin(matchLabels): { matchResources+: { objectSelector+: { matchLabels+: matchLabels } } }, + }, + '#withExcludeResourceRules':: d.fn(help='"ExcludeResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy should not care about. The exclude rules take precedence over include rules (if a resource matches both, it is excluded)"', args=[d.arg(name='excludeResourceRules', type=d.T.array)]), + withExcludeResourceRules(excludeResourceRules): { matchResources+: { excludeResourceRules: if std.isArray(v=excludeResourceRules) then excludeResourceRules else [excludeResourceRules] } }, + '#withExcludeResourceRulesMixin':: d.fn(help='"ExcludeResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy should not care about. The exclude rules take precedence over include rules (if a resource matches both, it is excluded)"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='excludeResourceRules', type=d.T.array)]), + withExcludeResourceRulesMixin(excludeResourceRules): { matchResources+: { excludeResourceRules+: if std.isArray(v=excludeResourceRules) then excludeResourceRules else [excludeResourceRules] } }, + '#withMatchPolicy':: d.fn(help='"matchPolicy defines how the \\"MatchResources\\" list is used to match incoming requests. Allowed values are \\"Exact\\" or \\"Equivalent\\".\\n\\n- Exact: match a request only if it exactly matches a specified rule. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, but \\"rules\\" only included `apiGroups:[\\"apps\\"], apiVersions:[\\"v1\\"], resources: [\\"deployments\\"]`, a request to apps/v1beta1 or extensions/v1beta1 would not be sent to the ValidatingAdmissionPolicy.\\n\\n- Equivalent: match a request if modifies a resource listed in rules, even via another API group or version. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, and \\"rules\\" only included `apiGroups:[\\"apps\\"], apiVersions:[\\"v1\\"], resources: [\\"deployments\\"]`, a request to apps/v1beta1 or extensions/v1beta1 would be converted to apps/v1 and sent to the ValidatingAdmissionPolicy.\\n\\nDefaults to \\"Equivalent\\', args=[d.arg(name='matchPolicy', type=d.T.string)]), + withMatchPolicy(matchPolicy): { matchResources+: { matchPolicy: matchPolicy } }, + '#withResourceRules':: d.fn(help='"ResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy matches. The policy cares about an operation if it matches _any_ Rule."', args=[d.arg(name='resourceRules', type=d.T.array)]), + withResourceRules(resourceRules): { matchResources+: { resourceRules: if std.isArray(v=resourceRules) then resourceRules else [resourceRules] } }, + '#withResourceRulesMixin':: d.fn(help='"ResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy matches. 
The policy cares about an operation if it matches _any_ Rule."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='resourceRules', type=d.T.array)]), + withResourceRulesMixin(resourceRules): { matchResources+: { resourceRules+: if std.isArray(v=resourceRules) then resourceRules else [resourceRules] } }, + }, + '#paramRef':: d.obj(help='"ParamRef describes how to locate the params to be used as input to expressions of rules applied by a policy binding."'), + paramRef: { + '#selector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), + selector: { + '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressions(matchExpressions): { paramRef+: { selector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } }, + '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressionsMixin(matchExpressions): { paramRef+: { selector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } }, + '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabels(matchLabels): { paramRef+: { selector+: { matchLabels: matchLabels } } }, + '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabelsMixin(matchLabels): { paramRef+: { selector+: { matchLabels+: matchLabels } } }, + }, + '#withName':: d.fn(help='"`name` is the name of the resource being referenced.\\n\\n`name` and `selector` are mutually exclusive properties. If one is set, the other must be unset."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { paramRef+: { name: name } }, + '#withNamespace':: d.fn(help='"namespace is the namespace of the referenced resource. Allows limiting the search for params to a specific namespace. Applies to both `name` and `selector` fields.\\n\\nA per-namespace parameter may be used by specifying a namespace-scoped `paramKind` in the policy and leaving this field empty.\\n\\n- If `paramKind` is cluster-scoped, this field MUST be unset. Setting this field results in a configuration error.\\n\\n- If `paramKind` is namespace-scoped, the namespace of the object being evaluated for admission will be used when this field is left unset. 
Take care that if this is left empty the binding must not match any cluster-scoped resources, which will result in an error."', args=[d.arg(name='namespace', type=d.T.string)]), + withNamespace(namespace): { paramRef+: { namespace: namespace } }, + '#withParameterNotFoundAction':: d.fn(help='"`parameterNotFoundAction` controls the behavior of the binding when the resource exists, and name or selector is valid, but there are no parameters matched by the binding. If the value is set to `Allow`, then no matched parameters will be treated as successful validation by the binding. If set to `Deny`, then no matched parameters will be subject to the `failurePolicy` of the policy.\\n\\nAllowed values are `Allow` or `Deny` Default to `Deny`"', args=[d.arg(name='parameterNotFoundAction', type=d.T.string)]), + withParameterNotFoundAction(parameterNotFoundAction): { paramRef+: { parameterNotFoundAction: parameterNotFoundAction } }, + }, + '#withPolicyName':: d.fn(help='"PolicyName references a ValidatingAdmissionPolicy name which the ValidatingAdmissionPolicyBinding binds to. If the referenced resource does not exist, this binding is considered invalid and will be ignored Required."', args=[d.arg(name='policyName', type=d.T.string)]), + withPolicyName(policyName): { policyName: policyName }, + '#withValidationActions':: d.fn(help="\"validationActions declares how Validations of the referenced ValidatingAdmissionPolicy are enforced. If a validation evaluates to false it is always enforced according to these actions.\\n\\nFailures defined by the ValidatingAdmissionPolicy's FailurePolicy are enforced according to these actions only if the FailurePolicy is set to Fail, otherwise the failures are ignored. This includes compilation errors, runtime errors and misconfigurations of the policy.\\n\\nvalidationActions is declared as a set of action values. Order does not matter. validationActions may not contain duplicates of the same action.\\n\\nThe supported actions values are:\\n\\n\\\"Deny\\\" specifies that a validation failure results in a denied request.\\n\\n\\\"Warn\\\" specifies that a validation failure is reported to the request client in HTTP Warning headers, with a warning code of 299. Warnings can be sent both for allowed or denied admission responses.\\n\\n\\\"Audit\\\" specifies that a validation failure is included in the published audit event for the request. 
The audit event will contain a `validation.policy.admission.k8s.io/validation_failure` audit annotation with a value containing the details of the validation failures, formatted as a JSON list of objects, each with the following fields: - message: The validation failure message string - policy: The resource name of the ValidatingAdmissionPolicy - binding: The resource name of the ValidatingAdmissionPolicyBinding - expressionIndex: The index of the failed validations in the ValidatingAdmissionPolicy - validationActions: The enforcement actions enacted for the validation failure Example audit annotation: `\\\"validation.policy.admission.k8s.io/validation_failure\\\": \\\"[{\\\"message\\\": \\\"Invalid value\\\", {\\\"policy\\\": \\\"policy.example.com\\\", {\\\"binding\\\": \\\"policybinding.example.com\\\", {\\\"expressionIndex\\\": \\\"1\\\", {\\\"validationActions\\\": [\\\"Audit\\\"]}]\\\"`\\n\\nClients should expect to handle additional values by ignoring any values not recognized.\\n\\n\\\"Deny\\\" and \\\"Warn\\\" may not be used together since this combination needlessly duplicates the validation failure both in the API response body and the HTTP warning headers.\\n\\nRequired.\"", args=[d.arg(name='validationActions', type=d.T.array)]), + withValidationActions(validationActions): { validationActions: if std.isArray(v=validationActions) then validationActions else [validationActions] }, + '#withValidationActionsMixin':: d.fn(help="\"validationActions declares how Validations of the referenced ValidatingAdmissionPolicy are enforced. If a validation evaluates to false it is always enforced according to these actions.\\n\\nFailures defined by the ValidatingAdmissionPolicy's FailurePolicy are enforced according to these actions only if the FailurePolicy is set to Fail, otherwise the failures are ignored. This includes compilation errors, runtime errors and misconfigurations of the policy.\\n\\nvalidationActions is declared as a set of action values. Order does not matter. validationActions may not contain duplicates of the same action.\\n\\nThe supported actions values are:\\n\\n\\\"Deny\\\" specifies that a validation failure results in a denied request.\\n\\n\\\"Warn\\\" specifies that a validation failure is reported to the request client in HTTP Warning headers, with a warning code of 299. Warnings can be sent both for allowed or denied admission responses.\\n\\n\\\"Audit\\\" specifies that a validation failure is included in the published audit event for the request. 
The audit event will contain a `validation.policy.admission.k8s.io/validation_failure` audit annotation with a value containing the details of the validation failures, formatted as a JSON list of objects, each with the following fields: - message: The validation failure message string - policy: The resource name of the ValidatingAdmissionPolicy - binding: The resource name of the ValidatingAdmissionPolicyBinding - expressionIndex: The index of the failed validations in the ValidatingAdmissionPolicy - validationActions: The enforcement actions enacted for the validation failure Example audit annotation: `\\\"validation.policy.admission.k8s.io/validation_failure\\\": \\\"[{\\\"message\\\": \\\"Invalid value\\\", {\\\"policy\\\": \\\"policy.example.com\\\", {\\\"binding\\\": \\\"policybinding.example.com\\\", {\\\"expressionIndex\\\": \\\"1\\\", {\\\"validationActions\\\": [\\\"Audit\\\"]}]\\\"`\\n\\nClients should expect to handle additional values by ignoring any values not recognized.\\n\\n\\\"Deny\\\" and \\\"Warn\\\" may not be used together since this combination needlessly duplicates the validation failure both in the API response body and the HTTP warning headers.\\n\\nRequired.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='validationActions', type=d.T.array)]), + withValidationActionsMixin(validationActions): { validationActions+: if std.isArray(v=validationActions) then validationActions else [validationActions] }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1alpha1/validatingAdmissionPolicySpec.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1alpha1/validatingAdmissionPolicySpec.libsonnet new file mode 100644 index 00000000000..3bc96c0cd22 --- /dev/null +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1alpha1/validatingAdmissionPolicySpec.libsonnet @@ -0,0 +1,66 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='validatingAdmissionPolicySpec', url='', help='"ValidatingAdmissionPolicySpec is the specification of the desired behavior of the AdmissionPolicy."'), + '#matchConstraints':: d.obj(help='"MatchResources decides whether to run the admission control policy on an object based on whether it meets the match criteria. The exclude rules take precedence over include rules (if a resource matches both, it is excluded)"'), + matchConstraints: { + '#namespaceSelector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), + namespaceSelector: { + '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressions(matchExpressions): { matchConstraints+: { namespaceSelector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } }, + '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. 
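Editor's aside (not part of the patch): the `validatingAdmissionPolicyBindingSpec` helpers added above compose by plain object addition, like the rest of this generated library. A minimal sketch of chaining them follows; the import path, policy name, param name and labels are illustrative assumptions, not values from this change.

// Sketch only: import path and all names/values are assumptions.
local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';
local bindingSpec = k.admissionregistration.v1alpha1.validatingAdmissionPolicyBindingSpec;

// Bind a hypothetical policy, enforce it as Deny+Audit, point it at a named
// param resource, and restrict it to namespaces labelled team=a.
bindingSpec.withPolicyName('replica-limit.example.com')
+ bindingSpec.withValidationActions(['Deny', 'Audit'])
+ bindingSpec.paramRef.withName('replica-limit-params')
+ bindingSpec.paramRef.withParameterNotFoundAction('Deny')
+ bindingSpec.matchResources.namespaceSelector.withMatchLabels({ team: 'a' })

Note that Deny+Audit is a valid combination of validationActions; Deny+Warn is the pairing the API rejects.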
The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressionsMixin(matchExpressions): { matchConstraints+: { namespaceSelector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } }, + '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabels(matchLabels): { matchConstraints+: { namespaceSelector+: { matchLabels: matchLabels } } }, + '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabelsMixin(matchLabels): { matchConstraints+: { namespaceSelector+: { matchLabels+: matchLabels } } }, + }, + '#objectSelector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), + objectSelector: { + '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressions(matchExpressions): { matchConstraints+: { objectSelector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } }, + '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressionsMixin(matchExpressions): { matchConstraints+: { objectSelector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } }, + '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabels(matchLabels): { matchConstraints+: { objectSelector+: { matchLabels: matchLabels } } }, + '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". 
The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabelsMixin(matchLabels): { matchConstraints+: { objectSelector+: { matchLabels+: matchLabels } } }, + }, + '#withExcludeResourceRules':: d.fn(help='"ExcludeResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy should not care about. The exclude rules take precedence over include rules (if a resource matches both, it is excluded)"', args=[d.arg(name='excludeResourceRules', type=d.T.array)]), + withExcludeResourceRules(excludeResourceRules): { matchConstraints+: { excludeResourceRules: if std.isArray(v=excludeResourceRules) then excludeResourceRules else [excludeResourceRules] } }, + '#withExcludeResourceRulesMixin':: d.fn(help='"ExcludeResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy should not care about. The exclude rules take precedence over include rules (if a resource matches both, it is excluded)"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='excludeResourceRules', type=d.T.array)]), + withExcludeResourceRulesMixin(excludeResourceRules): { matchConstraints+: { excludeResourceRules+: if std.isArray(v=excludeResourceRules) then excludeResourceRules else [excludeResourceRules] } }, + '#withMatchPolicy':: d.fn(help='"matchPolicy defines how the \\"MatchResources\\" list is used to match incoming requests. Allowed values are \\"Exact\\" or \\"Equivalent\\".\\n\\n- Exact: match a request only if it exactly matches a specified rule. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, but \\"rules\\" only included `apiGroups:[\\"apps\\"], apiVersions:[\\"v1\\"], resources: [\\"deployments\\"]`, a request to apps/v1beta1 or extensions/v1beta1 would not be sent to the ValidatingAdmissionPolicy.\\n\\n- Equivalent: match a request if modifies a resource listed in rules, even via another API group or version. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, and \\"rules\\" only included `apiGroups:[\\"apps\\"], apiVersions:[\\"v1\\"], resources: [\\"deployments\\"]`, a request to apps/v1beta1 or extensions/v1beta1 would be converted to apps/v1 and sent to the ValidatingAdmissionPolicy.\\n\\nDefaults to \\"Equivalent\\', args=[d.arg(name='matchPolicy', type=d.T.string)]), + withMatchPolicy(matchPolicy): { matchConstraints+: { matchPolicy: matchPolicy } }, + '#withResourceRules':: d.fn(help='"ResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy matches. The policy cares about an operation if it matches _any_ Rule."', args=[d.arg(name='resourceRules', type=d.T.array)]), + withResourceRules(resourceRules): { matchConstraints+: { resourceRules: if std.isArray(v=resourceRules) then resourceRules else [resourceRules] } }, + '#withResourceRulesMixin':: d.fn(help='"ResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy matches. 
The policy cares about an operation if it matches _any_ Rule."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='resourceRules', type=d.T.array)]), + withResourceRulesMixin(resourceRules): { matchConstraints+: { resourceRules+: if std.isArray(v=resourceRules) then resourceRules else [resourceRules] } }, + }, + '#paramKind':: d.obj(help='"ParamKind is a tuple of Group Kind and Version."'), + paramKind: { + '#withApiVersion':: d.fn(help='"APIVersion is the API group version the resources belong to. In format of \\"group/version\\". Required."', args=[d.arg(name='apiVersion', type=d.T.string)]), + withApiVersion(apiVersion): { paramKind+: { apiVersion: apiVersion } }, + '#withKind':: d.fn(help='"Kind is the API kind the resources belong to. Required."', args=[d.arg(name='kind', type=d.T.string)]), + withKind(kind): { paramKind+: { kind: kind } }, + }, + '#withAuditAnnotations':: d.fn(help='"auditAnnotations contains CEL expressions which are used to produce audit annotations for the audit event of the API request. validations and auditAnnotations may not both be empty; a least one of validations or auditAnnotations is required."', args=[d.arg(name='auditAnnotations', type=d.T.array)]), + withAuditAnnotations(auditAnnotations): { auditAnnotations: if std.isArray(v=auditAnnotations) then auditAnnotations else [auditAnnotations] }, + '#withAuditAnnotationsMixin':: d.fn(help='"auditAnnotations contains CEL expressions which are used to produce audit annotations for the audit event of the API request. validations and auditAnnotations may not both be empty; a least one of validations or auditAnnotations is required."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='auditAnnotations', type=d.T.array)]), + withAuditAnnotationsMixin(auditAnnotations): { auditAnnotations+: if std.isArray(v=auditAnnotations) then auditAnnotations else [auditAnnotations] }, + '#withFailurePolicy':: d.fn(help='"failurePolicy defines how to handle failures for the admission policy. Failures can occur from CEL expression parse errors, type check errors, runtime errors and invalid or mis-configured policy definitions or bindings.\\n\\nA policy is invalid if spec.paramKind refers to a non-existent Kind. A binding is invalid if spec.paramRef.name refers to a non-existent resource.\\n\\nfailurePolicy does not define how validations that evaluate to false are handled.\\n\\nWhen failurePolicy is set to Fail, ValidatingAdmissionPolicyBinding validationActions define how failures are enforced.\\n\\nAllowed values are Ignore or Fail. Defaults to Fail."', args=[d.arg(name='failurePolicy', type=d.T.string)]), + withFailurePolicy(failurePolicy): { failurePolicy: failurePolicy }, + '#withMatchConditions':: d.fn(help='"MatchConditions is a list of conditions that must be met for a request to be validated. Match conditions filter requests that have already been matched by the rules, namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests. There are a maximum of 64 match conditions allowed.\\n\\nIf a parameter object is provided, it can be accessed via the `params` handle in the same manner as validation expressions.\\n\\nThe exact matching logic is (in order):\\n 1. If ANY matchCondition evaluates to FALSE, the policy is skipped.\\n 2. If ALL matchConditions evaluate to TRUE, the policy is evaluated.\\n 3. 
If any matchCondition evaluates to an error (but none are FALSE):\\n - If failurePolicy=Fail, reject the request\\n - If failurePolicy=Ignore, the policy is skipped"', args=[d.arg(name='matchConditions', type=d.T.array)]), + withMatchConditions(matchConditions): { matchConditions: if std.isArray(v=matchConditions) then matchConditions else [matchConditions] }, + '#withMatchConditionsMixin':: d.fn(help='"MatchConditions is a list of conditions that must be met for a request to be validated. Match conditions filter requests that have already been matched by the rules, namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests. There are a maximum of 64 match conditions allowed.\\n\\nIf a parameter object is provided, it can be accessed via the `params` handle in the same manner as validation expressions.\\n\\nThe exact matching logic is (in order):\\n 1. If ANY matchCondition evaluates to FALSE, the policy is skipped.\\n 2. If ALL matchConditions evaluate to TRUE, the policy is evaluated.\\n 3. If any matchCondition evaluates to an error (but none are FALSE):\\n - If failurePolicy=Fail, reject the request\\n - If failurePolicy=Ignore, the policy is skipped"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchConditions', type=d.T.array)]), + withMatchConditionsMixin(matchConditions): { matchConditions+: if std.isArray(v=matchConditions) then matchConditions else [matchConditions] }, + '#withValidations':: d.fn(help='"Validations contain CEL expressions which is used to apply the validation. Validations and AuditAnnotations may not both be empty; a minimum of one Validations or AuditAnnotations is required."', args=[d.arg(name='validations', type=d.T.array)]), + withValidations(validations): { validations: if std.isArray(v=validations) then validations else [validations] }, + '#withValidationsMixin':: d.fn(help='"Validations contain CEL expressions which is used to apply the validation. Validations and AuditAnnotations may not both be empty; a minimum of one Validations or AuditAnnotations is required."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='validations', type=d.T.array)]), + withValidationsMixin(validations): { validations+: if std.isArray(v=validations) then validations else [validations] }, + '#withVariables':: d.fn(help='"Variables contain definitions of variables that can be used in composition of other expressions. Each variable is defined as a named CEL expression. The variables defined here will be available under `variables` in other expressions of the policy except MatchConditions because MatchConditions are evaluated before the rest of the policy.\\n\\nThe expression of a variable can refer to other variables defined earlier in the list but not those after. Thus, Variables must be sorted by the order of first appearance and acyclic."', args=[d.arg(name='variables', type=d.T.array)]), + withVariables(variables): { variables: if std.isArray(v=variables) then variables else [variables] }, + '#withVariablesMixin':: d.fn(help='"Variables contain definitions of variables that can be used in composition of other expressions. Each variable is defined as a named CEL expression. 
The variables defined here will be available under `variables` in other expressions of the policy except MatchConditions because MatchConditions are evaluated before the rest of the policy.\\n\\nThe expression of a variable can refer to other variables defined earlier in the list but not those after. Thus, Variables must be sorted by the order of first appearance and acyclic."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='variables', type=d.T.array)]), + withVariablesMixin(variables): { variables+: if std.isArray(v=variables) then variables else [variables] }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1alpha1/validatingAdmissionPolicyStatus.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1alpha1/validatingAdmissionPolicyStatus.libsonnet new file mode 100644 index 00000000000..a9130f09b74 --- /dev/null +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1alpha1/validatingAdmissionPolicyStatus.libsonnet @@ -0,0 +1,19 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='validatingAdmissionPolicyStatus', url='', help='"ValidatingAdmissionPolicyStatus represents the status of a ValidatingAdmissionPolicy."'), + '#typeChecking':: d.obj(help='"TypeChecking contains results of type checking the expressions in the ValidatingAdmissionPolicy"'), + typeChecking: { + '#withExpressionWarnings':: d.fn(help='"The type checking warnings for each expression."', args=[d.arg(name='expressionWarnings', type=d.T.array)]), + withExpressionWarnings(expressionWarnings): { typeChecking+: { expressionWarnings: if std.isArray(v=expressionWarnings) then expressionWarnings else [expressionWarnings] } }, + '#withExpressionWarningsMixin':: d.fn(help='"The type checking warnings for each expression."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='expressionWarnings', type=d.T.array)]), + withExpressionWarningsMixin(expressionWarnings): { typeChecking+: { expressionWarnings+: if std.isArray(v=expressionWarnings) then expressionWarnings else [expressionWarnings] } }, + }, + '#withConditions':: d.fn(help="\"The conditions represent the latest available observations of a policy's current state.\"", args=[d.arg(name='conditions', type=d.T.array)]), + withConditions(conditions): { conditions: if std.isArray(v=conditions) then conditions else [conditions] }, + '#withConditionsMixin':: d.fn(help="\"The conditions represent the latest available observations of a policy's current state.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='conditions', type=d.T.array)]), + withConditionsMixin(conditions): { conditions+: if std.isArray(v=conditions) then conditions else [conditions] }, + '#withObservedGeneration':: d.fn(help='"The generation observed by the controller."', args=[d.arg(name='observedGeneration', type=d.T.integer)]), + withObservedGeneration(observedGeneration): { observedGeneration: observedGeneration }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1alpha1/validation.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1alpha1/validation.libsonnet new file mode 100644 index 00000000000..db493d390af --- /dev/null +++ 
b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1alpha1/validation.libsonnet @@ -0,0 +1,14 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='validation', url='', help='"Validation specifies the CEL expression which is used to apply the validation."'), + '#withExpression':: d.fn(help="\"Expression represents the expression which will be evaluated by CEL. ref: https://github.com/google/cel-spec CEL expressions have access to the contents of the API request/response, organized into CEL variables as well as some other useful variables:\\n\\n- 'object' - The object from the incoming request. The value is null for DELETE requests. - 'oldObject' - The existing object. The value is null for CREATE requests. - 'request' - Attributes of the API request([ref](/pkg/apis/admission/types.go#AdmissionRequest)). - 'params' - Parameter resource referred to by the policy binding being evaluated. Only populated if the policy has a ParamKind. - 'namespaceObject' - The namespace object that the incoming object belongs to. The value is null for cluster-scoped resources. - 'variables' - Map of composited variables, from its name to its lazily evaluated value.\\n For example, a variable named 'foo' can be accessed as 'variables.foo'.\\n- 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request.\\n See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz\\n- 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the\\n request resource.\\n\\nThe `apiVersion`, `kind`, `metadata.name` and `metadata.generateName` are always accessible from the root of the object. No other metadata properties are accessible.\\n\\nOnly property names of the form `[a-zA-Z_.-/][a-zA-Z0-9_.-/]*` are accessible. Accessible property names are escaped according to the following rules when accessed in the expression: - '__' escapes to '__underscores__' - '.' escapes to '__dot__' - '-' escapes to '__dash__' - '/' escapes to '__slash__' - Property names that exactly match a CEL RESERVED keyword escape to '__{keyword}__'. The keywords are:\\n\\t \\\"true\\\", \\\"false\\\", \\\"null\\\", \\\"in\\\", \\\"as\\\", \\\"break\\\", \\\"const\\\", \\\"continue\\\", \\\"else\\\", \\\"for\\\", \\\"function\\\", \\\"if\\\",\\n\\t \\\"import\\\", \\\"let\\\", \\\"loop\\\", \\\"package\\\", \\\"namespace\\\", \\\"return\\\".\\nExamples:\\n - Expression accessing a property named \\\"namespace\\\": {\\\"Expression\\\": \\\"object.__namespace__ \u003e 0\\\"}\\n - Expression accessing a property named \\\"x-prop\\\": {\\\"Expression\\\": \\\"object.x__dash__prop \u003e 0\\\"}\\n - Expression accessing a property named \\\"redact__d\\\": {\\\"Expression\\\": \\\"object.redact__underscores__d \u003e 0\\\"}\\n\\nEquality on arrays with list type of 'set' or 'map' ignores element order, i.e. [1, 2] == [2, 1]. Concatenation on arrays with x-kubernetes-list-type use the semantics of the list type:\\n - 'set': `X + Y` performs a union where the array positions of all elements in `X` are preserved and\\n non-intersecting elements in `Y` are appended, retaining their partial order.\\n - 'map': `X + Y` performs a merge where the array positions of all keys in `X` are preserved but the values\\n are overwritten by values in `Y` when the key sets of `X` and `Y` intersect. 
Elements in `Y` with\\n non-intersecting keys are appended, retaining their partial order.\\nRequired.\"", args=[d.arg(name='expression', type=d.T.string)]), + withExpression(expression): { expression: expression }, + '#withMessage':: d.fn(help='"Message represents the message displayed when validation fails. The message is required if the Expression contains line breaks. The message must not contain line breaks. If unset, the message is \\"failed rule: {Rule}\\". e.g. \\"must be a URL with the host matching spec.host\\" If the Expression contains line breaks. Message is required. The message must not contain line breaks. If unset, the message is \\"failed Expression: {Expression}\\"."', args=[d.arg(name='message', type=d.T.string)]), + withMessage(message): { message: message }, + '#withMessageExpression':: d.fn(help="\"messageExpression declares a CEL expression that evaluates to the validation failure message that is returned when this rule fails. Since messageExpression is used as a failure message, it must evaluate to a string. If both message and messageExpression are present on a validation, then messageExpression will be used if validation fails. If messageExpression results in a runtime error, the runtime error is logged, and the validation failure message is produced as if the messageExpression field were unset. If messageExpression evaluates to an empty string, a string with only spaces, or a string that contains line breaks, then the validation failure message will also be produced as if the messageExpression field were unset, and the fact that messageExpression produced an empty string/string with only spaces/string with line breaks will be logged. messageExpression has access to all the same variables as the `expression` except for 'authorizer' and 'authorizer.requestResource'. Example: \\\"object.x must be less than max (\\\"+string(params.max)+\\\")\\", args=[d.arg(name='messageExpression', type=d.T.string)]), + withMessageExpression(messageExpression): { messageExpression: messageExpression }, + '#withReason':: d.fn(help='"Reason represents a machine-readable description of why this validation failed. If this is the first validation in the list to fail, this reason, as well as the corresponding HTTP response code, are used in the HTTP response to the client. The currently supported reasons are: \\"Unauthorized\\", \\"Forbidden\\", \\"Invalid\\", \\"RequestEntityTooLarge\\". If not set, StatusReasonInvalid is used in the response to the client."', args=[d.arg(name='reason', type=d.T.string)]), + withReason(reason): { reason: reason }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1alpha1/variable.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1alpha1/variable.libsonnet new file mode 100644 index 00000000000..98f05ca7b2f --- /dev/null +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1alpha1/variable.libsonnet @@ -0,0 +1,10 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='variable', url='', help='"Variable is the definition of a variable that is used for composition."'), + '#withExpression':: d.fn(help='"Expression is the expression that will be evaluated as the value of the variable. 
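Editor's aside (not part of the patch): the `validation` object above wraps a single CEL rule, with `message`/`messageExpression` as alternative failure messages and `reason` mapped onto the HTTP response. A hedged sketch, assuming the policy declares a `paramKind` so that `params` is available to CEL:

// Sketch only: the expression and params field are illustrative assumptions.
local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';
local validation = k.admissionregistration.v1alpha1.validation;

validation.withExpression('object.spec.replicas <= params.maxReplicas')
+ validation.withMessageExpression("'replicas must be no more than ' + string(params.maxReplicas)")
+ validation.withReason('Invalid')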
The CEL expression has access to the same identifiers as the CEL expressions in Validation."', args=[d.arg(name='expression', type=d.T.string)]), + withExpression(expression): { expression: expression }, + '#withName':: d.fn(help='"Name is the name of the variable. The name must be a valid CEL identifier and unique among all variables. The variable can be accessed in other expressions through `variables` For example, if name is \\"foo\\", the variable will be available as `variables.foo`"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { name: name }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1beta1/auditAnnotation.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1beta1/auditAnnotation.libsonnet new file mode 100644 index 00000000000..96c7387270c --- /dev/null +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1beta1/auditAnnotation.libsonnet @@ -0,0 +1,10 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='auditAnnotation', url='', help='"AuditAnnotation describes how to produce an audit annotation for an API request."'), + '#withKey':: d.fn(help='"key specifies the audit annotation key. The audit annotation keys of a ValidatingAdmissionPolicy must be unique. The key must be a qualified name ([A-Za-z0-9][-A-Za-z0-9_.]*) no more than 63 bytes in length.\\n\\nThe key is combined with the resource name of the ValidatingAdmissionPolicy to construct an audit annotation key: \\"{ValidatingAdmissionPolicy name}/{key}\\".\\n\\nIf an admission webhook uses the same resource name as this ValidatingAdmissionPolicy and the same audit annotation key, the annotation key will be identical. In this case, the first annotation written with the key will be included in the audit event and all subsequent annotations with the same key will be discarded.\\n\\nRequired."', args=[d.arg(name='key', type=d.T.string)]), + withKey(key): { key: key }, + '#withValueExpression':: d.fn(help='"valueExpression represents the expression which is evaluated by CEL to produce an audit annotation value. The expression must evaluate to either a string or null value. If the expression evaluates to a string, the audit annotation is included with the string value. If the expression evaluates to null or empty string the audit annotation will be omitted. The valueExpression may be no longer than 5kb in length. If the result of the valueExpression is more than 10kb in length, it will be truncated to 10kb.\\n\\nIf multiple ValidatingAdmissionPolicyBinding resources match an API request, then the valueExpression will be evaluated for each binding. 
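Editor's aside (not part of the patch): putting the new v1alpha1 pieces together (`validatingAdmissionPolicySpec`, `variable`, `validation`), a complete spec could be composed roughly as below. Everything in the sketch (import path, labels, CEL expressions) is an illustrative assumption.

// Sketch only: a variable defined once is reused in the validation via `variables.<name>`.
local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';
local policySpec = k.admissionregistration.v1alpha1.validatingAdmissionPolicySpec;
local validation = k.admissionregistration.v1alpha1.validation;
local variable = k.admissionregistration.v1alpha1.variable;

policySpec.withFailurePolicy('Fail')
+ policySpec.matchConstraints.namespaceSelector.withMatchLabels({ environment: 'prod' })
+ policySpec.withVariables([
  variable.withName('replicas') + variable.withExpression('object.spec.replicas'),
])
+ policySpec.withValidations([
  validation.withExpression('variables.replicas <= 5')
  + validation.withMessage('prod workloads may not exceed 5 replicas'),
])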
All unique values produced by the valueExpressions will be joined together in a comma-separated list.\\n\\nRequired."', args=[d.arg(name='valueExpression', type=d.T.string)]), + withValueExpression(valueExpression): { valueExpression: valueExpression }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1beta1/expressionWarning.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1beta1/expressionWarning.libsonnet new file mode 100644 index 00000000000..895ad9dcdcb --- /dev/null +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1beta1/expressionWarning.libsonnet @@ -0,0 +1,10 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='expressionWarning', url='', help='"ExpressionWarning is a warning information that targets a specific expression."'), + '#withFieldRef':: d.fn(help='"The path to the field that refers the expression. For example, the reference to the expression of the first item of validations is \\"spec.validations[0].expression\\', args=[d.arg(name='fieldRef', type=d.T.string)]), + withFieldRef(fieldRef): { fieldRef: fieldRef }, + '#withWarning':: d.fn(help='"The content of type checking information in a human-readable form. Each line of the warning contains the type that the expression is checked against, followed by the type check error from the compiler."', args=[d.arg(name='warning', type=d.T.string)]), + withWarning(warning): { warning: warning }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1beta1/main.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1beta1/main.libsonnet new file mode 100644 index 00000000000..4a6b9fd4b56 --- /dev/null +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1beta1/main.libsonnet @@ -0,0 +1,19 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='v1beta1', url='', help=''), + auditAnnotation: (import 'auditAnnotation.libsonnet'), + expressionWarning: (import 'expressionWarning.libsonnet'), + matchCondition: (import 'matchCondition.libsonnet'), + matchResources: (import 'matchResources.libsonnet'), + namedRuleWithOperations: (import 'namedRuleWithOperations.libsonnet'), + paramKind: (import 'paramKind.libsonnet'), + paramRef: (import 'paramRef.libsonnet'), + typeChecking: (import 'typeChecking.libsonnet'), + validatingAdmissionPolicy: (import 'validatingAdmissionPolicy.libsonnet'), + validatingAdmissionPolicyBinding: (import 'validatingAdmissionPolicyBinding.libsonnet'), + validatingAdmissionPolicyBindingSpec: (import 'validatingAdmissionPolicyBindingSpec.libsonnet'), + validatingAdmissionPolicySpec: (import 'validatingAdmissionPolicySpec.libsonnet'), + validatingAdmissionPolicyStatus: (import 'validatingAdmissionPolicyStatus.libsonnet'), + validation: (import 'validation.libsonnet'), + variable: (import 'variable.libsonnet'), +} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1beta1/matchCondition.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1beta1/matchCondition.libsonnet new file mode 100644 index 00000000000..8d196606489 --- /dev/null +++ 
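Editor's aside (not part of the patch): the v1beta1 `auditAnnotation` helper builds the key/valueExpression pairs that a policy spec's `withAuditAnnotations` expects; the expression must evaluate to a string (annotation emitted) or null (annotation omitted). A small sketch with an assumed key and CEL expression:

// Sketch only: emits an annotation only when replicas exceed an arbitrary threshold.
local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';
local auditAnnotation = k.admissionregistration.v1beta1.auditAnnotation;

auditAnnotation.withKey('high-replica-count')
+ auditAnnotation.withValueExpression("object.spec.replicas > 50 ? 'replicas=' + string(object.spec.replicas) : null")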
b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1beta1/matchCondition.libsonnet @@ -0,0 +1,10 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='matchCondition', url='', help='"MatchCondition represents a condition which must be fulfilled for a request to be sent to a webhook."'), + '#withExpression':: d.fn(help="\"Expression represents the expression which will be evaluated by CEL. Must evaluate to bool. CEL expressions have access to the contents of the AdmissionRequest and Authorizer, organized into CEL variables:\\n\\n'object' - The object from the incoming request. The value is null for DELETE requests. 'oldObject' - The existing object. The value is null for CREATE requests. 'request' - Attributes of the admission request(/pkg/apis/admission/types.go#AdmissionRequest). 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request.\\n See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz\\n'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the\\n request resource.\\nDocumentation on CEL: https://kubernetes.io/docs/reference/using-api/cel/\\n\\nRequired.\"", args=[d.arg(name='expression', type=d.T.string)]), + withExpression(expression): { expression: expression }, + '#withName':: d.fn(help="\"Name is an identifier for this match condition, used for strategic merging of MatchConditions, as well as providing an identifier for logging purposes. A good name should be descriptive of the associated expression. Name must be a qualified name consisting of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character (e.g. 'MyName', or 'my.name', or '123-abc', regex used for validation is '([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]') with an optional DNS subdomain prefix and '/' (e.g. 'example.com/MyName')\\n\\nRequired.\"", args=[d.arg(name='name', type=d.T.string)]), + withName(name): { name: name }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1beta1/matchResources.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1beta1/matchResources.libsonnet new file mode 100644 index 00000000000..0215d0c956c --- /dev/null +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1beta1/matchResources.libsonnet @@ -0,0 +1,38 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='matchResources', url='', help='"MatchResources decides whether to run the admission control policy on an object based on whether it meets the match criteria. The exclude rules take precedence over include rules (if a resource matches both, it is excluded)"'), + '#namespaceSelector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), + namespaceSelector: { + '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. 
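Editor's aside (not part of the patch): `matchCondition` entries are what `withMatchConditions` expects on a policy or webhook; each needs a name and a boolean CEL expression. Sketch only, with an assumed name and the common "skip node requests" expression:

// Sketch only: filters out requests made by node identities before the policy runs.
local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';
local matchCondition = k.admissionregistration.v1beta1.matchCondition;

matchCondition.withName('exclude-kubelet-requests')
+ matchCondition.withExpression("!('system:nodes' in request.userInfo.groups)")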
The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressions(matchExpressions): { namespaceSelector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } }, + '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressionsMixin(matchExpressions): { namespaceSelector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } }, + '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabels(matchLabels): { namespaceSelector+: { matchLabels: matchLabels } }, + '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabelsMixin(matchLabels): { namespaceSelector+: { matchLabels+: matchLabels } }, + }, + '#objectSelector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), + objectSelector: { + '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressions(matchExpressions): { objectSelector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } }, + '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressionsMixin(matchExpressions): { objectSelector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } }, + '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabels(matchLabels): { objectSelector+: { matchLabels: matchLabels } }, + '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". 
The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabelsMixin(matchLabels): { objectSelector+: { matchLabels+: matchLabels } }, + }, + '#withExcludeResourceRules':: d.fn(help='"ExcludeResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy should not care about. The exclude rules take precedence over include rules (if a resource matches both, it is excluded)"', args=[d.arg(name='excludeResourceRules', type=d.T.array)]), + withExcludeResourceRules(excludeResourceRules): { excludeResourceRules: if std.isArray(v=excludeResourceRules) then excludeResourceRules else [excludeResourceRules] }, + '#withExcludeResourceRulesMixin':: d.fn(help='"ExcludeResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy should not care about. The exclude rules take precedence over include rules (if a resource matches both, it is excluded)"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='excludeResourceRules', type=d.T.array)]), + withExcludeResourceRulesMixin(excludeResourceRules): { excludeResourceRules+: if std.isArray(v=excludeResourceRules) then excludeResourceRules else [excludeResourceRules] }, + '#withMatchPolicy':: d.fn(help='"matchPolicy defines how the \\"MatchResources\\" list is used to match incoming requests. Allowed values are \\"Exact\\" or \\"Equivalent\\".\\n\\n- Exact: match a request only if it exactly matches a specified rule. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, but \\"rules\\" only included `apiGroups:[\\"apps\\"], apiVersions:[\\"v1\\"], resources: [\\"deployments\\"]`, a request to apps/v1beta1 or extensions/v1beta1 would not be sent to the ValidatingAdmissionPolicy.\\n\\n- Equivalent: match a request if modifies a resource listed in rules, even via another API group or version. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, and \\"rules\\" only included `apiGroups:[\\"apps\\"], apiVersions:[\\"v1\\"], resources: [\\"deployments\\"]`, a request to apps/v1beta1 or extensions/v1beta1 would be converted to apps/v1 and sent to the ValidatingAdmissionPolicy.\\n\\nDefaults to \\"Equivalent\\', args=[d.arg(name='matchPolicy', type=d.T.string)]), + withMatchPolicy(matchPolicy): { matchPolicy: matchPolicy }, + '#withResourceRules':: d.fn(help='"ResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy matches. The policy cares about an operation if it matches _any_ Rule."', args=[d.arg(name='resourceRules', type=d.T.array)]), + withResourceRules(resourceRules): { resourceRules: if std.isArray(v=resourceRules) then resourceRules else [resourceRules] }, + '#withResourceRulesMixin':: d.fn(help='"ResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy matches. 
The policy cares about an operation if it matches _any_ Rule."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='resourceRules', type=d.T.array)]), + withResourceRulesMixin(resourceRules): { resourceRules+: if std.isArray(v=resourceRules) then resourceRules else [resourceRules] }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/admissionregistration/v1beta1/ruleWithOperations.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1beta1/namedRuleWithOperations.libsonnet similarity index 83% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/admissionregistration/v1beta1/ruleWithOperations.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1beta1/namedRuleWithOperations.libsonnet index d50b9c57c0c..a1726a57081 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/admissionregistration/v1beta1/ruleWithOperations.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1beta1/namedRuleWithOperations.libsonnet @@ -1,6 +1,6 @@ { local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='ruleWithOperations', url='', help='"RuleWithOperations is a tuple of Operations and Resources. It is recommended to make sure that all the tuple expansions are valid."'), + '#':: d.pkg(name='namedRuleWithOperations', url='', help='"NamedRuleWithOperations is a tuple of Operations and Resources with ResourceNames."'), '#withApiGroups':: d.fn(help="\"APIGroups is the API groups the resources belong to. '*' is all groups. If '*' is present, the length of the slice must be one. Required.\"", args=[d.arg(name='apiGroups', type=d.T.array)]), withApiGroups(apiGroups): { apiGroups: if std.isArray(v=apiGroups) then apiGroups else [apiGroups] }, '#withApiGroupsMixin':: d.fn(help="\"APIGroups is the API groups the resources belong to. '*' is all groups. If '*' is present, the length of the slice must be one. Required.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='apiGroups', type=d.T.array)]), @@ -13,6 +13,10 @@ withOperations(operations): { operations: if std.isArray(v=operations) then operations else [operations] }, '#withOperationsMixin':: d.fn(help="\"Operations is the operations the admission hook cares about - CREATE, UPDATE, DELETE, CONNECT or * for all of those operations and any future admission operations that are added. If '*' is present, the length of the slice must be one. Required.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='operations', type=d.T.array)]), withOperationsMixin(operations): { operations+: if std.isArray(v=operations) then operations else [operations] }, + '#withResourceNames':: d.fn(help='"ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed."', args=[d.arg(name='resourceNames', type=d.T.array)]), + withResourceNames(resourceNames): { resourceNames: if std.isArray(v=resourceNames) then resourceNames else [resourceNames] }, + '#withResourceNamesMixin':: d.fn(help='"ResourceNames is an optional white list of names that the rule applies to. 
An empty set means that everything is allowed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='resourceNames', type=d.T.array)]), + withResourceNamesMixin(resourceNames): { resourceNames+: if std.isArray(v=resourceNames) then resourceNames else [resourceNames] }, '#withResources':: d.fn(help="\"Resources is a list of resources this rule applies to.\\n\\nFor example: 'pods' means pods. 'pods/log' means the log subresource of pods. '*' means all resources, but not subresources. 'pods/*' means all subresources of pods. '*/scale' means all scale subresources. '*/*' means all resources and their subresources.\\n\\nIf wildcard is present, the validation rule will ensure resources do not overlap with each other.\\n\\nDepending on the enclosing object, subresources might not be allowed. Required.\"", args=[d.arg(name='resources', type=d.T.array)]), withResources(resources): { resources: if std.isArray(v=resources) then resources else [resources] }, '#withResourcesMixin':: d.fn(help="\"Resources is a list of resources this rule applies to.\\n\\nFor example: 'pods' means pods. 'pods/log' means the log subresource of pods. '*' means all resources, but not subresources. 'pods/*' means all subresources of pods. '*/scale' means all scale subresources. '*/*' means all resources and their subresources.\\n\\nIf wildcard is present, the validation rule will ensure resources do not overlap with each other.\\n\\nDepending on the enclosing object, subresources might not be allowed. Required.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='resources', type=d.T.array)]), diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1beta1/paramKind.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1beta1/paramKind.libsonnet new file mode 100644 index 00000000000..11a349422a7 --- /dev/null +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1beta1/paramKind.libsonnet @@ -0,0 +1,8 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='paramKind', url='', help='"ParamKind is a tuple of Group Kind and Version."'), + '#withKind':: d.fn(help='"Kind is the API kind the resources belong to. Required."', args=[d.arg(name='kind', type=d.T.string)]), + withKind(kind): { kind: kind }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1beta1/paramRef.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1beta1/paramRef.libsonnet new file mode 100644 index 00000000000..51da556e5c7 --- /dev/null +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1beta1/paramRef.libsonnet @@ -0,0 +1,23 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='paramRef', url='', help='"ParamRef describes how to locate the params to be used as input to expressions of rules applied by a policy binding."'), + '#selector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), + selector: { + '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. 
The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressions(matchExpressions): { selector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } }, + '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressionsMixin(matchExpressions): { selector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } }, + '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabels(matchLabels): { selector+: { matchLabels: matchLabels } }, + '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabelsMixin(matchLabels): { selector+: { matchLabels+: matchLabels } }, + }, + '#withName':: d.fn(help='"name is the name of the resource being referenced.\\n\\nOne of `name` or `selector` must be set, but `name` and `selector` are mutually exclusive properties. If one is set, the other must be unset.\\n\\nA single parameter used for all admission requests can be configured by setting the `name` field, leaving `selector` blank, and setting namespace if `paramKind` is namespace-scoped."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { name: name }, + '#withNamespace':: d.fn(help='"namespace is the namespace of the referenced resource. Allows limiting the search for params to a specific namespace. Applies to both `name` and `selector` fields.\\n\\nA per-namespace parameter may be used by specifying a namespace-scoped `paramKind` in the policy and leaving this field empty.\\n\\n- If `paramKind` is cluster-scoped, this field MUST be unset. Setting this field results in a configuration error.\\n\\n- If `paramKind` is namespace-scoped, the namespace of the object being evaluated for admission will be used when this field is left unset. Take care that if this is left empty the binding must not match any cluster-scoped resources, which will result in an error."', args=[d.arg(name='namespace', type=d.T.string)]), + withNamespace(namespace): { namespace: namespace }, + '#withParameterNotFoundAction':: d.fn(help='"`parameterNotFoundAction` controls the behavior of the binding when the resource exists, and name or selector is valid, but there are no parameters matched by the binding. If the value is set to `Allow`, then no matched parameters will be treated as successful validation by the binding. 
If set to `Deny`, then no matched parameters will be subject to the `failurePolicy` of the policy.\\n\\nAllowed values are `Allow` or `Deny`\\n\\nRequired"', args=[d.arg(name='parameterNotFoundAction', type=d.T.string)]), + withParameterNotFoundAction(parameterNotFoundAction): { parameterNotFoundAction: parameterNotFoundAction }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1beta1/typeChecking.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1beta1/typeChecking.libsonnet new file mode 100644 index 00000000000..1af60568c08 --- /dev/null +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1beta1/typeChecking.libsonnet @@ -0,0 +1,10 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='typeChecking', url='', help='"TypeChecking contains results of type checking the expressions in the ValidatingAdmissionPolicy"'), + '#withExpressionWarnings':: d.fn(help='"The type checking warnings for each expression."', args=[d.arg(name='expressionWarnings', type=d.T.array)]), + withExpressionWarnings(expressionWarnings): { expressionWarnings: if std.isArray(v=expressionWarnings) then expressionWarnings else [expressionWarnings] }, + '#withExpressionWarningsMixin':: d.fn(help='"The type checking warnings for each expression."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='expressionWarnings', type=d.T.array)]), + withExpressionWarningsMixin(expressionWarnings): { expressionWarnings+: if std.isArray(v=expressionWarnings) then expressionWarnings else [expressionWarnings] }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1beta1/validatingAdmissionPolicy.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1beta1/validatingAdmissionPolicy.libsonnet new file mode 100644 index 00000000000..789e7e9adf2 --- /dev/null +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1beta1/validatingAdmissionPolicy.libsonnet @@ -0,0 +1,117 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='validatingAdmissionPolicy', url='', help='"ValidatingAdmissionPolicy describes the definition of an admission validation policy that accepts or rejects an object without changing it."'), + '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), + metadata: { + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + withAnnotations(annotations): { metadata+: { annotations: annotations } }, + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, + '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), + withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, + '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), + withDeletionGracePeriodSeconds(deletionGracePeriodSeconds): { metadata+: { deletionGracePeriodSeconds: deletionGracePeriodSeconds } }, + '#withDeletionTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='deletionTimestamp', type=d.T.string)]), + withDeletionTimestamp(deletionTimestamp): { metadata+: { deletionTimestamp: deletionTimestamp } }, + '#withFinalizers':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."', args=[d.arg(name='finalizers', type=d.T.array)]), + withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, + '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. 
Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), + withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + withGenerateName(generateName): { metadata+: { generateName: generateName } }, + '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), + withGeneration(generation): { metadata+: { generation: generation } }, + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), + withLabels(labels): { metadata+: { labels: labels } }, + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + withLabelsMixin(labels): { metadata+: { labels+: labels } }, + '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), + withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, + '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". 
The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), + withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { metadata+: { name: name } }, + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + withNamespace(namespace): { metadata+: { namespace: namespace } }, + '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), + withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, + '#withOwnerReferencesMixin':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ownerReferences', type=d.T.array)]), + withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, + '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), + withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), + withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), + withUid(uid): { metadata+: { uid: uid } }, + }, + '#new':: d.fn(help='new returns an instance of ValidatingAdmissionPolicy', args=[d.arg(name='name', type=d.T.string)]), + new(name): { + apiVersion: 'admissionregistration.k8s.io/v1beta1', + kind: 'ValidatingAdmissionPolicy', + } + self.metadata.withName(name=name), + '#spec':: d.obj(help='"ValidatingAdmissionPolicySpec is the specification of the desired behavior of the AdmissionPolicy."'), + spec: { + '#matchConstraints':: d.obj(help='"MatchResources decides whether to run the admission control policy on an object based on whether it meets the match criteria. The exclude rules take precedence over include rules (if a resource matches both, it is excluded)"'), + matchConstraints: { + '#namespaceSelector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), + namespaceSelector: { + '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressions(matchExpressions): { spec+: { matchConstraints+: { namespaceSelector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } } }, + '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressionsMixin(matchExpressions): { spec+: { matchConstraints+: { namespaceSelector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } } }, + '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabels(matchLabels): { spec+: { matchConstraints+: { namespaceSelector+: { matchLabels: matchLabels } } } }, + '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". 
The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabelsMixin(matchLabels): { spec+: { matchConstraints+: { namespaceSelector+: { matchLabels+: matchLabels } } } }, + }, + '#objectSelector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), + objectSelector: { + '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressions(matchExpressions): { spec+: { matchConstraints+: { objectSelector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } } }, + '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressionsMixin(matchExpressions): { spec+: { matchConstraints+: { objectSelector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } } }, + '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabels(matchLabels): { spec+: { matchConstraints+: { objectSelector+: { matchLabels: matchLabels } } } }, + '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabelsMixin(matchLabels): { spec+: { matchConstraints+: { objectSelector+: { matchLabels+: matchLabels } } } }, + }, + '#withExcludeResourceRules':: d.fn(help='"ExcludeResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy should not care about. The exclude rules take precedence over include rules (if a resource matches both, it is excluded)"', args=[d.arg(name='excludeResourceRules', type=d.T.array)]), + withExcludeResourceRules(excludeResourceRules): { spec+: { matchConstraints+: { excludeResourceRules: if std.isArray(v=excludeResourceRules) then excludeResourceRules else [excludeResourceRules] } } }, + '#withExcludeResourceRulesMixin':: d.fn(help='"ExcludeResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy should not care about. 
The exclude rules take precedence over include rules (if a resource matches both, it is excluded)"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='excludeResourceRules', type=d.T.array)]), + withExcludeResourceRulesMixin(excludeResourceRules): { spec+: { matchConstraints+: { excludeResourceRules+: if std.isArray(v=excludeResourceRules) then excludeResourceRules else [excludeResourceRules] } } }, + '#withMatchPolicy':: d.fn(help='"matchPolicy defines how the \\"MatchResources\\" list is used to match incoming requests. Allowed values are \\"Exact\\" or \\"Equivalent\\".\\n\\n- Exact: match a request only if it exactly matches a specified rule. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, but \\"rules\\" only included `apiGroups:[\\"apps\\"], apiVersions:[\\"v1\\"], resources: [\\"deployments\\"]`, a request to apps/v1beta1 or extensions/v1beta1 would not be sent to the ValidatingAdmissionPolicy.\\n\\n- Equivalent: match a request if modifies a resource listed in rules, even via another API group or version. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, and \\"rules\\" only included `apiGroups:[\\"apps\\"], apiVersions:[\\"v1\\"], resources: [\\"deployments\\"]`, a request to apps/v1beta1 or extensions/v1beta1 would be converted to apps/v1 and sent to the ValidatingAdmissionPolicy.\\n\\nDefaults to \\"Equivalent\\', args=[d.arg(name='matchPolicy', type=d.T.string)]), + withMatchPolicy(matchPolicy): { spec+: { matchConstraints+: { matchPolicy: matchPolicy } } }, + '#withResourceRules':: d.fn(help='"ResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy matches. The policy cares about an operation if it matches _any_ Rule."', args=[d.arg(name='resourceRules', type=d.T.array)]), + withResourceRules(resourceRules): { spec+: { matchConstraints+: { resourceRules: if std.isArray(v=resourceRules) then resourceRules else [resourceRules] } } }, + '#withResourceRulesMixin':: d.fn(help='"ResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy matches. The policy cares about an operation if it matches _any_ Rule."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='resourceRules', type=d.T.array)]), + withResourceRulesMixin(resourceRules): { spec+: { matchConstraints+: { resourceRules+: if std.isArray(v=resourceRules) then resourceRules else [resourceRules] } } }, + }, + '#paramKind':: d.obj(help='"ParamKind is a tuple of Group Kind and Version."'), + paramKind: { + '#withApiVersion':: d.fn(help='"APIVersion is the API group version the resources belong to. In format of \\"group/version\\". Required."', args=[d.arg(name='apiVersion', type=d.T.string)]), + withApiVersion(apiVersion): { spec+: { paramKind+: { apiVersion: apiVersion } } }, + '#withKind':: d.fn(help='"Kind is the API kind the resources belong to. Required."', args=[d.arg(name='kind', type=d.T.string)]), + withKind(kind): { spec+: { paramKind+: { kind: kind } } }, + }, + '#withAuditAnnotations':: d.fn(help='"auditAnnotations contains CEL expressions which are used to produce audit annotations for the audit event of the API request. 
validations and auditAnnotations may not both be empty; a least one of validations or auditAnnotations is required."', args=[d.arg(name='auditAnnotations', type=d.T.array)]), + withAuditAnnotations(auditAnnotations): { spec+: { auditAnnotations: if std.isArray(v=auditAnnotations) then auditAnnotations else [auditAnnotations] } }, + '#withAuditAnnotationsMixin':: d.fn(help='"auditAnnotations contains CEL expressions which are used to produce audit annotations for the audit event of the API request. validations and auditAnnotations may not both be empty; a least one of validations or auditAnnotations is required."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='auditAnnotations', type=d.T.array)]), + withAuditAnnotationsMixin(auditAnnotations): { spec+: { auditAnnotations+: if std.isArray(v=auditAnnotations) then auditAnnotations else [auditAnnotations] } }, + '#withFailurePolicy':: d.fn(help='"failurePolicy defines how to handle failures for the admission policy. Failures can occur from CEL expression parse errors, type check errors, runtime errors and invalid or mis-configured policy definitions or bindings.\\n\\nA policy is invalid if spec.paramKind refers to a non-existent Kind. A binding is invalid if spec.paramRef.name refers to a non-existent resource.\\n\\nfailurePolicy does not define how validations that evaluate to false are handled.\\n\\nWhen failurePolicy is set to Fail, ValidatingAdmissionPolicyBinding validationActions define how failures are enforced.\\n\\nAllowed values are Ignore or Fail. Defaults to Fail."', args=[d.arg(name='failurePolicy', type=d.T.string)]), + withFailurePolicy(failurePolicy): { spec+: { failurePolicy: failurePolicy } }, + '#withMatchConditions':: d.fn(help='"MatchConditions is a list of conditions that must be met for a request to be validated. Match conditions filter requests that have already been matched by the rules, namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests. There are a maximum of 64 match conditions allowed.\\n\\nIf a parameter object is provided, it can be accessed via the `params` handle in the same manner as validation expressions.\\n\\nThe exact matching logic is (in order):\\n 1. If ANY matchCondition evaluates to FALSE, the policy is skipped.\\n 2. If ALL matchConditions evaluate to TRUE, the policy is evaluated.\\n 3. If any matchCondition evaluates to an error (but none are FALSE):\\n - If failurePolicy=Fail, reject the request\\n - If failurePolicy=Ignore, the policy is skipped"', args=[d.arg(name='matchConditions', type=d.T.array)]), + withMatchConditions(matchConditions): { spec+: { matchConditions: if std.isArray(v=matchConditions) then matchConditions else [matchConditions] } }, + '#withMatchConditionsMixin':: d.fn(help='"MatchConditions is a list of conditions that must be met for a request to be validated. Match conditions filter requests that have already been matched by the rules, namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests. There are a maximum of 64 match conditions allowed.\\n\\nIf a parameter object is provided, it can be accessed via the `params` handle in the same manner as validation expressions.\\n\\nThe exact matching logic is (in order):\\n 1. If ANY matchCondition evaluates to FALSE, the policy is skipped.\\n 2. If ALL matchConditions evaluate to TRUE, the policy is evaluated.\\n 3. 
If any matchCondition evaluates to an error (but none are FALSE):\\n - If failurePolicy=Fail, reject the request\\n - If failurePolicy=Ignore, the policy is skipped"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchConditions', type=d.T.array)]), + withMatchConditionsMixin(matchConditions): { spec+: { matchConditions+: if std.isArray(v=matchConditions) then matchConditions else [matchConditions] } }, + '#withValidations':: d.fn(help='"Validations contain CEL expressions which is used to apply the validation. Validations and AuditAnnotations may not both be empty; a minimum of one Validations or AuditAnnotations is required."', args=[d.arg(name='validations', type=d.T.array)]), + withValidations(validations): { spec+: { validations: if std.isArray(v=validations) then validations else [validations] } }, + '#withValidationsMixin':: d.fn(help='"Validations contain CEL expressions which is used to apply the validation. Validations and AuditAnnotations may not both be empty; a minimum of one Validations or AuditAnnotations is required."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='validations', type=d.T.array)]), + withValidationsMixin(validations): { spec+: { validations+: if std.isArray(v=validations) then validations else [validations] } }, + '#withVariables':: d.fn(help='"Variables contain definitions of variables that can be used in composition of other expressions. Each variable is defined as a named CEL expression. The variables defined here will be available under `variables` in other expressions of the policy except MatchConditions because MatchConditions are evaluated before the rest of the policy.\\n\\nThe expression of a variable can refer to other variables defined earlier in the list but not those after. Thus, Variables must be sorted by the order of first appearance and acyclic."', args=[d.arg(name='variables', type=d.T.array)]), + withVariables(variables): { spec+: { variables: if std.isArray(v=variables) then variables else [variables] } }, + '#withVariablesMixin':: d.fn(help='"Variables contain definitions of variables that can be used in composition of other expressions. Each variable is defined as a named CEL expression. The variables defined here will be available under `variables` in other expressions of the policy except MatchConditions because MatchConditions are evaluated before the rest of the policy.\\n\\nThe expression of a variable can refer to other variables defined earlier in the list but not those after. 
Thus, Variables must be sorted by the order of first appearance and acyclic."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='variables', type=d.T.array)]), + withVariablesMixin(variables): { spec+: { variables+: if std.isArray(v=variables) then variables else [variables] } }, + }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1beta1/validatingAdmissionPolicyBinding.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1beta1/validatingAdmissionPolicyBinding.libsonnet new file mode 100644 index 00000000000..12276863e67 --- /dev/null +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1beta1/validatingAdmissionPolicyBinding.libsonnet @@ -0,0 +1,118 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='validatingAdmissionPolicyBinding', url='', help="\"ValidatingAdmissionPolicyBinding binds the ValidatingAdmissionPolicy with paramerized resources. ValidatingAdmissionPolicyBinding and parameter CRDs together define how cluster administrators configure policies for clusters.\\n\\nFor a given admission request, each binding will cause its policy to be evaluated N times, where N is 1 for policies/bindings that don't use params, otherwise N is the number of parameters selected by the binding.\\n\\nThe CEL expressions of a policy must have a computed CEL cost below the maximum CEL budget. Each evaluation of the policy is given an independent CEL cost budget. Adding/removing policies, bindings, or params can not affect whether a given (policy, binding, param) combination is within its own CEL budget.\""), + '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), + metadata: { + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + withAnnotations(annotations): { metadata+: { annotations: annotations } }, + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, + '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), + withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, + '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. 
Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), + withDeletionGracePeriodSeconds(deletionGracePeriodSeconds): { metadata+: { deletionGracePeriodSeconds: deletionGracePeriodSeconds } }, + '#withDeletionTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='deletionTimestamp', type=d.T.string)]), + withDeletionTimestamp(deletionTimestamp): { metadata+: { deletionTimestamp: deletionTimestamp } }, + '#withFinalizers':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."', args=[d.arg(name='finalizers', type=d.T.array)]), + withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, + '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), + withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + withGenerateName(generateName): { metadata+: { generateName: generateName } }, + '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), + withGeneration(generation): { metadata+: { generation: generation } }, + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), + withLabels(labels): { metadata+: { labels: labels } }, + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + withLabelsMixin(labels): { metadata+: { labels+: labels } }, + '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), + withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, + '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), + withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { metadata+: { name: name } }, + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. 
Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + withNamespace(namespace): { metadata+: { namespace: namespace } }, + '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), + withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, + '#withOwnerReferencesMixin':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ownerReferences', type=d.T.array)]), + withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, + '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), + withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), + withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), + withUid(uid): { metadata+: { uid: uid } }, + }, + '#new':: d.fn(help='new returns an instance of ValidatingAdmissionPolicyBinding', args=[d.arg(name='name', type=d.T.string)]), + new(name): { + apiVersion: 'admissionregistration.k8s.io/v1beta1', + kind: 'ValidatingAdmissionPolicyBinding', + } + self.metadata.withName(name=name), + '#spec':: d.obj(help='"ValidatingAdmissionPolicyBindingSpec is the specification of the ValidatingAdmissionPolicyBinding."'), + spec: { + '#matchResources':: d.obj(help='"MatchResources decides whether to run the admission control policy on an object based on whether it meets the match criteria. The exclude rules take precedence over include rules (if a resource matches both, it is excluded)"'), + matchResources: { + '#namespaceSelector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), + namespaceSelector: { + '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressions(matchExpressions): { spec+: { matchResources+: { namespaceSelector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } } }, + '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressionsMixin(matchExpressions): { spec+: { matchResources+: { namespaceSelector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } } }, + '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabels(matchLabels): { spec+: { matchResources+: { namespaceSelector+: { matchLabels: matchLabels } } } }, + '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabelsMixin(matchLabels): { spec+: { matchResources+: { namespaceSelector+: { matchLabels+: matchLabels } } } }, + }, + '#objectSelector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), + objectSelector: { + '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. 
The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressions(matchExpressions): { spec+: { matchResources+: { objectSelector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } } }, + '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressionsMixin(matchExpressions): { spec+: { matchResources+: { objectSelector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } } }, + '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabels(matchLabels): { spec+: { matchResources+: { objectSelector+: { matchLabels: matchLabels } } } }, + '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabelsMixin(matchLabels): { spec+: { matchResources+: { objectSelector+: { matchLabels+: matchLabels } } } }, + }, + '#withExcludeResourceRules':: d.fn(help='"ExcludeResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy should not care about. The exclude rules take precedence over include rules (if a resource matches both, it is excluded)"', args=[d.arg(name='excludeResourceRules', type=d.T.array)]), + withExcludeResourceRules(excludeResourceRules): { spec+: { matchResources+: { excludeResourceRules: if std.isArray(v=excludeResourceRules) then excludeResourceRules else [excludeResourceRules] } } }, + '#withExcludeResourceRulesMixin':: d.fn(help='"ExcludeResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy should not care about. The exclude rules take precedence over include rules (if a resource matches both, it is excluded)"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='excludeResourceRules', type=d.T.array)]), + withExcludeResourceRulesMixin(excludeResourceRules): { spec+: { matchResources+: { excludeResourceRules+: if std.isArray(v=excludeResourceRules) then excludeResourceRules else [excludeResourceRules] } } }, + '#withMatchPolicy':: d.fn(help='"matchPolicy defines how the \\"MatchResources\\" list is used to match incoming requests. Allowed values are \\"Exact\\" or \\"Equivalent\\".\\n\\n- Exact: match a request only if it exactly matches a specified rule. 
For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, but \\"rules\\" only included `apiGroups:[\\"apps\\"], apiVersions:[\\"v1\\"], resources: [\\"deployments\\"]`, a request to apps/v1beta1 or extensions/v1beta1 would not be sent to the ValidatingAdmissionPolicy.\\n\\n- Equivalent: match a request if modifies a resource listed in rules, even via another API group or version. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, and \\"rules\\" only included `apiGroups:[\\"apps\\"], apiVersions:[\\"v1\\"], resources: [\\"deployments\\"]`, a request to apps/v1beta1 or extensions/v1beta1 would be converted to apps/v1 and sent to the ValidatingAdmissionPolicy.\\n\\nDefaults to \\"Equivalent\\', args=[d.arg(name='matchPolicy', type=d.T.string)]), + withMatchPolicy(matchPolicy): { spec+: { matchResources+: { matchPolicy: matchPolicy } } }, + '#withResourceRules':: d.fn(help='"ResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy matches. The policy cares about an operation if it matches _any_ Rule."', args=[d.arg(name='resourceRules', type=d.T.array)]), + withResourceRules(resourceRules): { spec+: { matchResources+: { resourceRules: if std.isArray(v=resourceRules) then resourceRules else [resourceRules] } } }, + '#withResourceRulesMixin':: d.fn(help='"ResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy matches. The policy cares about an operation if it matches _any_ Rule."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='resourceRules', type=d.T.array)]), + withResourceRulesMixin(resourceRules): { spec+: { matchResources+: { resourceRules+: if std.isArray(v=resourceRules) then resourceRules else [resourceRules] } } }, + }, + '#paramRef':: d.obj(help='"ParamRef describes how to locate the params to be used as input to expressions of rules applied by a policy binding."'), + paramRef: { + '#selector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), + selector: { + '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressions(matchExpressions): { spec+: { paramRef+: { selector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } } }, + '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressionsMixin(matchExpressions): { spec+: { paramRef+: { selector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } } }, + '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". 
The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabels(matchLabels): { spec+: { paramRef+: { selector+: { matchLabels: matchLabels } } } }, + '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabelsMixin(matchLabels): { spec+: { paramRef+: { selector+: { matchLabels+: matchLabels } } } }, + }, + '#withName':: d.fn(help='"name is the name of the resource being referenced.\\n\\nOne of `name` or `selector` must be set, but `name` and `selector` are mutually exclusive properties. If one is set, the other must be unset.\\n\\nA single parameter used for all admission requests can be configured by setting the `name` field, leaving `selector` blank, and setting namespace if `paramKind` is namespace-scoped."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { spec+: { paramRef+: { name: name } } }, + '#withNamespace':: d.fn(help='"namespace is the namespace of the referenced resource. Allows limiting the search for params to a specific namespace. Applies to both `name` and `selector` fields.\\n\\nA per-namespace parameter may be used by specifying a namespace-scoped `paramKind` in the policy and leaving this field empty.\\n\\n- If `paramKind` is cluster-scoped, this field MUST be unset. Setting this field results in a configuration error.\\n\\n- If `paramKind` is namespace-scoped, the namespace of the object being evaluated for admission will be used when this field is left unset. Take care that if this is left empty the binding must not match any cluster-scoped resources, which will result in an error."', args=[d.arg(name='namespace', type=d.T.string)]), + withNamespace(namespace): { spec+: { paramRef+: { namespace: namespace } } }, + '#withParameterNotFoundAction':: d.fn(help='"`parameterNotFoundAction` controls the behavior of the binding when the resource exists, and name or selector is valid, but there are no parameters matched by the binding. If the value is set to `Allow`, then no matched parameters will be treated as successful validation by the binding. If set to `Deny`, then no matched parameters will be subject to the `failurePolicy` of the policy.\\n\\nAllowed values are `Allow` or `Deny`\\n\\nRequired"', args=[d.arg(name='parameterNotFoundAction', type=d.T.string)]), + withParameterNotFoundAction(parameterNotFoundAction): { spec+: { paramRef+: { parameterNotFoundAction: parameterNotFoundAction } } }, + }, + '#withPolicyName':: d.fn(help='"PolicyName references a ValidatingAdmissionPolicy name which the ValidatingAdmissionPolicyBinding binds to. If the referenced resource does not exist, this binding is considered invalid and will be ignored Required."', args=[d.arg(name='policyName', type=d.T.string)]), + withPolicyName(policyName): { spec+: { policyName: policyName } }, + '#withValidationActions':: d.fn(help="\"validationActions declares how Validations of the referenced ValidatingAdmissionPolicy are enforced. 
If a validation evaluates to false it is always enforced according to these actions.\\n\\nFailures defined by the ValidatingAdmissionPolicy's FailurePolicy are enforced according to these actions only if the FailurePolicy is set to Fail, otherwise the failures are ignored. This includes compilation errors, runtime errors and misconfigurations of the policy.\\n\\nvalidationActions is declared as a set of action values. Order does not matter. validationActions may not contain duplicates of the same action.\\n\\nThe supported actions values are:\\n\\n\\\"Deny\\\" specifies that a validation failure results in a denied request.\\n\\n\\\"Warn\\\" specifies that a validation failure is reported to the request client in HTTP Warning headers, with a warning code of 299. Warnings can be sent both for allowed or denied admission responses.\\n\\n\\\"Audit\\\" specifies that a validation failure is included in the published audit event for the request. The audit event will contain a `validation.policy.admission.k8s.io/validation_failure` audit annotation with a value containing the details of the validation failures, formatted as a JSON list of objects, each with the following fields: - message: The validation failure message string - policy: The resource name of the ValidatingAdmissionPolicy - binding: The resource name of the ValidatingAdmissionPolicyBinding - expressionIndex: The index of the failed validations in the ValidatingAdmissionPolicy - validationActions: The enforcement actions enacted for the validation failure Example audit annotation: `\\\"validation.policy.admission.k8s.io/validation_failure\\\": \\\"[{\\\"message\\\": \\\"Invalid value\\\", {\\\"policy\\\": \\\"policy.example.com\\\", {\\\"binding\\\": \\\"policybinding.example.com\\\", {\\\"expressionIndex\\\": \\\"1\\\", {\\\"validationActions\\\": [\\\"Audit\\\"]}]\\\"`\\n\\nClients should expect to handle additional values by ignoring any values not recognized.\\n\\n\\\"Deny\\\" and \\\"Warn\\\" may not be used together since this combination needlessly duplicates the validation failure both in the API response body and the HTTP warning headers.\\n\\nRequired.\"", args=[d.arg(name='validationActions', type=d.T.array)]), + withValidationActions(validationActions): { spec+: { validationActions: if std.isArray(v=validationActions) then validationActions else [validationActions] } }, + '#withValidationActionsMixin':: d.fn(help="\"validationActions declares how Validations of the referenced ValidatingAdmissionPolicy are enforced. If a validation evaluates to false it is always enforced according to these actions.\\n\\nFailures defined by the ValidatingAdmissionPolicy's FailurePolicy are enforced according to these actions only if the FailurePolicy is set to Fail, otherwise the failures are ignored. This includes compilation errors, runtime errors and misconfigurations of the policy.\\n\\nvalidationActions is declared as a set of action values. Order does not matter. validationActions may not contain duplicates of the same action.\\n\\nThe supported actions values are:\\n\\n\\\"Deny\\\" specifies that a validation failure results in a denied request.\\n\\n\\\"Warn\\\" specifies that a validation failure is reported to the request client in HTTP Warning headers, with a warning code of 299. Warnings can be sent both for allowed or denied admission responses.\\n\\n\\\"Audit\\\" specifies that a validation failure is included in the published audit event for the request. 
The audit event will contain a `validation.policy.admission.k8s.io/validation_failure` audit annotation with a value containing the details of the validation failures, formatted as a JSON list of objects, each with the following fields: - message: The validation failure message string - policy: The resource name of the ValidatingAdmissionPolicy - binding: The resource name of the ValidatingAdmissionPolicyBinding - expressionIndex: The index of the failed validations in the ValidatingAdmissionPolicy - validationActions: The enforcement actions enacted for the validation failure Example audit annotation: `\\\"validation.policy.admission.k8s.io/validation_failure\\\": \\\"[{\\\"message\\\": \\\"Invalid value\\\", {\\\"policy\\\": \\\"policy.example.com\\\", {\\\"binding\\\": \\\"policybinding.example.com\\\", {\\\"expressionIndex\\\": \\\"1\\\", {\\\"validationActions\\\": [\\\"Audit\\\"]}]\\\"`\\n\\nClients should expect to handle additional values by ignoring any values not recognized.\\n\\n\\\"Deny\\\" and \\\"Warn\\\" may not be used together since this combination needlessly duplicates the validation failure both in the API response body and the HTTP warning headers.\\n\\nRequired.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='validationActions', type=d.T.array)]), + withValidationActionsMixin(validationActions): { spec+: { validationActions+: if std.isArray(v=validationActions) then validationActions else [validationActions] } }, + }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1beta1/validatingAdmissionPolicyBindingSpec.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1beta1/validatingAdmissionPolicyBindingSpec.libsonnet new file mode 100644 index 00000000000..e1ebe62488f --- /dev/null +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1beta1/validatingAdmissionPolicyBindingSpec.libsonnet @@ -0,0 +1,67 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='validatingAdmissionPolicyBindingSpec', url='', help='"ValidatingAdmissionPolicyBindingSpec is the specification of the ValidatingAdmissionPolicyBinding."'), + '#matchResources':: d.obj(help='"MatchResources decides whether to run the admission control policy on an object based on whether it meets the match criteria. The exclude rules take precedence over include rules (if a resource matches both, it is excluded)"'), + matchResources: { + '#namespaceSelector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), + namespaceSelector: { + '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressions(matchExpressions): { matchResources+: { namespaceSelector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } }, + '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. 
The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressionsMixin(matchExpressions): { matchResources+: { namespaceSelector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } }, + '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabels(matchLabels): { matchResources+: { namespaceSelector+: { matchLabels: matchLabels } } }, + '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabelsMixin(matchLabels): { matchResources+: { namespaceSelector+: { matchLabels+: matchLabels } } }, + }, + '#objectSelector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), + objectSelector: { + '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressions(matchExpressions): { matchResources+: { objectSelector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } }, + '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressionsMixin(matchExpressions): { matchResources+: { objectSelector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } }, + '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabels(matchLabels): { matchResources+: { objectSelector+: { matchLabels: matchLabels } } }, + '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". 
The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabelsMixin(matchLabels): { matchResources+: { objectSelector+: { matchLabels+: matchLabels } } }, + }, + '#withExcludeResourceRules':: d.fn(help='"ExcludeResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy should not care about. The exclude rules take precedence over include rules (if a resource matches both, it is excluded)"', args=[d.arg(name='excludeResourceRules', type=d.T.array)]), + withExcludeResourceRules(excludeResourceRules): { matchResources+: { excludeResourceRules: if std.isArray(v=excludeResourceRules) then excludeResourceRules else [excludeResourceRules] } }, + '#withExcludeResourceRulesMixin':: d.fn(help='"ExcludeResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy should not care about. The exclude rules take precedence over include rules (if a resource matches both, it is excluded)"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='excludeResourceRules', type=d.T.array)]), + withExcludeResourceRulesMixin(excludeResourceRules): { matchResources+: { excludeResourceRules+: if std.isArray(v=excludeResourceRules) then excludeResourceRules else [excludeResourceRules] } }, + '#withMatchPolicy':: d.fn(help='"matchPolicy defines how the \\"MatchResources\\" list is used to match incoming requests. Allowed values are \\"Exact\\" or \\"Equivalent\\".\\n\\n- Exact: match a request only if it exactly matches a specified rule. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, but \\"rules\\" only included `apiGroups:[\\"apps\\"], apiVersions:[\\"v1\\"], resources: [\\"deployments\\"]`, a request to apps/v1beta1 or extensions/v1beta1 would not be sent to the ValidatingAdmissionPolicy.\\n\\n- Equivalent: match a request if modifies a resource listed in rules, even via another API group or version. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, and \\"rules\\" only included `apiGroups:[\\"apps\\"], apiVersions:[\\"v1\\"], resources: [\\"deployments\\"]`, a request to apps/v1beta1 or extensions/v1beta1 would be converted to apps/v1 and sent to the ValidatingAdmissionPolicy.\\n\\nDefaults to \\"Equivalent\\', args=[d.arg(name='matchPolicy', type=d.T.string)]), + withMatchPolicy(matchPolicy): { matchResources+: { matchPolicy: matchPolicy } }, + '#withResourceRules':: d.fn(help='"ResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy matches. The policy cares about an operation if it matches _any_ Rule."', args=[d.arg(name='resourceRules', type=d.T.array)]), + withResourceRules(resourceRules): { matchResources+: { resourceRules: if std.isArray(v=resourceRules) then resourceRules else [resourceRules] } }, + '#withResourceRulesMixin':: d.fn(help='"ResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy matches. 
The policy cares about an operation if it matches _any_ Rule."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='resourceRules', type=d.T.array)]), + withResourceRulesMixin(resourceRules): { matchResources+: { resourceRules+: if std.isArray(v=resourceRules) then resourceRules else [resourceRules] } }, + }, + '#paramRef':: d.obj(help='"ParamRef describes how to locate the params to be used as input to expressions of rules applied by a policy binding."'), + paramRef: { + '#selector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), + selector: { + '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressions(matchExpressions): { paramRef+: { selector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } }, + '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressionsMixin(matchExpressions): { paramRef+: { selector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } }, + '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabels(matchLabels): { paramRef+: { selector+: { matchLabels: matchLabels } } }, + '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabelsMixin(matchLabels): { paramRef+: { selector+: { matchLabels+: matchLabels } } }, + }, + '#withName':: d.fn(help='"name is the name of the resource being referenced.\\n\\nOne of `name` or `selector` must be set, but `name` and `selector` are mutually exclusive properties. If one is set, the other must be unset.\\n\\nA single parameter used for all admission requests can be configured by setting the `name` field, leaving `selector` blank, and setting namespace if `paramKind` is namespace-scoped."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { paramRef+: { name: name } }, + '#withNamespace':: d.fn(help='"namespace is the namespace of the referenced resource. Allows limiting the search for params to a specific namespace. Applies to both `name` and `selector` fields.\\n\\nA per-namespace parameter may be used by specifying a namespace-scoped `paramKind` in the policy and leaving this field empty.\\n\\n- If `paramKind` is cluster-scoped, this field MUST be unset. 
Setting this field results in a configuration error.\\n\\n- If `paramKind` is namespace-scoped, the namespace of the object being evaluated for admission will be used when this field is left unset. Take care that if this is left empty the binding must not match any cluster-scoped resources, which will result in an error."', args=[d.arg(name='namespace', type=d.T.string)]), + withNamespace(namespace): { paramRef+: { namespace: namespace } }, + '#withParameterNotFoundAction':: d.fn(help='"`parameterNotFoundAction` controls the behavior of the binding when the resource exists, and name or selector is valid, but there are no parameters matched by the binding. If the value is set to `Allow`, then no matched parameters will be treated as successful validation by the binding. If set to `Deny`, then no matched parameters will be subject to the `failurePolicy` of the policy.\\n\\nAllowed values are `Allow` or `Deny`\\n\\nRequired"', args=[d.arg(name='parameterNotFoundAction', type=d.T.string)]), + withParameterNotFoundAction(parameterNotFoundAction): { paramRef+: { parameterNotFoundAction: parameterNotFoundAction } }, + }, + '#withPolicyName':: d.fn(help='"PolicyName references a ValidatingAdmissionPolicy name which the ValidatingAdmissionPolicyBinding binds to. If the referenced resource does not exist, this binding is considered invalid and will be ignored Required."', args=[d.arg(name='policyName', type=d.T.string)]), + withPolicyName(policyName): { policyName: policyName }, + '#withValidationActions':: d.fn(help="\"validationActions declares how Validations of the referenced ValidatingAdmissionPolicy are enforced. If a validation evaluates to false it is always enforced according to these actions.\\n\\nFailures defined by the ValidatingAdmissionPolicy's FailurePolicy are enforced according to these actions only if the FailurePolicy is set to Fail, otherwise the failures are ignored. This includes compilation errors, runtime errors and misconfigurations of the policy.\\n\\nvalidationActions is declared as a set of action values. Order does not matter. validationActions may not contain duplicates of the same action.\\n\\nThe supported actions values are:\\n\\n\\\"Deny\\\" specifies that a validation failure results in a denied request.\\n\\n\\\"Warn\\\" specifies that a validation failure is reported to the request client in HTTP Warning headers, with a warning code of 299. Warnings can be sent both for allowed or denied admission responses.\\n\\n\\\"Audit\\\" specifies that a validation failure is included in the published audit event for the request. 
The audit event will contain a `validation.policy.admission.k8s.io/validation_failure` audit annotation with a value containing the details of the validation failures, formatted as a JSON list of objects, each with the following fields: - message: The validation failure message string - policy: The resource name of the ValidatingAdmissionPolicy - binding: The resource name of the ValidatingAdmissionPolicyBinding - expressionIndex: The index of the failed validations in the ValidatingAdmissionPolicy - validationActions: The enforcement actions enacted for the validation failure Example audit annotation: `\\\"validation.policy.admission.k8s.io/validation_failure\\\": \\\"[{\\\"message\\\": \\\"Invalid value\\\", {\\\"policy\\\": \\\"policy.example.com\\\", {\\\"binding\\\": \\\"policybinding.example.com\\\", {\\\"expressionIndex\\\": \\\"1\\\", {\\\"validationActions\\\": [\\\"Audit\\\"]}]\\\"`\\n\\nClients should expect to handle additional values by ignoring any values not recognized.\\n\\n\\\"Deny\\\" and \\\"Warn\\\" may not be used together since this combination needlessly duplicates the validation failure both in the API response body and the HTTP warning headers.\\n\\nRequired.\"", args=[d.arg(name='validationActions', type=d.T.array)]), + withValidationActions(validationActions): { validationActions: if std.isArray(v=validationActions) then validationActions else [validationActions] }, + '#withValidationActionsMixin':: d.fn(help="\"validationActions declares how Validations of the referenced ValidatingAdmissionPolicy are enforced. If a validation evaluates to false it is always enforced according to these actions.\\n\\nFailures defined by the ValidatingAdmissionPolicy's FailurePolicy are enforced according to these actions only if the FailurePolicy is set to Fail, otherwise the failures are ignored. This includes compilation errors, runtime errors and misconfigurations of the policy.\\n\\nvalidationActions is declared as a set of action values. Order does not matter. validationActions may not contain duplicates of the same action.\\n\\nThe supported actions values are:\\n\\n\\\"Deny\\\" specifies that a validation failure results in a denied request.\\n\\n\\\"Warn\\\" specifies that a validation failure is reported to the request client in HTTP Warning headers, with a warning code of 299. Warnings can be sent both for allowed or denied admission responses.\\n\\n\\\"Audit\\\" specifies that a validation failure is included in the published audit event for the request. 
The audit event will contain a `validation.policy.admission.k8s.io/validation_failure` audit annotation with a value containing the details of the validation failures, formatted as a JSON list of objects, each with the following fields: - message: The validation failure message string - policy: The resource name of the ValidatingAdmissionPolicy - binding: The resource name of the ValidatingAdmissionPolicyBinding - expressionIndex: The index of the failed validations in the ValidatingAdmissionPolicy - validationActions: The enforcement actions enacted for the validation failure Example audit annotation: `\\\"validation.policy.admission.k8s.io/validation_failure\\\": \\\"[{\\\"message\\\": \\\"Invalid value\\\", {\\\"policy\\\": \\\"policy.example.com\\\", {\\\"binding\\\": \\\"policybinding.example.com\\\", {\\\"expressionIndex\\\": \\\"1\\\", {\\\"validationActions\\\": [\\\"Audit\\\"]}]\\\"`\\n\\nClients should expect to handle additional values by ignoring any values not recognized.\\n\\n\\\"Deny\\\" and \\\"Warn\\\" may not be used together since this combination needlessly duplicates the validation failure both in the API response body and the HTTP warning headers.\\n\\nRequired.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='validationActions', type=d.T.array)]), + withValidationActionsMixin(validationActions): { validationActions+: if std.isArray(v=validationActions) then validationActions else [validationActions] }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1beta1/validatingAdmissionPolicySpec.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1beta1/validatingAdmissionPolicySpec.libsonnet new file mode 100644 index 00000000000..3bc96c0cd22 --- /dev/null +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1beta1/validatingAdmissionPolicySpec.libsonnet @@ -0,0 +1,66 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='validatingAdmissionPolicySpec', url='', help='"ValidatingAdmissionPolicySpec is the specification of the desired behavior of the AdmissionPolicy."'), + '#matchConstraints':: d.obj(help='"MatchResources decides whether to run the admission control policy on an object based on whether it meets the match criteria. The exclude rules take precedence over include rules (if a resource matches both, it is excluded)"'), + matchConstraints: { + '#namespaceSelector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), + namespaceSelector: { + '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressions(matchExpressions): { matchConstraints+: { namespaceSelector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } }, + '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. 
The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressionsMixin(matchExpressions): { matchConstraints+: { namespaceSelector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } }, + '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabels(matchLabels): { matchConstraints+: { namespaceSelector+: { matchLabels: matchLabels } } }, + '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabelsMixin(matchLabels): { matchConstraints+: { namespaceSelector+: { matchLabels+: matchLabels } } }, + }, + '#objectSelector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), + objectSelector: { + '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressions(matchExpressions): { matchConstraints+: { objectSelector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } }, + '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressionsMixin(matchExpressions): { matchConstraints+: { objectSelector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } }, + '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabels(matchLabels): { matchConstraints+: { objectSelector+: { matchLabels: matchLabels } } }, + '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". 
The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabelsMixin(matchLabels): { matchConstraints+: { objectSelector+: { matchLabels+: matchLabels } } }, + }, + '#withExcludeResourceRules':: d.fn(help='"ExcludeResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy should not care about. The exclude rules take precedence over include rules (if a resource matches both, it is excluded)"', args=[d.arg(name='excludeResourceRules', type=d.T.array)]), + withExcludeResourceRules(excludeResourceRules): { matchConstraints+: { excludeResourceRules: if std.isArray(v=excludeResourceRules) then excludeResourceRules else [excludeResourceRules] } }, + '#withExcludeResourceRulesMixin':: d.fn(help='"ExcludeResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy should not care about. The exclude rules take precedence over include rules (if a resource matches both, it is excluded)"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='excludeResourceRules', type=d.T.array)]), + withExcludeResourceRulesMixin(excludeResourceRules): { matchConstraints+: { excludeResourceRules+: if std.isArray(v=excludeResourceRules) then excludeResourceRules else [excludeResourceRules] } }, + '#withMatchPolicy':: d.fn(help='"matchPolicy defines how the \\"MatchResources\\" list is used to match incoming requests. Allowed values are \\"Exact\\" or \\"Equivalent\\".\\n\\n- Exact: match a request only if it exactly matches a specified rule. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, but \\"rules\\" only included `apiGroups:[\\"apps\\"], apiVersions:[\\"v1\\"], resources: [\\"deployments\\"]`, a request to apps/v1beta1 or extensions/v1beta1 would not be sent to the ValidatingAdmissionPolicy.\\n\\n- Equivalent: match a request if modifies a resource listed in rules, even via another API group or version. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, and \\"rules\\" only included `apiGroups:[\\"apps\\"], apiVersions:[\\"v1\\"], resources: [\\"deployments\\"]`, a request to apps/v1beta1 or extensions/v1beta1 would be converted to apps/v1 and sent to the ValidatingAdmissionPolicy.\\n\\nDefaults to \\"Equivalent\\', args=[d.arg(name='matchPolicy', type=d.T.string)]), + withMatchPolicy(matchPolicy): { matchConstraints+: { matchPolicy: matchPolicy } }, + '#withResourceRules':: d.fn(help='"ResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy matches. The policy cares about an operation if it matches _any_ Rule."', args=[d.arg(name='resourceRules', type=d.T.array)]), + withResourceRules(resourceRules): { matchConstraints+: { resourceRules: if std.isArray(v=resourceRules) then resourceRules else [resourceRules] } }, + '#withResourceRulesMixin':: d.fn(help='"ResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy matches. 
The policy cares about an operation if it matches _any_ Rule."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='resourceRules', type=d.T.array)]), + withResourceRulesMixin(resourceRules): { matchConstraints+: { resourceRules+: if std.isArray(v=resourceRules) then resourceRules else [resourceRules] } }, + }, + '#paramKind':: d.obj(help='"ParamKind is a tuple of Group Kind and Version."'), + paramKind: { + '#withApiVersion':: d.fn(help='"APIVersion is the API group version the resources belong to. In format of \\"group/version\\". Required."', args=[d.arg(name='apiVersion', type=d.T.string)]), + withApiVersion(apiVersion): { paramKind+: { apiVersion: apiVersion } }, + '#withKind':: d.fn(help='"Kind is the API kind the resources belong to. Required."', args=[d.arg(name='kind', type=d.T.string)]), + withKind(kind): { paramKind+: { kind: kind } }, + }, + '#withAuditAnnotations':: d.fn(help='"auditAnnotations contains CEL expressions which are used to produce audit annotations for the audit event of the API request. validations and auditAnnotations may not both be empty; a least one of validations or auditAnnotations is required."', args=[d.arg(name='auditAnnotations', type=d.T.array)]), + withAuditAnnotations(auditAnnotations): { auditAnnotations: if std.isArray(v=auditAnnotations) then auditAnnotations else [auditAnnotations] }, + '#withAuditAnnotationsMixin':: d.fn(help='"auditAnnotations contains CEL expressions which are used to produce audit annotations for the audit event of the API request. validations and auditAnnotations may not both be empty; a least one of validations or auditAnnotations is required."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='auditAnnotations', type=d.T.array)]), + withAuditAnnotationsMixin(auditAnnotations): { auditAnnotations+: if std.isArray(v=auditAnnotations) then auditAnnotations else [auditAnnotations] }, + '#withFailurePolicy':: d.fn(help='"failurePolicy defines how to handle failures for the admission policy. Failures can occur from CEL expression parse errors, type check errors, runtime errors and invalid or mis-configured policy definitions or bindings.\\n\\nA policy is invalid if spec.paramKind refers to a non-existent Kind. A binding is invalid if spec.paramRef.name refers to a non-existent resource.\\n\\nfailurePolicy does not define how validations that evaluate to false are handled.\\n\\nWhen failurePolicy is set to Fail, ValidatingAdmissionPolicyBinding validationActions define how failures are enforced.\\n\\nAllowed values are Ignore or Fail. Defaults to Fail."', args=[d.arg(name='failurePolicy', type=d.T.string)]), + withFailurePolicy(failurePolicy): { failurePolicy: failurePolicy }, + '#withMatchConditions':: d.fn(help='"MatchConditions is a list of conditions that must be met for a request to be validated. Match conditions filter requests that have already been matched by the rules, namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests. There are a maximum of 64 match conditions allowed.\\n\\nIf a parameter object is provided, it can be accessed via the `params` handle in the same manner as validation expressions.\\n\\nThe exact matching logic is (in order):\\n 1. If ANY matchCondition evaluates to FALSE, the policy is skipped.\\n 2. If ALL matchConditions evaluate to TRUE, the policy is evaluated.\\n 3. 
If any matchCondition evaluates to an error (but none are FALSE):\\n - If failurePolicy=Fail, reject the request\\n - If failurePolicy=Ignore, the policy is skipped"', args=[d.arg(name='matchConditions', type=d.T.array)]), + withMatchConditions(matchConditions): { matchConditions: if std.isArray(v=matchConditions) then matchConditions else [matchConditions] }, + '#withMatchConditionsMixin':: d.fn(help='"MatchConditions is a list of conditions that must be met for a request to be validated. Match conditions filter requests that have already been matched by the rules, namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests. There are a maximum of 64 match conditions allowed.\\n\\nIf a parameter object is provided, it can be accessed via the `params` handle in the same manner as validation expressions.\\n\\nThe exact matching logic is (in order):\\n 1. If ANY matchCondition evaluates to FALSE, the policy is skipped.\\n 2. If ALL matchConditions evaluate to TRUE, the policy is evaluated.\\n 3. If any matchCondition evaluates to an error (but none are FALSE):\\n - If failurePolicy=Fail, reject the request\\n - If failurePolicy=Ignore, the policy is skipped"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchConditions', type=d.T.array)]), + withMatchConditionsMixin(matchConditions): { matchConditions+: if std.isArray(v=matchConditions) then matchConditions else [matchConditions] }, + '#withValidations':: d.fn(help='"Validations contain CEL expressions which is used to apply the validation. Validations and AuditAnnotations may not both be empty; a minimum of one Validations or AuditAnnotations is required."', args=[d.arg(name='validations', type=d.T.array)]), + withValidations(validations): { validations: if std.isArray(v=validations) then validations else [validations] }, + '#withValidationsMixin':: d.fn(help='"Validations contain CEL expressions which is used to apply the validation. Validations and AuditAnnotations may not both be empty; a minimum of one Validations or AuditAnnotations is required."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='validations', type=d.T.array)]), + withValidationsMixin(validations): { validations+: if std.isArray(v=validations) then validations else [validations] }, + '#withVariables':: d.fn(help='"Variables contain definitions of variables that can be used in composition of other expressions. Each variable is defined as a named CEL expression. The variables defined here will be available under `variables` in other expressions of the policy except MatchConditions because MatchConditions are evaluated before the rest of the policy.\\n\\nThe expression of a variable can refer to other variables defined earlier in the list but not those after. Thus, Variables must be sorted by the order of first appearance and acyclic."', args=[d.arg(name='variables', type=d.T.array)]), + withVariables(variables): { variables: if std.isArray(v=variables) then variables else [variables] }, + '#withVariablesMixin':: d.fn(help='"Variables contain definitions of variables that can be used in composition of other expressions. Each variable is defined as a named CEL expression. 
The variables defined here will be available under `variables` in other expressions of the policy except MatchConditions because MatchConditions are evaluated before the rest of the policy.\\n\\nThe expression of a variable can refer to other variables defined earlier in the list but not those after. Thus, Variables must be sorted by the order of first appearance and acyclic."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='variables', type=d.T.array)]), + withVariablesMixin(variables): { variables+: if std.isArray(v=variables) then variables else [variables] }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1beta1/validatingAdmissionPolicyStatus.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1beta1/validatingAdmissionPolicyStatus.libsonnet new file mode 100644 index 00000000000..6d150d52f4a --- /dev/null +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1beta1/validatingAdmissionPolicyStatus.libsonnet @@ -0,0 +1,19 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='validatingAdmissionPolicyStatus', url='', help='"ValidatingAdmissionPolicyStatus represents the status of an admission validation policy."'), + '#typeChecking':: d.obj(help='"TypeChecking contains results of type checking the expressions in the ValidatingAdmissionPolicy"'), + typeChecking: { + '#withExpressionWarnings':: d.fn(help='"The type checking warnings for each expression."', args=[d.arg(name='expressionWarnings', type=d.T.array)]), + withExpressionWarnings(expressionWarnings): { typeChecking+: { expressionWarnings: if std.isArray(v=expressionWarnings) then expressionWarnings else [expressionWarnings] } }, + '#withExpressionWarningsMixin':: d.fn(help='"The type checking warnings for each expression."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='expressionWarnings', type=d.T.array)]), + withExpressionWarningsMixin(expressionWarnings): { typeChecking+: { expressionWarnings+: if std.isArray(v=expressionWarnings) then expressionWarnings else [expressionWarnings] } }, + }, + '#withConditions':: d.fn(help="\"The conditions represent the latest available observations of a policy's current state.\"", args=[d.arg(name='conditions', type=d.T.array)]), + withConditions(conditions): { conditions: if std.isArray(v=conditions) then conditions else [conditions] }, + '#withConditionsMixin':: d.fn(help="\"The conditions represent the latest available observations of a policy's current state.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='conditions', type=d.T.array)]), + withConditionsMixin(conditions): { conditions+: if std.isArray(v=conditions) then conditions else [conditions] }, + '#withObservedGeneration':: d.fn(help='"The generation observed by the controller."', args=[d.arg(name='observedGeneration', type=d.T.integer)]), + withObservedGeneration(observedGeneration): { observedGeneration: observedGeneration }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1beta1/validation.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1beta1/validation.libsonnet new file mode 100644 index 00000000000..db493d390af --- /dev/null +++ 
b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1beta1/validation.libsonnet @@ -0,0 +1,14 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='validation', url='', help='"Validation specifies the CEL expression which is used to apply the validation."'), + '#withExpression':: d.fn(help="\"Expression represents the expression which will be evaluated by CEL. ref: https://github.com/google/cel-spec CEL expressions have access to the contents of the API request/response, organized into CEL variables as well as some other useful variables:\\n\\n- 'object' - The object from the incoming request. The value is null for DELETE requests. - 'oldObject' - The existing object. The value is null for CREATE requests. - 'request' - Attributes of the API request([ref](/pkg/apis/admission/types.go#AdmissionRequest)). - 'params' - Parameter resource referred to by the policy binding being evaluated. Only populated if the policy has a ParamKind. - 'namespaceObject' - The namespace object that the incoming object belongs to. The value is null for cluster-scoped resources. - 'variables' - Map of composited variables, from its name to its lazily evaluated value.\\n For example, a variable named 'foo' can be accessed as 'variables.foo'.\\n- 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request.\\n See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz\\n- 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the\\n request resource.\\n\\nThe `apiVersion`, `kind`, `metadata.name` and `metadata.generateName` are always accessible from the root of the object. No other metadata properties are accessible.\\n\\nOnly property names of the form `[a-zA-Z_.-/][a-zA-Z0-9_.-/]*` are accessible. Accessible property names are escaped according to the following rules when accessed in the expression: - '__' escapes to '__underscores__' - '.' escapes to '__dot__' - '-' escapes to '__dash__' - '/' escapes to '__slash__' - Property names that exactly match a CEL RESERVED keyword escape to '__{keyword}__'. The keywords are:\\n\\t \\\"true\\\", \\\"false\\\", \\\"null\\\", \\\"in\\\", \\\"as\\\", \\\"break\\\", \\\"const\\\", \\\"continue\\\", \\\"else\\\", \\\"for\\\", \\\"function\\\", \\\"if\\\",\\n\\t \\\"import\\\", \\\"let\\\", \\\"loop\\\", \\\"package\\\", \\\"namespace\\\", \\\"return\\\".\\nExamples:\\n - Expression accessing a property named \\\"namespace\\\": {\\\"Expression\\\": \\\"object.__namespace__ \u003e 0\\\"}\\n - Expression accessing a property named \\\"x-prop\\\": {\\\"Expression\\\": \\\"object.x__dash__prop \u003e 0\\\"}\\n - Expression accessing a property named \\\"redact__d\\\": {\\\"Expression\\\": \\\"object.redact__underscores__d \u003e 0\\\"}\\n\\nEquality on arrays with list type of 'set' or 'map' ignores element order, i.e. [1, 2] == [2, 1]. Concatenation on arrays with x-kubernetes-list-type use the semantics of the list type:\\n - 'set': `X + Y` performs a union where the array positions of all elements in `X` are preserved and\\n non-intersecting elements in `Y` are appended, retaining their partial order.\\n - 'map': `X + Y` performs a merge where the array positions of all keys in `X` are preserved but the values\\n are overwritten by values in `Y` when the key sets of `X` and `Y` intersect. 
Elements in `Y` with\\n non-intersecting keys are appended, retaining their partial order.\\nRequired.\"", args=[d.arg(name='expression', type=d.T.string)]), + withExpression(expression): { expression: expression }, + '#withMessage':: d.fn(help='"Message represents the message displayed when validation fails. The message is required if the Expression contains line breaks. The message must not contain line breaks. If unset, the message is \\"failed rule: {Rule}\\". e.g. \\"must be a URL with the host matching spec.host\\" If the Expression contains line breaks. Message is required. The message must not contain line breaks. If unset, the message is \\"failed Expression: {Expression}\\"."', args=[d.arg(name='message', type=d.T.string)]), + withMessage(message): { message: message }, + '#withMessageExpression':: d.fn(help="\"messageExpression declares a CEL expression that evaluates to the validation failure message that is returned when this rule fails. Since messageExpression is used as a failure message, it must evaluate to a string. If both message and messageExpression are present on a validation, then messageExpression will be used if validation fails. If messageExpression results in a runtime error, the runtime error is logged, and the validation failure message is produced as if the messageExpression field were unset. If messageExpression evaluates to an empty string, a string with only spaces, or a string that contains line breaks, then the validation failure message will also be produced as if the messageExpression field were unset, and the fact that messageExpression produced an empty string/string with only spaces/string with line breaks will be logged. messageExpression has access to all the same variables as the `expression` except for 'authorizer' and 'authorizer.requestResource'. Example: \\\"object.x must be less than max (\\\"+string(params.max)+\\\")\\", args=[d.arg(name='messageExpression', type=d.T.string)]), + withMessageExpression(messageExpression): { messageExpression: messageExpression }, + '#withReason':: d.fn(help='"Reason represents a machine-readable description of why this validation failed. If this is the first validation in the list to fail, this reason, as well as the corresponding HTTP response code, are used in the HTTP response to the client. The currently supported reasons are: \\"Unauthorized\\", \\"Forbidden\\", \\"Invalid\\", \\"RequestEntityTooLarge\\". If not set, StatusReasonInvalid is used in the response to the client."', args=[d.arg(name='reason', type=d.T.string)]), + withReason(reason): { reason: reason }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1beta1/variable.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1beta1/variable.libsonnet new file mode 100644 index 00000000000..084c27b79b3 --- /dev/null +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1beta1/variable.libsonnet @@ -0,0 +1,10 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='variable', url='', help='"Variable is the definition of a variable that is used for composition. A variable is defined as a named expression."'), + '#withExpression':: d.fn(help='"Expression is the expression that will be evaluated as the value of the variable. 
The CEL expression has access to the same identifiers as the CEL expressions in Validation."', args=[d.arg(name='expression', type=d.T.string)]), + withExpression(expression): { expression: expression }, + '#withName':: d.fn(help='"Name is the name of the variable. The name must be a valid CEL identifier and unique among all variables. The variable can be accessed in other expressions through `variables` For example, if name is \\"foo\\", the variable will be available as `variables.foo`"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { name: name }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apiregistration/main.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apiregistration/main.libsonnet similarity index 75% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apiregistration/main.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apiregistration/main.libsonnet index f4839e1e05b..9ec84c9aac9 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apiregistration/main.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apiregistration/main.libsonnet @@ -2,5 +2,4 @@ local d = (import 'doc-util/main.libsonnet'), '#':: d.pkg(name='apiregistration', url='', help=''), v1: (import 'v1/main.libsonnet'), - v1beta1: (import 'v1beta1/main.libsonnet'), } diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apiregistration/v1/apiService.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apiregistration/v1/apiService.libsonnet similarity index 86% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apiregistration/v1/apiService.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apiregistration/v1/apiService.libsonnet index e1671ea3b21..53413e2a246 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apiregistration/v1/apiService.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apiregistration/v1/apiService.libsonnet @@ -3,12 +3,10 @@ '#':: d.pkg(name='apiService', url='', help='"APIService represents a server for a particular GroupVersion. Name must be \\"version.group\\"."'), '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -19,21 +17,21 @@ withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. 
Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { metadata+: { generateName: generateName } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. 
More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { metadata+: { labels+: labels } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. 
An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { metadata+: { namespace: namespace } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, @@ -41,9 +39,9 @@ withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { metadata+: { uid: uid } }, }, '#new':: d.fn(help='new returns an instance of APIService', args=[d.arg(name='name', type=d.T.string)]), @@ -66,7 +64,7 @@ withCaBundle(caBundle): { spec+: { caBundle: caBundle } }, '#withGroup':: d.fn(help='"Group is the API group name this server hosts"', args=[d.arg(name='group', type=d.T.string)]), withGroup(group): { spec+: { group: group } }, - '#withGroupPriorityMinimum':: d.fn(help="\"GroupPriorityMininum is the priority this group should have at least. Higher priority means that the group is preferred by clients over lower priority ones. Note that other versions of this group might specify even higher GroupPriorityMininum values such that the whole group gets a higher priority. The primary sort is based on GroupPriorityMinimum, ordered highest number to lowest (20 before 10). The secondary sort is based on the alphabetical comparison of the name of the object. (v1.bar before v1.foo) We'd recommend something like: *.k8s.io (except extensions) at 18000 and PaaSes (OpenShift, Deis) are recommended to be in the 2000s\"", args=[d.arg(name='groupPriorityMinimum', type=d.T.integer)]), + '#withGroupPriorityMinimum':: d.fn(help="\"GroupPriorityMinimum is the priority this group should have at least. Higher priority means that the group is preferred by clients over lower priority ones. Note that other versions of this group might specify even higher GroupPriorityMinimum values such that the whole group gets a higher priority. The primary sort is based on GroupPriorityMinimum, ordered highest number to lowest (20 before 10). The secondary sort is based on the alphabetical comparison of the name of the object. (v1.bar before v1.foo) We'd recommend something like: *.k8s.io (except extensions) at 18000 and PaaSes (OpenShift, Deis) are recommended to be in the 2000s\"", args=[d.arg(name='groupPriorityMinimum', type=d.T.integer)]), withGroupPriorityMinimum(groupPriorityMinimum): { spec+: { groupPriorityMinimum: groupPriorityMinimum } }, '#withInsecureSkipTLSVerify':: d.fn(help='"InsecureSkipTLSVerify disables TLS certificate verification when communicating with this server. This is strongly discouraged. 
You should use the CABundle instead."', args=[d.arg(name='insecureSkipTLSVerify', type=d.T.boolean)]), withInsecureSkipTLSVerify(insecureSkipTLSVerify): { spec+: { insecureSkipTLSVerify: insecureSkipTLSVerify } }, diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apiregistration/v1/apiServiceCondition.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apiregistration/v1/apiServiceCondition.libsonnet similarity index 100% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apiregistration/v1/apiServiceCondition.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apiregistration/v1/apiServiceCondition.libsonnet diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apiregistration/v1/apiServiceSpec.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apiregistration/v1/apiServiceSpec.libsonnet similarity index 96% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apiregistration/v1/apiServiceSpec.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apiregistration/v1/apiServiceSpec.libsonnet index 88d858e5edd..49513ec0e31 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apiregistration/v1/apiServiceSpec.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apiregistration/v1/apiServiceSpec.libsonnet @@ -14,7 +14,7 @@ withCaBundle(caBundle): { caBundle: caBundle }, '#withGroup':: d.fn(help='"Group is the API group name this server hosts"', args=[d.arg(name='group', type=d.T.string)]), withGroup(group): { group: group }, - '#withGroupPriorityMinimum':: d.fn(help="\"GroupPriorityMininum is the priority this group should have at least. Higher priority means that the group is preferred by clients over lower priority ones. Note that other versions of this group might specify even higher GroupPriorityMininum values such that the whole group gets a higher priority. The primary sort is based on GroupPriorityMinimum, ordered highest number to lowest (20 before 10). The secondary sort is based on the alphabetical comparison of the name of the object. (v1.bar before v1.foo) We'd recommend something like: *.k8s.io (except extensions) at 18000 and PaaSes (OpenShift, Deis) are recommended to be in the 2000s\"", args=[d.arg(name='groupPriorityMinimum', type=d.T.integer)]), + '#withGroupPriorityMinimum':: d.fn(help="\"GroupPriorityMinimum is the priority this group should have at least. Higher priority means that the group is preferred by clients over lower priority ones. Note that other versions of this group might specify even higher GroupPriorityMinimum values such that the whole group gets a higher priority. The primary sort is based on GroupPriorityMinimum, ordered highest number to lowest (20 before 10). The secondary sort is based on the alphabetical comparison of the name of the object. (v1.bar before v1.foo) We'd recommend something like: *.k8s.io (except extensions) at 18000 and PaaSes (OpenShift, Deis) are recommended to be in the 2000s\"", args=[d.arg(name='groupPriorityMinimum', type=d.T.integer)]), withGroupPriorityMinimum(groupPriorityMinimum): { groupPriorityMinimum: groupPriorityMinimum }, '#withInsecureSkipTLSVerify':: d.fn(help='"InsecureSkipTLSVerify disables TLS certificate verification when communicating with this server. 
This is strongly discouraged. You should use the CABundle instead."', args=[d.arg(name='insecureSkipTLSVerify', type=d.T.boolean)]), withInsecureSkipTLSVerify(insecureSkipTLSVerify): { insecureSkipTLSVerify: insecureSkipTLSVerify }, diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apiregistration/v1/apiServiceStatus.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apiregistration/v1/apiServiceStatus.libsonnet similarity index 100% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apiregistration/v1/apiServiceStatus.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apiregistration/v1/apiServiceStatus.libsonnet diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apiregistration/v1/main.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apiregistration/v1/main.libsonnet similarity index 100% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apiregistration/v1/main.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apiregistration/v1/main.libsonnet diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apiregistration/v1/serviceReference.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apiregistration/v1/serviceReference.libsonnet similarity index 100% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apiregistration/v1/serviceReference.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apiregistration/v1/serviceReference.libsonnet diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apiserverinternal/main.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apiserverinternal/main.libsonnet similarity index 100% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apiserverinternal/main.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apiserverinternal/main.libsonnet diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apiserverinternal/v1alpha1/main.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apiserverinternal/v1alpha1/main.libsonnet similarity index 100% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apiserverinternal/v1alpha1/main.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apiserverinternal/v1alpha1/main.libsonnet diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apiserverinternal/v1alpha1/serverStorageVersion.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apiserverinternal/v1alpha1/serverStorageVersion.libsonnet similarity index 68% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apiserverinternal/v1alpha1/serverStorageVersion.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apiserverinternal/v1alpha1/serverStorageVersion.libsonnet index 52467bb3906..77f8c2fe6d2 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apiserverinternal/v1alpha1/serverStorageVersion.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apiserverinternal/v1alpha1/serverStorageVersion.libsonnet @@ -9,6 +9,10 @@ 
withDecodableVersionsMixin(decodableVersions): { decodableVersions+: if std.isArray(v=decodableVersions) then decodableVersions else [decodableVersions] }, '#withEncodingVersion':: d.fn(help='"The API server encodes the object to this version when persisting it in the backend (e.g., etcd)."', args=[d.arg(name='encodingVersion', type=d.T.string)]), withEncodingVersion(encodingVersion): { encodingVersion: encodingVersion }, + '#withServedVersions':: d.fn(help='"The API server can serve these versions. DecodableVersions must include all ServedVersions."', args=[d.arg(name='servedVersions', type=d.T.array)]), + withServedVersions(servedVersions): { servedVersions: if std.isArray(v=servedVersions) then servedVersions else [servedVersions] }, + '#withServedVersionsMixin':: d.fn(help='"The API server can serve these versions. DecodableVersions must include all ServedVersions."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='servedVersions', type=d.T.array)]), + withServedVersionsMixin(servedVersions): { servedVersions+: if std.isArray(v=servedVersions) then servedVersions else [servedVersions] }, '#mixin': 'ignore', mixin: self, } diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apiserverinternal/v1alpha1/storageVersion.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apiserverinternal/v1alpha1/storageVersion.libsonnet similarity index 83% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apiserverinternal/v1alpha1/storageVersion.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apiserverinternal/v1alpha1/storageVersion.libsonnet index 9dcc22785b1..ded412050d1 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apiserverinternal/v1alpha1/storageVersion.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apiserverinternal/v1alpha1/storageVersion.libsonnet @@ -1,14 +1,12 @@ { local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='storageVersion', url='', help='"\\n Storage version of a specific resource."'), + '#':: d.pkg(name='storageVersion', url='', help='"Storage version of a specific resource."'), '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. 
More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -19,21 +17,21 @@ withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. 
The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { metadata+: { generateName: generateName } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { metadata+: { labels+: labels } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. 
This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { metadata+: { namespace: namespace } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. 
There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, @@ -41,9 +39,9 @@ withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { metadata+: { uid: uid } }, }, '#new':: d.fn(help='new returns an instance of StorageVersion', args=[d.arg(name='name', type=d.T.string)]), diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apiserverinternal/v1alpha1/storageVersionCondition.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apiserverinternal/v1alpha1/storageVersionCondition.libsonnet similarity index 100% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apiserverinternal/v1alpha1/storageVersionCondition.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apiserverinternal/v1alpha1/storageVersionCondition.libsonnet diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apiserverinternal/v1alpha1/storageVersionSpec.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apiserverinternal/v1alpha1/storageVersionSpec.libsonnet similarity index 100% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apiserverinternal/v1alpha1/storageVersionSpec.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apiserverinternal/v1alpha1/storageVersionSpec.libsonnet diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apiserverinternal/v1alpha1/storageVersionStatus.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apiserverinternal/v1alpha1/storageVersionStatus.libsonnet similarity index 100% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apiserverinternal/v1alpha1/storageVersionStatus.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apiserverinternal/v1alpha1/storageVersionStatus.libsonnet diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apps/main.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apps/main.libsonnet similarity index 100% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apps/main.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apps/main.libsonnet diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apps/v1/controllerRevision.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apps/v1/controllerRevision.libsonnet similarity index 71% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apps/v1/controllerRevision.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apps/v1/controllerRevision.libsonnet index 135fa851dd7..370461e3999 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apps/v1/controllerRevision.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apps/v1/controllerRevision.libsonnet @@ -3,12 +3,10 @@ '#':: d.pkg(name='controllerRevision', url='', help='"ControllerRevision implements an immutable snapshot of state data. Clients are responsible for serializing and deserializing the objects that contain their internal state. Once a ControllerRevision has been successfully created, it can not be updated. The API Server will fail validation of all requests that attempt to mutate the Data field. ControllerRevisions may, however, be deleted. 
Note that, due to its use by both the DaemonSet and StatefulSet controllers for update and rollback, this object is beta. However, it may be subject to name and representation changes in future releases, and clients should not depend on its stability. It is primarily for internal use by controllers."'), '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. 
Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -19,21 +17,21 @@ withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { metadata+: { generateName: generateName } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. 
Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { metadata+: { labels+: labels } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. 
Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { metadata+: { namespace: namespace } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, @@ -41,9 +39,9 @@ withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. 
Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { metadata+: { uid: uid } }, }, '#new':: d.fn(help='new returns an instance of ControllerRevision', args=[d.arg(name='name', type=d.T.string)]), @@ -51,9 +49,9 @@ apiVersion: 'apps/v1', kind: 'ControllerRevision', } + self.metadata.withName(name=name), - '#withData':: d.fn(help="\"RawExtension is used to hold extensions in external versions.\\n\\nTo use this, make a field which has RawExtension as its type in your external, versioned struct, and Object in your internal struct. You also need to register your various plugin types.\\n\\n// Internal package: type MyAPIObject struct {\\n\\truntime.TypeMeta `json:\\\",inline\\\"`\\n\\tMyPlugin runtime.Object `json:\\\"myPlugin\\\"`\\n} type PluginA struct {\\n\\tAOption string `json:\\\"aOption\\\"`\\n}\\n\\n// External package: type MyAPIObject struct {\\n\\truntime.TypeMeta `json:\\\",inline\\\"`\\n\\tMyPlugin runtime.RawExtension `json:\\\"myPlugin\\\"`\\n} type PluginA struct {\\n\\tAOption string `json:\\\"aOption\\\"`\\n}\\n\\n// On the wire, the JSON will look something like this: {\\n\\t\\\"kind\\\":\\\"MyAPIObject\\\",\\n\\t\\\"apiVersion\\\":\\\"v1\\\",\\n\\t\\\"myPlugin\\\": {\\n\\t\\t\\\"kind\\\":\\\"PluginA\\\",\\n\\t\\t\\\"aOption\\\":\\\"foo\\\",\\n\\t},\\n}\\n\\nSo what happens? Decode first uses json or yaml to unmarshal the serialized data into your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked. The next step is to copy (using pkg/conversion) into the internal struct. The runtime package's DefaultScheme has conversion functions installed which will unpack the JSON stored in RawExtension, turning it into the correct object type, and storing it in the Object. (TODO: In the case where the object is of an unknown type, a runtime.Unknown object will be created and stored.)\"", args=[d.arg(name='data', type=d.T.object)]), + '#withData':: d.fn(help="\"RawExtension is used to hold extensions in external versions.\\n\\nTo use this, make a field which has RawExtension as its type in your external, versioned struct, and Object in your internal struct. 
You also need to register your various plugin types.\\n\\n// Internal package:\\n\\n\\ttype MyAPIObject struct {\\n\\t\\truntime.TypeMeta `json:\\\",inline\\\"`\\n\\t\\tMyPlugin runtime.Object `json:\\\"myPlugin\\\"`\\n\\t}\\n\\n\\ttype PluginA struct {\\n\\t\\tAOption string `json:\\\"aOption\\\"`\\n\\t}\\n\\n// External package:\\n\\n\\ttype MyAPIObject struct {\\n\\t\\truntime.TypeMeta `json:\\\",inline\\\"`\\n\\t\\tMyPlugin runtime.RawExtension `json:\\\"myPlugin\\\"`\\n\\t}\\n\\n\\ttype PluginA struct {\\n\\t\\tAOption string `json:\\\"aOption\\\"`\\n\\t}\\n\\n// On the wire, the JSON will look something like this:\\n\\n\\t{\\n\\t\\t\\\"kind\\\":\\\"MyAPIObject\\\",\\n\\t\\t\\\"apiVersion\\\":\\\"v1\\\",\\n\\t\\t\\\"myPlugin\\\": {\\n\\t\\t\\t\\\"kind\\\":\\\"PluginA\\\",\\n\\t\\t\\t\\\"aOption\\\":\\\"foo\\\",\\n\\t\\t},\\n\\t}\\n\\nSo what happens? Decode first uses json or yaml to unmarshal the serialized data into your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked. The next step is to copy (using pkg/conversion) into the internal struct. The runtime package's DefaultScheme has conversion functions installed which will unpack the JSON stored in RawExtension, turning it into the correct object type, and storing it in the Object. (TODO: In the case where the object is of an unknown type, a runtime.Unknown object will be created and stored.)\"", args=[d.arg(name='data', type=d.T.object)]), withData(data): { data: data }, - '#withDataMixin':: d.fn(help="\"RawExtension is used to hold extensions in external versions.\\n\\nTo use this, make a field which has RawExtension as its type in your external, versioned struct, and Object in your internal struct. You also need to register your various plugin types.\\n\\n// Internal package: type MyAPIObject struct {\\n\\truntime.TypeMeta `json:\\\",inline\\\"`\\n\\tMyPlugin runtime.Object `json:\\\"myPlugin\\\"`\\n} type PluginA struct {\\n\\tAOption string `json:\\\"aOption\\\"`\\n}\\n\\n// External package: type MyAPIObject struct {\\n\\truntime.TypeMeta `json:\\\",inline\\\"`\\n\\tMyPlugin runtime.RawExtension `json:\\\"myPlugin\\\"`\\n} type PluginA struct {\\n\\tAOption string `json:\\\"aOption\\\"`\\n}\\n\\n// On the wire, the JSON will look something like this: {\\n\\t\\\"kind\\\":\\\"MyAPIObject\\\",\\n\\t\\\"apiVersion\\\":\\\"v1\\\",\\n\\t\\\"myPlugin\\\": {\\n\\t\\t\\\"kind\\\":\\\"PluginA\\\",\\n\\t\\t\\\"aOption\\\":\\\"foo\\\",\\n\\t},\\n}\\n\\nSo what happens? Decode first uses json or yaml to unmarshal the serialized data into your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked. The next step is to copy (using pkg/conversion) into the internal struct. The runtime package's DefaultScheme has conversion functions installed which will unpack the JSON stored in RawExtension, turning it into the correct object type, and storing it in the Object. (TODO: In the case where the object is of an unknown type, a runtime.Unknown object will be created and stored.)\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='data', type=d.T.object)]), + '#withDataMixin':: d.fn(help="\"RawExtension is used to hold extensions in external versions.\\n\\nTo use this, make a field which has RawExtension as its type in your external, versioned struct, and Object in your internal struct. 
You also need to register your various plugin types.\\n\\n// Internal package:\\n\\n\\ttype MyAPIObject struct {\\n\\t\\truntime.TypeMeta `json:\\\",inline\\\"`\\n\\t\\tMyPlugin runtime.Object `json:\\\"myPlugin\\\"`\\n\\t}\\n\\n\\ttype PluginA struct {\\n\\t\\tAOption string `json:\\\"aOption\\\"`\\n\\t}\\n\\n// External package:\\n\\n\\ttype MyAPIObject struct {\\n\\t\\truntime.TypeMeta `json:\\\",inline\\\"`\\n\\t\\tMyPlugin runtime.RawExtension `json:\\\"myPlugin\\\"`\\n\\t}\\n\\n\\ttype PluginA struct {\\n\\t\\tAOption string `json:\\\"aOption\\\"`\\n\\t}\\n\\n// On the wire, the JSON will look something like this:\\n\\n\\t{\\n\\t\\t\\\"kind\\\":\\\"MyAPIObject\\\",\\n\\t\\t\\\"apiVersion\\\":\\\"v1\\\",\\n\\t\\t\\\"myPlugin\\\": {\\n\\t\\t\\t\\\"kind\\\":\\\"PluginA\\\",\\n\\t\\t\\t\\\"aOption\\\":\\\"foo\\\",\\n\\t\\t},\\n\\t}\\n\\nSo what happens? Decode first uses json or yaml to unmarshal the serialized data into your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked. The next step is to copy (using pkg/conversion) into the internal struct. The runtime package's DefaultScheme has conversion functions installed which will unpack the JSON stored in RawExtension, turning it into the correct object type, and storing it in the Object. (TODO: In the case where the object is of an unknown type, a runtime.Unknown object will be created and stored.)\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='data', type=d.T.object)]), withDataMixin(data): { data+: data }, '#withRevision':: d.fn(help='"Revision indicates the revision of the state represented by Data."', args=[d.arg(name='revision', type=d.T.integer)]), withRevision(revision): { revision: revision }, diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apps/v1/daemonSet.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apps/v1/daemonSet.libsonnet similarity index 86% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apps/v1/daemonSet.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apps/v1/daemonSet.libsonnet index 68fe17bcd9a..28fe965b6e6 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apps/v1/daemonSet.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apps/v1/daemonSet.libsonnet @@ -3,12 +3,10 @@ '#':: d.pkg(name='daemonSet', url='', help='"DaemonSet represents the configuration of a daemon set."'), '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -19,21 +17,21 @@ withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. 
Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { metadata+: { generateName: generateName } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. 
More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { metadata+: { labels+: labels } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. 
An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { metadata+: { namespace: namespace } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, @@ -41,9 +39,9 @@ withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { metadata+: { uid: uid } }, }, '#new':: d.fn(help='new returns an instance of DaemonSet', args=[d.arg(name='name', type=d.T.string)]), @@ -68,12 +66,10 @@ template: { '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { spec+: { template+: { metadata+: { annotations: annotations } } } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { spec+: { template+: { metadata+: { annotations+: annotations } } } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { spec+: { template+: { metadata+: { clusterName: clusterName } } } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { spec+: { template+: { metadata+: { creationTimestamp: creationTimestamp } } } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. 
Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -84,21 +80,21 @@ withFinalizers(finalizers): { spec+: { template+: { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } } } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { spec+: { template+: { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } } } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { spec+: { template+: { metadata+: { generateName: generateName } } } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. 
Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { spec+: { template+: { metadata+: { generation: generation } } } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { spec+: { template+: { metadata+: { labels: labels } } } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { spec+: { template+: { metadata+: { labels+: labels } } } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { spec+: { template+: { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } } } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { spec+: { template+: { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } } } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. 
More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { spec+: { template+: { metadata+: { name: name } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { spec+: { template+: { metadata+: { namespace: namespace } } } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { spec+: { template+: { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } } } }, @@ -106,9 +102,9 @@ withOwnerReferencesMixin(ownerReferences): { spec+: { template+: { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } } } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { spec+: { template+: { metadata+: { resourceVersion: resourceVersion } } } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. 
Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { spec+: { template+: { metadata+: { selfLink: selfLink } } } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { spec+: { template+: { metadata+: { uid: uid } } } }, }, '#spec':: d.obj(help='"PodSpec is a description of a pod."'), @@ -167,6 +163,11 @@ '#withSearchesMixin':: d.fn(help='"A list of DNS search domains for host-name lookup. This will be appended to the base search paths generated from DNSPolicy. Duplicated search paths will be removed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='searches', type=d.T.array)]), withSearchesMixin(searches): { spec+: { template+: { spec+: { dnsConfig+: { searches+: if std.isArray(v=searches) then searches else [searches] } } } } }, }, + '#os':: d.obj(help='"PodOS defines the OS parameters of a pod."'), + os: { + '#withName':: d.fn(help='"Name is the name of the operating system. The currently supported values are linux and windows. Additional value may be defined in future and can be one of: https://github.com/opencontainers/runtime-spec/blob/master/config.md#platform-specific-configuration Clients should expect to handle additional values and treat unrecognized values in this field as os: null"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { spec+: { template+: { spec+: { os+: { name: name } } } } }, + }, '#securityContext':: d.obj(help='"PodSecurityContext holds pod-level security attributes and common container settings. Some fields are also present in container.securityContext. Field values of container.securityContext take precedence over field values of PodSecurityContext."'), securityContext: { '#seLinuxOptions':: d.obj(help='"SELinuxOptions are the labels to be applied to the container"'), @@ -182,7 +183,7 @@ }, '#seccompProfile':: d.obj(help="\"SeccompProfile defines a pod/container's seccomp profile settings. Only one profile source may be set.\""), seccompProfile: { - '#withLocalhostProfile':: d.fn(help="\"localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must only be set if type is \\\"Localhost\\\".\"", args=[d.arg(name='localhostProfile', type=d.T.string)]), + '#withLocalhostProfile':: d.fn(help="\"localhostProfile indicates a profile defined in a file on the node should be used. 
The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must be set if type is \\\"Localhost\\\". Must NOT be set for any other type.\"", args=[d.arg(name='localhostProfile', type=d.T.string)]), withLocalhostProfile(localhostProfile): { spec+: { template+: { spec+: { securityContext+: { seccompProfile+: { localhostProfile: localhostProfile } } } } } }, '#withType':: d.fn(help='"type indicates which kind of seccomp profile will be applied. Valid options are:\\n\\nLocalhost - a profile defined in a file on the node should be used. RuntimeDefault - the container runtime default profile should be used. Unconfined - no profile should be applied."', args=[d.arg(name='type', type=d.T.string)]), withType(type): { spec+: { template+: { spec+: { securityContext+: { seccompProfile+: { type: type } } } } } }, @@ -193,26 +194,28 @@ withGmsaCredentialSpec(gmsaCredentialSpec): { spec+: { template+: { spec+: { securityContext+: { windowsOptions+: { gmsaCredentialSpec: gmsaCredentialSpec } } } } } }, '#withGmsaCredentialSpecName':: d.fn(help='"GMSACredentialSpecName is the name of the GMSA credential spec to use."', args=[d.arg(name='gmsaCredentialSpecName', type=d.T.string)]), withGmsaCredentialSpecName(gmsaCredentialSpecName): { spec+: { template+: { spec+: { securityContext+: { windowsOptions+: { gmsaCredentialSpecName: gmsaCredentialSpecName } } } } } }, + '#withHostProcess':: d.fn(help="\"HostProcess determines if a container should be run as a 'Host Process' container. All of a Pod's containers must have the same effective HostProcess value (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). In addition, if HostProcess is true then HostNetwork must also be set to true.\"", args=[d.arg(name='hostProcess', type=d.T.boolean)]), + withHostProcess(hostProcess): { spec+: { template+: { spec+: { securityContext+: { windowsOptions+: { hostProcess: hostProcess } } } } } }, '#withRunAsUserName':: d.fn(help='"The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence."', args=[d.arg(name='runAsUserName', type=d.T.string)]), withRunAsUserName(runAsUserName): { spec+: { template+: { spec+: { securityContext+: { windowsOptions+: { runAsUserName: runAsUserName } } } } } }, }, - '#withFsGroup':: d.fn(help="\"A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod:\\n\\n1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw----\\n\\nIf unset, the Kubelet will not modify the ownership and permissions of any volume.\"", args=[d.arg(name='fsGroup', type=d.T.integer)]), + '#withFsGroup':: d.fn(help="\"A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod:\\n\\n1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw----\\n\\nIf unset, the Kubelet will not modify the ownership and permissions of any volume. 
Note that this field cannot be set when spec.os.name is windows.\"", args=[d.arg(name='fsGroup', type=d.T.integer)]), withFsGroup(fsGroup): { spec+: { template+: { spec+: { securityContext+: { fsGroup: fsGroup } } } } }, - '#withFsGroupChangePolicy':: d.fn(help='"fsGroupChangePolicy defines behavior of changing ownership and permission of the volume before being exposed inside Pod. This field will only apply to volume types which support fsGroup based ownership(and permissions). It will have no effect on ephemeral volume types such as: secret, configmaps and emptydir. Valid values are \\"OnRootMismatch\\" and \\"Always\\". If not specified, \\"Always\\" is used."', args=[d.arg(name='fsGroupChangePolicy', type=d.T.string)]), + '#withFsGroupChangePolicy':: d.fn(help='"fsGroupChangePolicy defines behavior of changing ownership and permission of the volume before being exposed inside Pod. This field will only apply to volume types which support fsGroup based ownership(and permissions). It will have no effect on ephemeral volume types such as: secret, configmaps and emptydir. Valid values are \\"OnRootMismatch\\" and \\"Always\\". If not specified, \\"Always\\" is used. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='fsGroupChangePolicy', type=d.T.string)]), withFsGroupChangePolicy(fsGroupChangePolicy): { spec+: { template+: { spec+: { securityContext+: { fsGroupChangePolicy: fsGroupChangePolicy } } } } }, - '#withRunAsGroup':: d.fn(help='"The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container."', args=[d.arg(name='runAsGroup', type=d.T.integer)]), + '#withRunAsGroup':: d.fn(help='"The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='runAsGroup', type=d.T.integer)]), withRunAsGroup(runAsGroup): { spec+: { template+: { spec+: { securityContext+: { runAsGroup: runAsGroup } } } } }, '#withRunAsNonRoot':: d.fn(help='"Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence."', args=[d.arg(name='runAsNonRoot', type=d.T.boolean)]), withRunAsNonRoot(runAsNonRoot): { spec+: { template+: { spec+: { securityContext+: { runAsNonRoot: runAsNonRoot } } } } }, - '#withRunAsUser':: d.fn(help='"The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container."', args=[d.arg(name='runAsUser', type=d.T.integer)]), + '#withRunAsUser':: d.fn(help='"The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. 
If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='runAsUser', type=d.T.integer)]), withRunAsUser(runAsUser): { spec+: { template+: { spec+: { securityContext+: { runAsUser: runAsUser } } } } }, - '#withSupplementalGroups':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID. If unspecified, no groups will be added to any container.\"", args=[d.arg(name='supplementalGroups', type=d.T.array)]), + '#withSupplementalGroups':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID, the fsGroup (if specified), and group memberships defined in the container image for the uid of the container process. If unspecified, no additional groups are added to any container. Note that group memberships defined in the container image for the uid of the container process are still effective, even if they are not included in this list. Note that this field cannot be set when spec.os.name is windows.\"", args=[d.arg(name='supplementalGroups', type=d.T.array)]), withSupplementalGroups(supplementalGroups): { spec+: { template+: { spec+: { securityContext+: { supplementalGroups: if std.isArray(v=supplementalGroups) then supplementalGroups else [supplementalGroups] } } } } }, - '#withSupplementalGroupsMixin':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID. If unspecified, no groups will be added to any container.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='supplementalGroups', type=d.T.array)]), + '#withSupplementalGroupsMixin':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID, the fsGroup (if specified), and group memberships defined in the container image for the uid of the container process. If unspecified, no additional groups are added to any container. Note that group memberships defined in the container image for the uid of the container process are still effective, even if they are not included in this list. Note that this field cannot be set when spec.os.name is windows.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='supplementalGroups', type=d.T.array)]), withSupplementalGroupsMixin(supplementalGroups): { spec+: { template+: { spec+: { securityContext+: { supplementalGroups+: if std.isArray(v=supplementalGroups) then supplementalGroups else [supplementalGroups] } } } } }, - '#withSysctls':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch."', args=[d.arg(name='sysctls', type=d.T.array)]), + '#withSysctls':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='sysctls', type=d.T.array)]), withSysctls(sysctls): { spec+: { template+: { spec+: { securityContext+: { sysctls: if std.isArray(v=sysctls) then sysctls else [sysctls] } } } } }, - '#withSysctlsMixin':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. 
Pods with unsupported sysctls (by the container runtime) might fail to launch."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='sysctls', type=d.T.array)]), + '#withSysctlsMixin':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch. Note that this field cannot be set when spec.os.name is windows."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='sysctls', type=d.T.array)]), withSysctlsMixin(sysctls): { spec+: { template+: { spec+: { securityContext+: { sysctls+: if std.isArray(v=sysctls) then sysctls else [sysctls] } } } } }, }, '#withActiveDeadlineSeconds':: d.fn(help='"Optional duration in seconds the pod may be active on the node relative to StartTime before the system will actively try to mark it failed and kill associated containers. Value must be a positive integer."', args=[d.arg(name='activeDeadlineSeconds', type=d.T.integer)]), @@ -227,9 +230,9 @@ withDnsPolicy(dnsPolicy): { spec+: { template+: { spec+: { dnsPolicy: dnsPolicy } } } }, '#withEnableServiceLinks':: d.fn(help="\"EnableServiceLinks indicates whether information about services should be injected into pod's environment variables, matching the syntax of Docker links. Optional: Defaults to true.\"", args=[d.arg(name='enableServiceLinks', type=d.T.boolean)]), withEnableServiceLinks(enableServiceLinks): { spec+: { template+: { spec+: { enableServiceLinks: enableServiceLinks } } } }, - '#withEphemeralContainers':: d.fn(help="\"List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. This field is alpha-level and is only honored by servers that enable the EphemeralContainers feature.\"", args=[d.arg(name='ephemeralContainers', type=d.T.array)]), + '#withEphemeralContainers':: d.fn(help="\"List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource.\"", args=[d.arg(name='ephemeralContainers', type=d.T.array)]), withEphemeralContainers(ephemeralContainers): { spec+: { template+: { spec+: { ephemeralContainers: if std.isArray(v=ephemeralContainers) then ephemeralContainers else [ephemeralContainers] } } } }, - '#withEphemeralContainersMixin':: d.fn(help="\"List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. This field is alpha-level and is only honored by servers that enable the EphemeralContainers feature.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='ephemeralContainers', type=d.T.array)]), + '#withEphemeralContainersMixin':: d.fn(help="\"List of ephemeral containers run in this pod. 
Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='ephemeralContainers', type=d.T.array)]), withEphemeralContainersMixin(ephemeralContainers): { spec+: { template+: { spec+: { ephemeralContainers+: if std.isArray(v=ephemeralContainers) then ephemeralContainers else [ephemeralContainers] } } } }, '#withHostAliases':: d.fn(help="\"HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts file if specified. This is only valid for non-hostNetwork pods.\"", args=[d.arg(name='hostAliases', type=d.T.array)]), withHostAliases(hostAliases): { spec+: { template+: { spec+: { hostAliases: if std.isArray(v=hostAliases) then hostAliases else [hostAliases] } } } }, @@ -241,11 +244,13 @@ withHostNetwork(hostNetwork): { spec+: { template+: { spec+: { hostNetwork: hostNetwork } } } }, '#withHostPID':: d.fn(help="\"Use the host's pid namespace. Optional: Default to false.\"", args=[d.arg(name='hostPID', type=d.T.boolean)]), withHostPID(hostPID): { spec+: { template+: { spec+: { hostPID: hostPID } } } }, + '#withHostUsers':: d.fn(help="\"Use the host's user namespace. Optional: Default to true. If set to true or not present, the pod will be run in the host user namespace, useful for when the pod needs a feature only available to the host user namespace, such as loading a kernel module with CAP_SYS_MODULE. When set to false, a new userns is created for the pod. Setting false is useful for mitigating container breakout vulnerabilities even allowing users to run their containers as root without actually having root privileges on the host. This field is alpha-level and is only honored by servers that enable the UserNamespacesSupport feature.\"", args=[d.arg(name='hostUsers', type=d.T.boolean)]), + withHostUsers(hostUsers): { spec+: { template+: { spec+: { hostUsers: hostUsers } } } }, '#withHostname':: d.fn(help="\"Specifies the hostname of the Pod If not specified, the pod's hostname will be set to a system-defined value.\"", args=[d.arg(name='hostname', type=d.T.string)]), withHostname(hostname): { spec+: { template+: { spec+: { hostname: hostname } } } }, - '#withImagePullSecrets':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), + '#withImagePullSecrets':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. 
More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), withImagePullSecrets(imagePullSecrets): { spec+: { template+: { spec+: { imagePullSecrets: if std.isArray(v=imagePullSecrets) then imagePullSecrets else [imagePullSecrets] } } } }, - '#withImagePullSecretsMixin':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), + '#withImagePullSecretsMixin':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), withImagePullSecretsMixin(imagePullSecrets): { spec+: { template+: { spec+: { imagePullSecrets+: if std.isArray(v=imagePullSecrets) then imagePullSecrets else [imagePullSecrets] } } } }, '#withInitContainers':: d.fn(help='"List of initialization containers belonging to the pod. Init containers are executed in order prior to containers being started. If any init container fails, the pod is considered to have failed and is handled according to its restartPolicy. The name for an init container or normal container must be unique among all containers. Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes. The resourceRequirements of an init container are taken into account during scheduling by finding the highest request/limit for each resource type, and then using the max of of that value or the sum of the normal containers. Limits are applied to init containers in a similar fashion. Init containers cannot currently be added or removed. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/"', args=[d.arg(name='initContainers', type=d.T.array)]), withInitContainers(initContainers): { spec+: { template+: { spec+: { initContainers: if std.isArray(v=initContainers) then initContainers else [initContainers] } } } }, @@ -257,26 +262,34 @@ withNodeSelector(nodeSelector): { spec+: { template+: { spec+: { nodeSelector: nodeSelector } } } }, '#withNodeSelectorMixin':: d.fn(help="\"NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='nodeSelector', type=d.T.object)]), withNodeSelectorMixin(nodeSelector): { spec+: { template+: { spec+: { nodeSelector+: nodeSelector } } } }, - '#withOverhead':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. 
This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md This field is alpha-level as of Kubernetes v1.16, and is only honored by servers that enable the PodOverhead feature."', args=[d.arg(name='overhead', type=d.T.object)]), + '#withOverhead':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md"', args=[d.arg(name='overhead', type=d.T.object)]), withOverhead(overhead): { spec+: { template+: { spec+: { overhead: overhead } } } }, - '#withOverheadMixin':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md This field is alpha-level as of Kubernetes v1.16, and is only honored by servers that enable the PodOverhead feature."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='overhead', type=d.T.object)]), + '#withOverheadMixin':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. 
More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='overhead', type=d.T.object)]), withOverheadMixin(overhead): { spec+: { template+: { spec+: { overhead+: overhead } } } }, - '#withPreemptionPolicy':: d.fn(help='"PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset. This field is beta-level, gated by the NonPreemptingPriority feature-gate."', args=[d.arg(name='preemptionPolicy', type=d.T.string)]), + '#withPreemptionPolicy':: d.fn(help='"PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset."', args=[d.arg(name='preemptionPolicy', type=d.T.string)]), withPreemptionPolicy(preemptionPolicy): { spec+: { template+: { spec+: { preemptionPolicy: preemptionPolicy } } } }, '#withPriority':: d.fn(help='"The priority value. Various system components use this field to find the priority of the pod. When Priority Admission Controller is enabled, it prevents users from setting this field. The admission controller populates this field from PriorityClassName. The higher the value, the higher the priority."', args=[d.arg(name='priority', type=d.T.integer)]), withPriority(priority): { spec+: { template+: { spec+: { priority: priority } } } }, '#withPriorityClassName':: d.fn(help="\"If specified, indicates the pod's priority. \\\"system-node-critical\\\" and \\\"system-cluster-critical\\\" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default.\"", args=[d.arg(name='priorityClassName', type=d.T.string)]), withPriorityClassName(priorityClassName): { spec+: { template+: { spec+: { priorityClassName: priorityClassName } } } }, - '#withReadinessGates':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/0007-pod-ready%2B%2B.md"', args=[d.arg(name='readinessGates', type=d.T.array)]), + '#withReadinessGates':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates"', args=[d.arg(name='readinessGates', type=d.T.array)]), withReadinessGates(readinessGates): { spec+: { template+: { spec+: { readinessGates: if std.isArray(v=readinessGates) then readinessGates else [readinessGates] } } } }, - '#withReadinessGatesMixin':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. 
A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/0007-pod-ready%2B%2B.md"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='readinessGates', type=d.T.array)]), + '#withReadinessGatesMixin':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='readinessGates', type=d.T.array)]), withReadinessGatesMixin(readinessGates): { spec+: { template+: { spec+: { readinessGates+: if std.isArray(v=readinessGates) then readinessGates else [readinessGates] } } } }, - '#withRestartPolicy':: d.fn(help='"Restart policy for all containers within the pod. One of Always, OnFailure, Never. Default to Always. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy"', args=[d.arg(name='restartPolicy', type=d.T.string)]), + '#withResourceClaims':: d.fn(help='"ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. The resources will be made available to those containers which consume them by name.\\n\\nThis is an alpha field and requires enabling the DynamicResourceAllocation feature gate.\\n\\nThis field is immutable."', args=[d.arg(name='resourceClaims', type=d.T.array)]), + withResourceClaims(resourceClaims): { spec+: { template+: { spec+: { resourceClaims: if std.isArray(v=resourceClaims) then resourceClaims else [resourceClaims] } } } }, + '#withResourceClaimsMixin':: d.fn(help='"ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. The resources will be made available to those containers which consume them by name.\\n\\nThis is an alpha field and requires enabling the DynamicResourceAllocation feature gate.\\n\\nThis field is immutable."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='resourceClaims', type=d.T.array)]), + withResourceClaimsMixin(resourceClaims): { spec+: { template+: { spec+: { resourceClaims+: if std.isArray(v=resourceClaims) then resourceClaims else [resourceClaims] } } } }, + '#withRestartPolicy':: d.fn(help='"Restart policy for all containers within the pod. One of Always, OnFailure, Never. In some contexts, only a subset of those values may be permitted. Default to Always. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy"', args=[d.arg(name='restartPolicy', type=d.T.string)]), withRestartPolicy(restartPolicy): { spec+: { template+: { spec+: { restartPolicy: restartPolicy } } } }, - '#withRuntimeClassName':: d.fn(help='"RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \\"legacy\\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. 
More info: https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md This is a beta feature as of Kubernetes v1.14."', args=[d.arg(name='runtimeClassName', type=d.T.string)]), + '#withRuntimeClassName':: d.fn(help='"RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \\"legacy\\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class"', args=[d.arg(name='runtimeClassName', type=d.T.string)]), withRuntimeClassName(runtimeClassName): { spec+: { template+: { spec+: { runtimeClassName: runtimeClassName } } } }, '#withSchedulerName':: d.fn(help='"If specified, the pod will be dispatched by specified scheduler. If not specified, the pod will be dispatched by default scheduler."', args=[d.arg(name='schedulerName', type=d.T.string)]), withSchedulerName(schedulerName): { spec+: { template+: { spec+: { schedulerName: schedulerName } } } }, + '#withSchedulingGates':: d.fn(help='"SchedulingGates is an opaque list of values that if specified will block scheduling the pod. If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the scheduler will not attempt to schedule the pod.\\n\\nSchedulingGates can only be set at pod creation time, and be removed only afterwards.\\n\\nThis is a beta feature enabled by the PodSchedulingReadiness feature gate."', args=[d.arg(name='schedulingGates', type=d.T.array)]), + withSchedulingGates(schedulingGates): { spec+: { template+: { spec+: { schedulingGates: if std.isArray(v=schedulingGates) then schedulingGates else [schedulingGates] } } } }, + '#withSchedulingGatesMixin':: d.fn(help='"SchedulingGates is an opaque list of values that if specified will block scheduling the pod. If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the scheduler will not attempt to schedule the pod.\\n\\nSchedulingGates can only be set at pod creation time, and be removed only afterwards.\\n\\nThis is a beta feature enabled by the PodSchedulingReadiness feature gate."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='schedulingGates', type=d.T.array)]), + withSchedulingGatesMixin(schedulingGates): { spec+: { template+: { spec+: { schedulingGates+: if std.isArray(v=schedulingGates) then schedulingGates else [schedulingGates] } } } }, '#withServiceAccount':: d.fn(help='"DeprecatedServiceAccount is a depreciated alias for ServiceAccountName. Deprecated: Use serviceAccountName instead."', args=[d.arg(name='serviceAccount', type=d.T.string)]), withServiceAccount(serviceAccount): { spec+: { template+: { spec+: { serviceAccount: serviceAccount } } } }, '#withServiceAccountName':: d.fn(help='"ServiceAccountName is the name of the ServiceAccount to use to run this pod. 
More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/"', args=[d.arg(name='serviceAccountName', type=d.T.string)]), diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apps/v1/daemonSetCondition.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apps/v1/daemonSetCondition.libsonnet similarity index 100% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apps/v1/daemonSetCondition.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apps/v1/daemonSetCondition.libsonnet diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apps/v1/daemonSetSpec.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apps/v1/daemonSetSpec.libsonnet similarity index 85% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apps/v1/daemonSetSpec.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apps/v1/daemonSetSpec.libsonnet index 8bb51aeb299..ef7c7925767 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apps/v1/daemonSetSpec.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apps/v1/daemonSetSpec.libsonnet @@ -16,12 +16,10 @@ template: { '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { template+: { metadata+: { annotations: annotations } } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { template+: { metadata+: { annotations+: annotations } } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. 
This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { template+: { metadata+: { clusterName: clusterName } } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { template+: { metadata+: { creationTimestamp: creationTimestamp } } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -32,21 +30,21 @@ withFinalizers(finalizers): { template+: { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { template+: { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { template+: { metadata+: { generateName: generateName } } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { template+: { metadata+: { generation: generation } } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { template+: { metadata+: { labels: labels } } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { template+: { metadata+: { labels+: labels } } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". 
The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { template+: { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { template+: { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { template+: { metadata+: { name: name } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { template+: { metadata+: { namespace: namespace } } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. 
There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { template+: { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } } }, @@ -54,9 +52,9 @@ withOwnerReferencesMixin(ownerReferences): { template+: { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { template+: { metadata+: { resourceVersion: resourceVersion } } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { template+: { metadata+: { selfLink: selfLink } } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { template+: { metadata+: { uid: uid } } }, }, '#spec':: d.obj(help='"PodSpec is a description of a pod."'), @@ -115,6 +113,11 @@ '#withSearchesMixin':: d.fn(help='"A list of DNS search domains for host-name lookup. This will be appended to the base search paths generated from DNSPolicy. Duplicated search paths will be removed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='searches', type=d.T.array)]), withSearchesMixin(searches): { template+: { spec+: { dnsConfig+: { searches+: if std.isArray(v=searches) then searches else [searches] } } } }, }, + '#os':: d.obj(help='"PodOS defines the OS parameters of a pod."'), + os: { + '#withName':: d.fn(help='"Name is the name of the operating system. The currently supported values are linux and windows. 
Additional value may be defined in future and can be one of: https://github.com/opencontainers/runtime-spec/blob/master/config.md#platform-specific-configuration Clients should expect to handle additional values and treat unrecognized values in this field as os: null"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { template+: { spec+: { os+: { name: name } } } }, + }, '#securityContext':: d.obj(help='"PodSecurityContext holds pod-level security attributes and common container settings. Some fields are also present in container.securityContext. Field values of container.securityContext take precedence over field values of PodSecurityContext."'), securityContext: { '#seLinuxOptions':: d.obj(help='"SELinuxOptions are the labels to be applied to the container"'), @@ -130,7 +133,7 @@ }, '#seccompProfile':: d.obj(help="\"SeccompProfile defines a pod/container's seccomp profile settings. Only one profile source may be set.\""), seccompProfile: { - '#withLocalhostProfile':: d.fn(help="\"localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must only be set if type is \\\"Localhost\\\".\"", args=[d.arg(name='localhostProfile', type=d.T.string)]), + '#withLocalhostProfile':: d.fn(help="\"localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must be set if type is \\\"Localhost\\\". Must NOT be set for any other type.\"", args=[d.arg(name='localhostProfile', type=d.T.string)]), withLocalhostProfile(localhostProfile): { template+: { spec+: { securityContext+: { seccompProfile+: { localhostProfile: localhostProfile } } } } }, '#withType':: d.fn(help='"type indicates which kind of seccomp profile will be applied. Valid options are:\\n\\nLocalhost - a profile defined in a file on the node should be used. RuntimeDefault - the container runtime default profile should be used. Unconfined - no profile should be applied."', args=[d.arg(name='type', type=d.T.string)]), withType(type): { template+: { spec+: { securityContext+: { seccompProfile+: { type: type } } } } }, @@ -141,26 +144,28 @@ withGmsaCredentialSpec(gmsaCredentialSpec): { template+: { spec+: { securityContext+: { windowsOptions+: { gmsaCredentialSpec: gmsaCredentialSpec } } } } }, '#withGmsaCredentialSpecName':: d.fn(help='"GMSACredentialSpecName is the name of the GMSA credential spec to use."', args=[d.arg(name='gmsaCredentialSpecName', type=d.T.string)]), withGmsaCredentialSpecName(gmsaCredentialSpecName): { template+: { spec+: { securityContext+: { windowsOptions+: { gmsaCredentialSpecName: gmsaCredentialSpecName } } } } }, + '#withHostProcess':: d.fn(help="\"HostProcess determines if a container should be run as a 'Host Process' container. All of a Pod's containers must have the same effective HostProcess value (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). In addition, if HostProcess is true then HostNetwork must also be set to true.\"", args=[d.arg(name='hostProcess', type=d.T.boolean)]), + withHostProcess(hostProcess): { template+: { spec+: { securityContext+: { windowsOptions+: { hostProcess: hostProcess } } } } }, '#withRunAsUserName':: d.fn(help='"The UserName in Windows to run the entrypoint of the container process. 
Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence."', args=[d.arg(name='runAsUserName', type=d.T.string)]), withRunAsUserName(runAsUserName): { template+: { spec+: { securityContext+: { windowsOptions+: { runAsUserName: runAsUserName } } } } }, }, - '#withFsGroup':: d.fn(help="\"A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod:\\n\\n1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw----\\n\\nIf unset, the Kubelet will not modify the ownership and permissions of any volume.\"", args=[d.arg(name='fsGroup', type=d.T.integer)]), + '#withFsGroup':: d.fn(help="\"A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod:\\n\\n1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw----\\n\\nIf unset, the Kubelet will not modify the ownership and permissions of any volume. Note that this field cannot be set when spec.os.name is windows.\"", args=[d.arg(name='fsGroup', type=d.T.integer)]), withFsGroup(fsGroup): { template+: { spec+: { securityContext+: { fsGroup: fsGroup } } } }, - '#withFsGroupChangePolicy':: d.fn(help='"fsGroupChangePolicy defines behavior of changing ownership and permission of the volume before being exposed inside Pod. This field will only apply to volume types which support fsGroup based ownership(and permissions). It will have no effect on ephemeral volume types such as: secret, configmaps and emptydir. Valid values are \\"OnRootMismatch\\" and \\"Always\\". If not specified, \\"Always\\" is used."', args=[d.arg(name='fsGroupChangePolicy', type=d.T.string)]), + '#withFsGroupChangePolicy':: d.fn(help='"fsGroupChangePolicy defines behavior of changing ownership and permission of the volume before being exposed inside Pod. This field will only apply to volume types which support fsGroup based ownership(and permissions). It will have no effect on ephemeral volume types such as: secret, configmaps and emptydir. Valid values are \\"OnRootMismatch\\" and \\"Always\\". If not specified, \\"Always\\" is used. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='fsGroupChangePolicy', type=d.T.string)]), withFsGroupChangePolicy(fsGroupChangePolicy): { template+: { spec+: { securityContext+: { fsGroupChangePolicy: fsGroupChangePolicy } } } }, - '#withRunAsGroup':: d.fn(help='"The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container."', args=[d.arg(name='runAsGroup', type=d.T.integer)]), + '#withRunAsGroup':: d.fn(help='"The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. 
Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='runAsGroup', type=d.T.integer)]), withRunAsGroup(runAsGroup): { template+: { spec+: { securityContext+: { runAsGroup: runAsGroup } } } }, '#withRunAsNonRoot':: d.fn(help='"Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence."', args=[d.arg(name='runAsNonRoot', type=d.T.boolean)]), withRunAsNonRoot(runAsNonRoot): { template+: { spec+: { securityContext+: { runAsNonRoot: runAsNonRoot } } } }, - '#withRunAsUser':: d.fn(help='"The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container."', args=[d.arg(name='runAsUser', type=d.T.integer)]), + '#withRunAsUser':: d.fn(help='"The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='runAsUser', type=d.T.integer)]), withRunAsUser(runAsUser): { template+: { spec+: { securityContext+: { runAsUser: runAsUser } } } }, - '#withSupplementalGroups':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID. If unspecified, no groups will be added to any container.\"", args=[d.arg(name='supplementalGroups', type=d.T.array)]), + '#withSupplementalGroups':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID, the fsGroup (if specified), and group memberships defined in the container image for the uid of the container process. If unspecified, no additional groups are added to any container. Note that group memberships defined in the container image for the uid of the container process are still effective, even if they are not included in this list. Note that this field cannot be set when spec.os.name is windows.\"", args=[d.arg(name='supplementalGroups', type=d.T.array)]), withSupplementalGroups(supplementalGroups): { template+: { spec+: { securityContext+: { supplementalGroups: if std.isArray(v=supplementalGroups) then supplementalGroups else [supplementalGroups] } } } }, - '#withSupplementalGroupsMixin':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID. If unspecified, no groups will be added to any container.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='supplementalGroups', type=d.T.array)]), + '#withSupplementalGroupsMixin':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID, the fsGroup (if specified), and group memberships defined in the container image for the uid of the container process. If unspecified, no additional groups are added to any container. 
Note that group memberships defined in the container image for the uid of the container process are still effective, even if they are not included in this list. Note that this field cannot be set when spec.os.name is windows.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='supplementalGroups', type=d.T.array)]), withSupplementalGroupsMixin(supplementalGroups): { template+: { spec+: { securityContext+: { supplementalGroups+: if std.isArray(v=supplementalGroups) then supplementalGroups else [supplementalGroups] } } } }, - '#withSysctls':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch."', args=[d.arg(name='sysctls', type=d.T.array)]), + '#withSysctls':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='sysctls', type=d.T.array)]), withSysctls(sysctls): { template+: { spec+: { securityContext+: { sysctls: if std.isArray(v=sysctls) then sysctls else [sysctls] } } } }, - '#withSysctlsMixin':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='sysctls', type=d.T.array)]), + '#withSysctlsMixin':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch. Note that this field cannot be set when spec.os.name is windows."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='sysctls', type=d.T.array)]), withSysctlsMixin(sysctls): { template+: { spec+: { securityContext+: { sysctls+: if std.isArray(v=sysctls) then sysctls else [sysctls] } } } }, }, '#withActiveDeadlineSeconds':: d.fn(help='"Optional duration in seconds the pod may be active on the node relative to StartTime before the system will actively try to mark it failed and kill associated containers. Value must be a positive integer."', args=[d.arg(name='activeDeadlineSeconds', type=d.T.integer)]), @@ -175,9 +180,9 @@ withDnsPolicy(dnsPolicy): { template+: { spec+: { dnsPolicy: dnsPolicy } } }, '#withEnableServiceLinks':: d.fn(help="\"EnableServiceLinks indicates whether information about services should be injected into pod's environment variables, matching the syntax of Docker links. Optional: Defaults to true.\"", args=[d.arg(name='enableServiceLinks', type=d.T.boolean)]), withEnableServiceLinks(enableServiceLinks): { template+: { spec+: { enableServiceLinks: enableServiceLinks } } }, - '#withEphemeralContainers':: d.fn(help="\"List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. This field is alpha-level and is only honored by servers that enable the EphemeralContainers feature.\"", args=[d.arg(name='ephemeralContainers', type=d.T.array)]), + '#withEphemeralContainers':: d.fn(help="\"List of ephemeral containers run in this pod. 
Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource.\"", args=[d.arg(name='ephemeralContainers', type=d.T.array)]), withEphemeralContainers(ephemeralContainers): { template+: { spec+: { ephemeralContainers: if std.isArray(v=ephemeralContainers) then ephemeralContainers else [ephemeralContainers] } } }, - '#withEphemeralContainersMixin':: d.fn(help="\"List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. This field is alpha-level and is only honored by servers that enable the EphemeralContainers feature.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='ephemeralContainers', type=d.T.array)]), + '#withEphemeralContainersMixin':: d.fn(help="\"List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='ephemeralContainers', type=d.T.array)]), withEphemeralContainersMixin(ephemeralContainers): { template+: { spec+: { ephemeralContainers+: if std.isArray(v=ephemeralContainers) then ephemeralContainers else [ephemeralContainers] } } }, '#withHostAliases':: d.fn(help="\"HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts file if specified. This is only valid for non-hostNetwork pods.\"", args=[d.arg(name='hostAliases', type=d.T.array)]), withHostAliases(hostAliases): { template+: { spec+: { hostAliases: if std.isArray(v=hostAliases) then hostAliases else [hostAliases] } } }, @@ -189,11 +194,13 @@ withHostNetwork(hostNetwork): { template+: { spec+: { hostNetwork: hostNetwork } } }, '#withHostPID':: d.fn(help="\"Use the host's pid namespace. Optional: Default to false.\"", args=[d.arg(name='hostPID', type=d.T.boolean)]), withHostPID(hostPID): { template+: { spec+: { hostPID: hostPID } } }, + '#withHostUsers':: d.fn(help="\"Use the host's user namespace. Optional: Default to true. If set to true or not present, the pod will be run in the host user namespace, useful for when the pod needs a feature only available to the host user namespace, such as loading a kernel module with CAP_SYS_MODULE. When set to false, a new userns is created for the pod. Setting false is useful for mitigating container breakout vulnerabilities even allowing users to run their containers as root without actually having root privileges on the host. 
This field is alpha-level and is only honored by servers that enable the UserNamespacesSupport feature.\"", args=[d.arg(name='hostUsers', type=d.T.boolean)]), + withHostUsers(hostUsers): { template+: { spec+: { hostUsers: hostUsers } } }, '#withHostname':: d.fn(help="\"Specifies the hostname of the Pod If not specified, the pod's hostname will be set to a system-defined value.\"", args=[d.arg(name='hostname', type=d.T.string)]), withHostname(hostname): { template+: { spec+: { hostname: hostname } } }, - '#withImagePullSecrets':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), + '#withImagePullSecrets':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), withImagePullSecrets(imagePullSecrets): { template+: { spec+: { imagePullSecrets: if std.isArray(v=imagePullSecrets) then imagePullSecrets else [imagePullSecrets] } } }, - '#withImagePullSecretsMixin':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), + '#withImagePullSecretsMixin':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), withImagePullSecretsMixin(imagePullSecrets): { template+: { spec+: { imagePullSecrets+: if std.isArray(v=imagePullSecrets) then imagePullSecrets else [imagePullSecrets] } } }, '#withInitContainers':: d.fn(help='"List of initialization containers belonging to the pod. Init containers are executed in order prior to containers being started. If any init container fails, the pod is considered to have failed and is handled according to its restartPolicy. The name for an init container or normal container must be unique among all containers. Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes. 
The resourceRequirements of an init container are taken into account during scheduling by finding the highest request/limit for each resource type, and then using the max of of that value or the sum of the normal containers. Limits are applied to init containers in a similar fashion. Init containers cannot currently be added or removed. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/"', args=[d.arg(name='initContainers', type=d.T.array)]), withInitContainers(initContainers): { template+: { spec+: { initContainers: if std.isArray(v=initContainers) then initContainers else [initContainers] } } }, @@ -205,26 +212,34 @@ withNodeSelector(nodeSelector): { template+: { spec+: { nodeSelector: nodeSelector } } }, '#withNodeSelectorMixin':: d.fn(help="\"NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='nodeSelector', type=d.T.object)]), withNodeSelectorMixin(nodeSelector): { template+: { spec+: { nodeSelector+: nodeSelector } } }, - '#withOverhead':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md This field is alpha-level as of Kubernetes v1.16, and is only honored by servers that enable the PodOverhead feature."', args=[d.arg(name='overhead', type=d.T.object)]), + '#withOverhead':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md"', args=[d.arg(name='overhead', type=d.T.object)]), withOverhead(overhead): { template+: { spec+: { overhead: overhead } } }, - '#withOverheadMixin':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. 
If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md This field is alpha-level as of Kubernetes v1.16, and is only honored by servers that enable the PodOverhead feature."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='overhead', type=d.T.object)]), + '#withOverheadMixin':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='overhead', type=d.T.object)]), withOverheadMixin(overhead): { template+: { spec+: { overhead+: overhead } } }, - '#withPreemptionPolicy':: d.fn(help='"PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset. This field is beta-level, gated by the NonPreemptingPriority feature-gate."', args=[d.arg(name='preemptionPolicy', type=d.T.string)]), + '#withPreemptionPolicy':: d.fn(help='"PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset."', args=[d.arg(name='preemptionPolicy', type=d.T.string)]), withPreemptionPolicy(preemptionPolicy): { template+: { spec+: { preemptionPolicy: preemptionPolicy } } }, '#withPriority':: d.fn(help='"The priority value. Various system components use this field to find the priority of the pod. When Priority Admission Controller is enabled, it prevents users from setting this field. The admission controller populates this field from PriorityClassName. The higher the value, the higher the priority."', args=[d.arg(name='priority', type=d.T.integer)]), withPriority(priority): { template+: { spec+: { priority: priority } } }, '#withPriorityClassName':: d.fn(help="\"If specified, indicates the pod's priority. \\\"system-node-critical\\\" and \\\"system-cluster-critical\\\" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default.\"", args=[d.arg(name='priorityClassName', type=d.T.string)]), withPriorityClassName(priorityClassName): { template+: { spec+: { priorityClassName: priorityClassName } } }, - '#withReadinessGates':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. 
A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/0007-pod-ready%2B%2B.md"', args=[d.arg(name='readinessGates', type=d.T.array)]), + '#withReadinessGates':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates"', args=[d.arg(name='readinessGates', type=d.T.array)]), withReadinessGates(readinessGates): { template+: { spec+: { readinessGates: if std.isArray(v=readinessGates) then readinessGates else [readinessGates] } } }, - '#withReadinessGatesMixin':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/0007-pod-ready%2B%2B.md"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='readinessGates', type=d.T.array)]), + '#withReadinessGatesMixin':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='readinessGates', type=d.T.array)]), withReadinessGatesMixin(readinessGates): { template+: { spec+: { readinessGates+: if std.isArray(v=readinessGates) then readinessGates else [readinessGates] } } }, - '#withRestartPolicy':: d.fn(help='"Restart policy for all containers within the pod. One of Always, OnFailure, Never. Default to Always. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy"', args=[d.arg(name='restartPolicy', type=d.T.string)]), + '#withResourceClaims':: d.fn(help='"ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. The resources will be made available to those containers which consume them by name.\\n\\nThis is an alpha field and requires enabling the DynamicResourceAllocation feature gate.\\n\\nThis field is immutable."', args=[d.arg(name='resourceClaims', type=d.T.array)]), + withResourceClaims(resourceClaims): { template+: { spec+: { resourceClaims: if std.isArray(v=resourceClaims) then resourceClaims else [resourceClaims] } } }, + '#withResourceClaimsMixin':: d.fn(help='"ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. The resources will be made available to those containers which consume them by name.\\n\\nThis is an alpha field and requires enabling the DynamicResourceAllocation feature gate.\\n\\nThis field is immutable."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='resourceClaims', type=d.T.array)]), + withResourceClaimsMixin(resourceClaims): { template+: { spec+: { resourceClaims+: if std.isArray(v=resourceClaims) then resourceClaims else [resourceClaims] } } }, + '#withRestartPolicy':: d.fn(help='"Restart policy for all containers within the pod. One of Always, OnFailure, Never. 
In some contexts, only a subset of those values may be permitted. Default to Always. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy"', args=[d.arg(name='restartPolicy', type=d.T.string)]), withRestartPolicy(restartPolicy): { template+: { spec+: { restartPolicy: restartPolicy } } }, - '#withRuntimeClassName':: d.fn(help='"RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \\"legacy\\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md This is a beta feature as of Kubernetes v1.14."', args=[d.arg(name='runtimeClassName', type=d.T.string)]), + '#withRuntimeClassName':: d.fn(help='"RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \\"legacy\\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class"', args=[d.arg(name='runtimeClassName', type=d.T.string)]), withRuntimeClassName(runtimeClassName): { template+: { spec+: { runtimeClassName: runtimeClassName } } }, '#withSchedulerName':: d.fn(help='"If specified, the pod will be dispatched by specified scheduler. If not specified, the pod will be dispatched by default scheduler."', args=[d.arg(name='schedulerName', type=d.T.string)]), withSchedulerName(schedulerName): { template+: { spec+: { schedulerName: schedulerName } } }, + '#withSchedulingGates':: d.fn(help='"SchedulingGates is an opaque list of values that if specified will block scheduling the pod. If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the scheduler will not attempt to schedule the pod.\\n\\nSchedulingGates can only be set at pod creation time, and be removed only afterwards.\\n\\nThis is a beta feature enabled by the PodSchedulingReadiness feature gate."', args=[d.arg(name='schedulingGates', type=d.T.array)]), + withSchedulingGates(schedulingGates): { template+: { spec+: { schedulingGates: if std.isArray(v=schedulingGates) then schedulingGates else [schedulingGates] } } }, + '#withSchedulingGatesMixin':: d.fn(help='"SchedulingGates is an opaque list of values that if specified will block scheduling the pod. If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the scheduler will not attempt to schedule the pod.\\n\\nSchedulingGates can only be set at pod creation time, and be removed only afterwards.\\n\\nThis is a beta feature enabled by the PodSchedulingReadiness feature gate."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='schedulingGates', type=d.T.array)]), + withSchedulingGatesMixin(schedulingGates): { template+: { spec+: { schedulingGates+: if std.isArray(v=schedulingGates) then schedulingGates else [schedulingGates] } } }, '#withServiceAccount':: d.fn(help='"DeprecatedServiceAccount is a depreciated alias for ServiceAccountName. 
Deprecated: Use serviceAccountName instead."', args=[d.arg(name='serviceAccount', type=d.T.string)]), withServiceAccount(serviceAccount): { template+: { spec+: { serviceAccount: serviceAccount } } }, '#withServiceAccountName':: d.fn(help='"ServiceAccountName is the name of the ServiceAccount to use to run this pod. More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/"', args=[d.arg(name='serviceAccountName', type=d.T.string)]), diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apps/v1/daemonSetStatus.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apps/v1/daemonSetStatus.libsonnet similarity index 93% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apps/v1/daemonSetStatus.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apps/v1/daemonSetStatus.libsonnet index 62d339edc9c..a174182b33a 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apps/v1/daemonSetStatus.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apps/v1/daemonSetStatus.libsonnet @@ -15,7 +15,7 @@ withNumberAvailable(numberAvailable): { numberAvailable: numberAvailable }, '#withNumberMisscheduled':: d.fn(help='"The number of nodes that are running the daemon pod, but are not supposed to run the daemon pod. More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/"', args=[d.arg(name='numberMisscheduled', type=d.T.integer)]), withNumberMisscheduled(numberMisscheduled): { numberMisscheduled: numberMisscheduled }, - '#withNumberReady':: d.fn(help='"The number of nodes that should be running the daemon pod and have one or more of the daemon pod running and ready."', args=[d.arg(name='numberReady', type=d.T.integer)]), + '#withNumberReady':: d.fn(help='"numberReady is the number of nodes that should be running the daemon pod and have one or more of the daemon pod running with a Ready Condition."', args=[d.arg(name='numberReady', type=d.T.integer)]), withNumberReady(numberReady): { numberReady: numberReady }, '#withNumberUnavailable':: d.fn(help='"The number of nodes that should be running the daemon pod and have none of the daemon pod running and available (ready for at least spec.minReadySeconds)"', args=[d.arg(name='numberUnavailable', type=d.T.integer)]), withNumberUnavailable(numberUnavailable): { numberUnavailable: numberUnavailable }, diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apps/v1/daemonSetUpdateStrategy.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apps/v1/daemonSetUpdateStrategy.libsonnet similarity index 100% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apps/v1/daemonSetUpdateStrategy.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apps/v1/daemonSetUpdateStrategy.libsonnet diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apps/v1/deployment.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apps/v1/deployment.libsonnet similarity index 86% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apps/v1/deployment.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apps/v1/deployment.libsonnet index 0556cdbc01c..fc3ceea4cf3 100644 --- 
a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apps/v1/deployment.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apps/v1/deployment.libsonnet @@ -3,12 +3,10 @@ '#':: d.pkg(name='deployment', url='', help='"Deployment enables declarative updates for Pods and ReplicaSets."'), '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. 
Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -19,21 +17,21 @@ withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { metadata+: { generateName: generateName } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. 
Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { metadata+: { labels+: labels } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. 
Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { metadata+: { namespace: namespace } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, @@ -41,9 +39,9 @@ withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. 
Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { metadata+: { uid: uid } }, }, '#new':: d.fn(help='new returns an instance of Deployment', args=[d.arg(name='name', type=d.T.string)]), @@ -80,12 +78,10 @@ template: { '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { spec+: { template+: { metadata+: { annotations: annotations } } } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { spec+: { template+: { metadata+: { annotations+: annotations } } } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. 
This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { spec+: { template+: { metadata+: { clusterName: clusterName } } } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { spec+: { template+: { metadata+: { creationTimestamp: creationTimestamp } } } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -96,21 +92,21 @@ withFinalizers(finalizers): { spec+: { template+: { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } } } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { spec+: { template+: { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } } } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { spec+: { template+: { metadata+: { generateName: generateName } } } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { spec+: { template+: { metadata+: { generation: generation } } } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { spec+: { template+: { metadata+: { labels: labels } } } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { spec+: { template+: { metadata+: { labels+: labels } } } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". 
The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { spec+: { template+: { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } } } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { spec+: { template+: { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } } } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { spec+: { template+: { metadata+: { name: name } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { spec+: { template+: { metadata+: { namespace: namespace } } } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. 
There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { spec+: { template+: { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } } } }, @@ -118,9 +114,9 @@ withOwnerReferencesMixin(ownerReferences): { spec+: { template+: { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } } } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { spec+: { template+: { metadata+: { resourceVersion: resourceVersion } } } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { spec+: { template+: { metadata+: { selfLink: selfLink } } } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { spec+: { template+: { metadata+: { uid: uid } } } }, }, '#spec':: d.obj(help='"PodSpec is a description of a pod."'), @@ -179,6 +175,11 @@ '#withSearchesMixin':: d.fn(help='"A list of DNS search domains for host-name lookup. This will be appended to the base search paths generated from DNSPolicy. Duplicated search paths will be removed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='searches', type=d.T.array)]), withSearchesMixin(searches): { spec+: { template+: { spec+: { dnsConfig+: { searches+: if std.isArray(v=searches) then searches else [searches] } } } } }, }, + '#os':: d.obj(help='"PodOS defines the OS parameters of a pod."'), + os: { + '#withName':: d.fn(help='"Name is the name of the operating system. The currently supported values are linux and windows. 
Additional value may be defined in future and can be one of: https://github.com/opencontainers/runtime-spec/blob/master/config.md#platform-specific-configuration Clients should expect to handle additional values and treat unrecognized values in this field as os: null"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { spec+: { template+: { spec+: { os+: { name: name } } } } }, + }, '#securityContext':: d.obj(help='"PodSecurityContext holds pod-level security attributes and common container settings. Some fields are also present in container.securityContext. Field values of container.securityContext take precedence over field values of PodSecurityContext."'), securityContext: { '#seLinuxOptions':: d.obj(help='"SELinuxOptions are the labels to be applied to the container"'), @@ -194,7 +195,7 @@ }, '#seccompProfile':: d.obj(help="\"SeccompProfile defines a pod/container's seccomp profile settings. Only one profile source may be set.\""), seccompProfile: { - '#withLocalhostProfile':: d.fn(help="\"localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must only be set if type is \\\"Localhost\\\".\"", args=[d.arg(name='localhostProfile', type=d.T.string)]), + '#withLocalhostProfile':: d.fn(help="\"localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must be set if type is \\\"Localhost\\\". Must NOT be set for any other type.\"", args=[d.arg(name='localhostProfile', type=d.T.string)]), withLocalhostProfile(localhostProfile): { spec+: { template+: { spec+: { securityContext+: { seccompProfile+: { localhostProfile: localhostProfile } } } } } }, '#withType':: d.fn(help='"type indicates which kind of seccomp profile will be applied. Valid options are:\\n\\nLocalhost - a profile defined in a file on the node should be used. RuntimeDefault - the container runtime default profile should be used. Unconfined - no profile should be applied."', args=[d.arg(name='type', type=d.T.string)]), withType(type): { spec+: { template+: { spec+: { securityContext+: { seccompProfile+: { type: type } } } } } }, @@ -205,26 +206,28 @@ withGmsaCredentialSpec(gmsaCredentialSpec): { spec+: { template+: { spec+: { securityContext+: { windowsOptions+: { gmsaCredentialSpec: gmsaCredentialSpec } } } } } }, '#withGmsaCredentialSpecName':: d.fn(help='"GMSACredentialSpecName is the name of the GMSA credential spec to use."', args=[d.arg(name='gmsaCredentialSpecName', type=d.T.string)]), withGmsaCredentialSpecName(gmsaCredentialSpecName): { spec+: { template+: { spec+: { securityContext+: { windowsOptions+: { gmsaCredentialSpecName: gmsaCredentialSpecName } } } } } }, + '#withHostProcess':: d.fn(help="\"HostProcess determines if a container should be run as a 'Host Process' container. All of a Pod's containers must have the same effective HostProcess value (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). 
In addition, if HostProcess is true then HostNetwork must also be set to true.\"", args=[d.arg(name='hostProcess', type=d.T.boolean)]), + withHostProcess(hostProcess): { spec+: { template+: { spec+: { securityContext+: { windowsOptions+: { hostProcess: hostProcess } } } } } }, '#withRunAsUserName':: d.fn(help='"The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence."', args=[d.arg(name='runAsUserName', type=d.T.string)]), withRunAsUserName(runAsUserName): { spec+: { template+: { spec+: { securityContext+: { windowsOptions+: { runAsUserName: runAsUserName } } } } } }, }, - '#withFsGroup':: d.fn(help="\"A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod:\\n\\n1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw----\\n\\nIf unset, the Kubelet will not modify the ownership and permissions of any volume.\"", args=[d.arg(name='fsGroup', type=d.T.integer)]), + '#withFsGroup':: d.fn(help="\"A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod:\\n\\n1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw----\\n\\nIf unset, the Kubelet will not modify the ownership and permissions of any volume. Note that this field cannot be set when spec.os.name is windows.\"", args=[d.arg(name='fsGroup', type=d.T.integer)]), withFsGroup(fsGroup): { spec+: { template+: { spec+: { securityContext+: { fsGroup: fsGroup } } } } }, - '#withFsGroupChangePolicy':: d.fn(help='"fsGroupChangePolicy defines behavior of changing ownership and permission of the volume before being exposed inside Pod. This field will only apply to volume types which support fsGroup based ownership(and permissions). It will have no effect on ephemeral volume types such as: secret, configmaps and emptydir. Valid values are \\"OnRootMismatch\\" and \\"Always\\". If not specified, \\"Always\\" is used."', args=[d.arg(name='fsGroupChangePolicy', type=d.T.string)]), + '#withFsGroupChangePolicy':: d.fn(help='"fsGroupChangePolicy defines behavior of changing ownership and permission of the volume before being exposed inside Pod. This field will only apply to volume types which support fsGroup based ownership(and permissions). It will have no effect on ephemeral volume types such as: secret, configmaps and emptydir. Valid values are \\"OnRootMismatch\\" and \\"Always\\". If not specified, \\"Always\\" is used. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='fsGroupChangePolicy', type=d.T.string)]), withFsGroupChangePolicy(fsGroupChangePolicy): { spec+: { template+: { spec+: { securityContext+: { fsGroupChangePolicy: fsGroupChangePolicy } } } } }, - '#withRunAsGroup':: d.fn(help='"The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext. 
If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container."', args=[d.arg(name='runAsGroup', type=d.T.integer)]), + '#withRunAsGroup':: d.fn(help='"The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='runAsGroup', type=d.T.integer)]), withRunAsGroup(runAsGroup): { spec+: { template+: { spec+: { securityContext+: { runAsGroup: runAsGroup } } } } }, '#withRunAsNonRoot':: d.fn(help='"Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence."', args=[d.arg(name='runAsNonRoot', type=d.T.boolean)]), withRunAsNonRoot(runAsNonRoot): { spec+: { template+: { spec+: { securityContext+: { runAsNonRoot: runAsNonRoot } } } } }, - '#withRunAsUser':: d.fn(help='"The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container."', args=[d.arg(name='runAsUser', type=d.T.integer)]), + '#withRunAsUser':: d.fn(help='"The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='runAsUser', type=d.T.integer)]), withRunAsUser(runAsUser): { spec+: { template+: { spec+: { securityContext+: { runAsUser: runAsUser } } } } }, - '#withSupplementalGroups':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID. If unspecified, no groups will be added to any container.\"", args=[d.arg(name='supplementalGroups', type=d.T.array)]), + '#withSupplementalGroups':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID, the fsGroup (if specified), and group memberships defined in the container image for the uid of the container process. If unspecified, no additional groups are added to any container. Note that group memberships defined in the container image for the uid of the container process are still effective, even if they are not included in this list. Note that this field cannot be set when spec.os.name is windows.\"", args=[d.arg(name='supplementalGroups', type=d.T.array)]), withSupplementalGroups(supplementalGroups): { spec+: { template+: { spec+: { securityContext+: { supplementalGroups: if std.isArray(v=supplementalGroups) then supplementalGroups else [supplementalGroups] } } } } }, - '#withSupplementalGroupsMixin':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID. 
If unspecified, no groups will be added to any container.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='supplementalGroups', type=d.T.array)]), + '#withSupplementalGroupsMixin':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID, the fsGroup (if specified), and group memberships defined in the container image for the uid of the container process. If unspecified, no additional groups are added to any container. Note that group memberships defined in the container image for the uid of the container process are still effective, even if they are not included in this list. Note that this field cannot be set when spec.os.name is windows.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='supplementalGroups', type=d.T.array)]), withSupplementalGroupsMixin(supplementalGroups): { spec+: { template+: { spec+: { securityContext+: { supplementalGroups+: if std.isArray(v=supplementalGroups) then supplementalGroups else [supplementalGroups] } } } } }, - '#withSysctls':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch."', args=[d.arg(name='sysctls', type=d.T.array)]), + '#withSysctls':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='sysctls', type=d.T.array)]), withSysctls(sysctls): { spec+: { template+: { spec+: { securityContext+: { sysctls: if std.isArray(v=sysctls) then sysctls else [sysctls] } } } } }, - '#withSysctlsMixin':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='sysctls', type=d.T.array)]), + '#withSysctlsMixin':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch. Note that this field cannot be set when spec.os.name is windows."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='sysctls', type=d.T.array)]), withSysctlsMixin(sysctls): { spec+: { template+: { spec+: { securityContext+: { sysctls+: if std.isArray(v=sysctls) then sysctls else [sysctls] } } } } }, }, '#withActiveDeadlineSeconds':: d.fn(help='"Optional duration in seconds the pod may be active on the node relative to StartTime before the system will actively try to mark it failed and kill associated containers. Value must be a positive integer."', args=[d.arg(name='activeDeadlineSeconds', type=d.T.integer)]), @@ -239,9 +242,9 @@ withDnsPolicy(dnsPolicy): { spec+: { template+: { spec+: { dnsPolicy: dnsPolicy } } } }, '#withEnableServiceLinks':: d.fn(help="\"EnableServiceLinks indicates whether information about services should be injected into pod's environment variables, matching the syntax of Docker links. Optional: Defaults to true.\"", args=[d.arg(name='enableServiceLinks', type=d.T.boolean)]), withEnableServiceLinks(enableServiceLinks): { spec+: { template+: { spec+: { enableServiceLinks: enableServiceLinks } } } }, - '#withEphemeralContainers':: d.fn(help="\"List of ephemeral containers run in this pod. 
Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. This field is alpha-level and is only honored by servers that enable the EphemeralContainers feature.\"", args=[d.arg(name='ephemeralContainers', type=d.T.array)]), + '#withEphemeralContainers':: d.fn(help="\"List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource.\"", args=[d.arg(name='ephemeralContainers', type=d.T.array)]), withEphemeralContainers(ephemeralContainers): { spec+: { template+: { spec+: { ephemeralContainers: if std.isArray(v=ephemeralContainers) then ephemeralContainers else [ephemeralContainers] } } } }, - '#withEphemeralContainersMixin':: d.fn(help="\"List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. This field is alpha-level and is only honored by servers that enable the EphemeralContainers feature.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='ephemeralContainers', type=d.T.array)]), + '#withEphemeralContainersMixin':: d.fn(help="\"List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='ephemeralContainers', type=d.T.array)]), withEphemeralContainersMixin(ephemeralContainers): { spec+: { template+: { spec+: { ephemeralContainers+: if std.isArray(v=ephemeralContainers) then ephemeralContainers else [ephemeralContainers] } } } }, '#withHostAliases':: d.fn(help="\"HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts file if specified. This is only valid for non-hostNetwork pods.\"", args=[d.arg(name='hostAliases', type=d.T.array)]), withHostAliases(hostAliases): { spec+: { template+: { spec+: { hostAliases: if std.isArray(v=hostAliases) then hostAliases else [hostAliases] } } } }, @@ -253,11 +256,13 @@ withHostNetwork(hostNetwork): { spec+: { template+: { spec+: { hostNetwork: hostNetwork } } } }, '#withHostPID':: d.fn(help="\"Use the host's pid namespace. Optional: Default to false.\"", args=[d.arg(name='hostPID', type=d.T.boolean)]), withHostPID(hostPID): { spec+: { template+: { spec+: { hostPID: hostPID } } } }, + '#withHostUsers':: d.fn(help="\"Use the host's user namespace. Optional: Default to true. 
If set to true or not present, the pod will be run in the host user namespace, useful for when the pod needs a feature only available to the host user namespace, such as loading a kernel module with CAP_SYS_MODULE. When set to false, a new userns is created for the pod. Setting false is useful for mitigating container breakout vulnerabilities even allowing users to run their containers as root without actually having root privileges on the host. This field is alpha-level and is only honored by servers that enable the UserNamespacesSupport feature.\"", args=[d.arg(name='hostUsers', type=d.T.boolean)]), + withHostUsers(hostUsers): { spec+: { template+: { spec+: { hostUsers: hostUsers } } } }, '#withHostname':: d.fn(help="\"Specifies the hostname of the Pod If not specified, the pod's hostname will be set to a system-defined value.\"", args=[d.arg(name='hostname', type=d.T.string)]), withHostname(hostname): { spec+: { template+: { spec+: { hostname: hostname } } } }, - '#withImagePullSecrets':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), + '#withImagePullSecrets':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), withImagePullSecrets(imagePullSecrets): { spec+: { template+: { spec+: { imagePullSecrets: if std.isArray(v=imagePullSecrets) then imagePullSecrets else [imagePullSecrets] } } } }, - '#withImagePullSecretsMixin':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), + '#withImagePullSecretsMixin':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), withImagePullSecretsMixin(imagePullSecrets): { spec+: { template+: { spec+: { imagePullSecrets+: if std.isArray(v=imagePullSecrets) then imagePullSecrets else [imagePullSecrets] } } } }, '#withInitContainers':: d.fn(help='"List of initialization containers belonging to the pod. 
Init containers are executed in order prior to containers being started. If any init container fails, the pod is considered to have failed and is handled according to its restartPolicy. The name for an init container or normal container must be unique among all containers. Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes. The resourceRequirements of an init container are taken into account during scheduling by finding the highest request/limit for each resource type, and then using the max of of that value or the sum of the normal containers. Limits are applied to init containers in a similar fashion. Init containers cannot currently be added or removed. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/"', args=[d.arg(name='initContainers', type=d.T.array)]), withInitContainers(initContainers): { spec+: { template+: { spec+: { initContainers: if std.isArray(v=initContainers) then initContainers else [initContainers] } } } }, @@ -269,26 +274,34 @@ withNodeSelector(nodeSelector): { spec+: { template+: { spec+: { nodeSelector: nodeSelector } } } }, '#withNodeSelectorMixin':: d.fn(help="\"NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='nodeSelector', type=d.T.object)]), withNodeSelectorMixin(nodeSelector): { spec+: { template+: { spec+: { nodeSelector+: nodeSelector } } } }, - '#withOverhead':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md This field is alpha-level as of Kubernetes v1.16, and is only honored by servers that enable the PodOverhead feature."', args=[d.arg(name='overhead', type=d.T.object)]), + '#withOverhead':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md"', args=[d.arg(name='overhead', type=d.T.object)]), withOverhead(overhead): { spec+: { template+: { spec+: { overhead: overhead } } } }, - '#withOverheadMixin':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. 
This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md This field is alpha-level as of Kubernetes v1.16, and is only honored by servers that enable the PodOverhead feature."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='overhead', type=d.T.object)]), + '#withOverheadMixin':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='overhead', type=d.T.object)]), withOverheadMixin(overhead): { spec+: { template+: { spec+: { overhead+: overhead } } } }, - '#withPreemptionPolicy':: d.fn(help='"PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset. This field is beta-level, gated by the NonPreemptingPriority feature-gate."', args=[d.arg(name='preemptionPolicy', type=d.T.string)]), + '#withPreemptionPolicy':: d.fn(help='"PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset."', args=[d.arg(name='preemptionPolicy', type=d.T.string)]), withPreemptionPolicy(preemptionPolicy): { spec+: { template+: { spec+: { preemptionPolicy: preemptionPolicy } } } }, '#withPriority':: d.fn(help='"The priority value. Various system components use this field to find the priority of the pod. When Priority Admission Controller is enabled, it prevents users from setting this field. The admission controller populates this field from PriorityClassName. The higher the value, the higher the priority."', args=[d.arg(name='priority', type=d.T.integer)]), withPriority(priority): { spec+: { template+: { spec+: { priority: priority } } } }, '#withPriorityClassName':: d.fn(help="\"If specified, indicates the pod's priority. \\\"system-node-critical\\\" and \\\"system-cluster-critical\\\" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. 
If not specified, the pod priority will be default or zero if there is no default.\"", args=[d.arg(name='priorityClassName', type=d.T.string)]), withPriorityClassName(priorityClassName): { spec+: { template+: { spec+: { priorityClassName: priorityClassName } } } }, - '#withReadinessGates':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/0007-pod-ready%2B%2B.md"', args=[d.arg(name='readinessGates', type=d.T.array)]), + '#withReadinessGates':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates"', args=[d.arg(name='readinessGates', type=d.T.array)]), withReadinessGates(readinessGates): { spec+: { template+: { spec+: { readinessGates: if std.isArray(v=readinessGates) then readinessGates else [readinessGates] } } } }, - '#withReadinessGatesMixin':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/0007-pod-ready%2B%2B.md"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='readinessGates', type=d.T.array)]), + '#withReadinessGatesMixin':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='readinessGates', type=d.T.array)]), withReadinessGatesMixin(readinessGates): { spec+: { template+: { spec+: { readinessGates+: if std.isArray(v=readinessGates) then readinessGates else [readinessGates] } } } }, - '#withRestartPolicy':: d.fn(help='"Restart policy for all containers within the pod. One of Always, OnFailure, Never. Default to Always. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy"', args=[d.arg(name='restartPolicy', type=d.T.string)]), + '#withResourceClaims':: d.fn(help='"ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. The resources will be made available to those containers which consume them by name.\\n\\nThis is an alpha field and requires enabling the DynamicResourceAllocation feature gate.\\n\\nThis field is immutable."', args=[d.arg(name='resourceClaims', type=d.T.array)]), + withResourceClaims(resourceClaims): { spec+: { template+: { spec+: { resourceClaims: if std.isArray(v=resourceClaims) then resourceClaims else [resourceClaims] } } } }, + '#withResourceClaimsMixin':: d.fn(help='"ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. 
The resources will be made available to those containers which consume them by name.\\n\\nThis is an alpha field and requires enabling the DynamicResourceAllocation feature gate.\\n\\nThis field is immutable."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='resourceClaims', type=d.T.array)]), + withResourceClaimsMixin(resourceClaims): { spec+: { template+: { spec+: { resourceClaims+: if std.isArray(v=resourceClaims) then resourceClaims else [resourceClaims] } } } }, + '#withRestartPolicy':: d.fn(help='"Restart policy for all containers within the pod. One of Always, OnFailure, Never. In some contexts, only a subset of those values may be permitted. Default to Always. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy"', args=[d.arg(name='restartPolicy', type=d.T.string)]), withRestartPolicy(restartPolicy): { spec+: { template+: { spec+: { restartPolicy: restartPolicy } } } }, - '#withRuntimeClassName':: d.fn(help='"RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \\"legacy\\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md This is a beta feature as of Kubernetes v1.14."', args=[d.arg(name='runtimeClassName', type=d.T.string)]), + '#withRuntimeClassName':: d.fn(help='"RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \\"legacy\\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class"', args=[d.arg(name='runtimeClassName', type=d.T.string)]), withRuntimeClassName(runtimeClassName): { spec+: { template+: { spec+: { runtimeClassName: runtimeClassName } } } }, '#withSchedulerName':: d.fn(help='"If specified, the pod will be dispatched by specified scheduler. If not specified, the pod will be dispatched by default scheduler."', args=[d.arg(name='schedulerName', type=d.T.string)]), withSchedulerName(schedulerName): { spec+: { template+: { spec+: { schedulerName: schedulerName } } } }, + '#withSchedulingGates':: d.fn(help='"SchedulingGates is an opaque list of values that if specified will block scheduling the pod. If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the scheduler will not attempt to schedule the pod.\\n\\nSchedulingGates can only be set at pod creation time, and be removed only afterwards.\\n\\nThis is a beta feature enabled by the PodSchedulingReadiness feature gate."', args=[d.arg(name='schedulingGates', type=d.T.array)]), + withSchedulingGates(schedulingGates): { spec+: { template+: { spec+: { schedulingGates: if std.isArray(v=schedulingGates) then schedulingGates else [schedulingGates] } } } }, + '#withSchedulingGatesMixin':: d.fn(help='"SchedulingGates is an opaque list of values that if specified will block scheduling the pod. 
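The new pod-template setters generated for 1.29 (withHostUsers, withResourceClaims, withSchedulingGates) chain onto a Deployment exactly like the existing ones, since each returns a { spec+: { template+: { spec+: { ... } } } } fragment. A minimal usage sketch, assuming the vendored library is imported through the usual Tanka k.libsonnet shim and that the deployment.new / container.new convenience constructors are available (the import path and those constructor names are assumptions, not part of this diff):

  // illustrative only; the import path depends on your jsonnetfile/vendor layout
  local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';
  local deployment = k.apps.v1.deployment;
  local container = k.core.v1.container;

  deployment.new('demo', replicas=1, containers=[container.new('app', 'nginx:1.25')])
  // run the pod in its own user namespace (new hostUsers field)
  + deployment.spec.template.spec.withHostUsers(false)
  // keep the pod in SchedulingGated until the gate is lifted
  + deployment.spec.template.spec.withSchedulingGates([{ name: 'example.com/not-ready' }])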
If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the scheduler will not attempt to schedule the pod.\\n\\nSchedulingGates can only be set at pod creation time, and be removed only afterwards.\\n\\nThis is a beta feature enabled by the PodSchedulingReadiness feature gate."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='schedulingGates', type=d.T.array)]), + withSchedulingGatesMixin(schedulingGates): { spec+: { template+: { spec+: { schedulingGates+: if std.isArray(v=schedulingGates) then schedulingGates else [schedulingGates] } } } }, '#withServiceAccount':: d.fn(help='"DeprecatedServiceAccount is a depreciated alias for ServiceAccountName. Deprecated: Use serviceAccountName instead."', args=[d.arg(name='serviceAccount', type=d.T.string)]), withServiceAccount(serviceAccount): { spec+: { template+: { spec+: { serviceAccount: serviceAccount } } } }, '#withServiceAccountName':: d.fn(help='"ServiceAccountName is the name of the ServiceAccount to use to run this pod. More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/"', args=[d.arg(name='serviceAccountName', type=d.T.string)]), diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apps/v1/deploymentCondition.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apps/v1/deploymentCondition.libsonnet similarity index 100% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apps/v1/deploymentCondition.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apps/v1/deploymentCondition.libsonnet diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apps/v1/deploymentSpec.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apps/v1/deploymentSpec.libsonnet similarity index 85% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apps/v1/deploymentSpec.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apps/v1/deploymentSpec.libsonnet index 90cc4395977..05f76111103 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apps/v1/deploymentSpec.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apps/v1/deploymentSpec.libsonnet @@ -28,12 +28,10 @@ template: { '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { template+: { metadata+: { annotations: annotations } } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { template+: { metadata+: { annotations+: annotations } } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { template+: { metadata+: { clusterName: clusterName } } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { template+: { metadata+: { creationTimestamp: creationTimestamp } } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -44,21 +42,21 @@ withFinalizers(finalizers): { template+: { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. 
Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { template+: { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { template+: { metadata+: { generateName: generateName } } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { template+: { metadata+: { generation: generation } } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { template+: { metadata+: { labels: labels } } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. 
More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { template+: { metadata+: { labels+: labels } } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { template+: { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { template+: { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { template+: { metadata+: { name: name } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. 
More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { template+: { metadata+: { namespace: namespace } } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { template+: { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } } }, @@ -66,9 +64,9 @@ withOwnerReferencesMixin(ownerReferences): { template+: { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { template+: { metadata+: { resourceVersion: resourceVersion } } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { template+: { metadata+: { selfLink: selfLink } } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { template+: { metadata+: { uid: uid } } }, }, '#spec':: d.obj(help='"PodSpec is a description of a pod."'), @@ -127,6 +125,11 @@ '#withSearchesMixin':: d.fn(help='"A list of DNS search domains for host-name lookup. This will be appended to the base search paths generated from DNSPolicy. Duplicated search paths will be removed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='searches', type=d.T.array)]), withSearchesMixin(searches): { template+: { spec+: { dnsConfig+: { searches+: if std.isArray(v=searches) then searches else [searches] } } } }, }, + '#os':: d.obj(help='"PodOS defines the OS parameters of a pod."'), + os: { + '#withName':: d.fn(help='"Name is the name of the operating system. The currently supported values are linux and windows. Additional value may be defined in future and can be one of: https://github.com/opencontainers/runtime-spec/blob/master/config.md#platform-specific-configuration Clients should expect to handle additional values and treat unrecognized values in this field as os: null"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { template+: { spec+: { os+: { name: name } } } }, + }, '#securityContext':: d.obj(help='"PodSecurityContext holds pod-level security attributes and common container settings. Some fields are also present in container.securityContext. Field values of container.securityContext take precedence over field values of PodSecurityContext."'), securityContext: { '#seLinuxOptions':: d.obj(help='"SELinuxOptions are the labels to be applied to the container"'), @@ -142,7 +145,7 @@ }, '#seccompProfile':: d.obj(help="\"SeccompProfile defines a pod/container's seccomp profile settings. Only one profile source may be set.\""), seccompProfile: { - '#withLocalhostProfile':: d.fn(help="\"localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must only be set if type is \\\"Localhost\\\".\"", args=[d.arg(name='localhostProfile', type=d.T.string)]), + '#withLocalhostProfile':: d.fn(help="\"localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must be set if type is \\\"Localhost\\\". Must NOT be set for any other type.\"", args=[d.arg(name='localhostProfile', type=d.T.string)]), withLocalhostProfile(localhostProfile): { template+: { spec+: { securityContext+: { seccompProfile+: { localhostProfile: localhostProfile } } } } }, '#withType':: d.fn(help='"type indicates which kind of seccomp profile will be applied. Valid options are:\\n\\nLocalhost - a profile defined in a file on the node should be used. RuntimeDefault - the container runtime default profile should be used. 
Unconfined - no profile should be applied."', args=[d.arg(name='type', type=d.T.string)]), withType(type): { template+: { spec+: { securityContext+: { seccompProfile+: { type: type } } } } }, @@ -153,26 +156,28 @@ withGmsaCredentialSpec(gmsaCredentialSpec): { template+: { spec+: { securityContext+: { windowsOptions+: { gmsaCredentialSpec: gmsaCredentialSpec } } } } }, '#withGmsaCredentialSpecName':: d.fn(help='"GMSACredentialSpecName is the name of the GMSA credential spec to use."', args=[d.arg(name='gmsaCredentialSpecName', type=d.T.string)]), withGmsaCredentialSpecName(gmsaCredentialSpecName): { template+: { spec+: { securityContext+: { windowsOptions+: { gmsaCredentialSpecName: gmsaCredentialSpecName } } } } }, + '#withHostProcess':: d.fn(help="\"HostProcess determines if a container should be run as a 'Host Process' container. All of a Pod's containers must have the same effective HostProcess value (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). In addition, if HostProcess is true then HostNetwork must also be set to true.\"", args=[d.arg(name='hostProcess', type=d.T.boolean)]), + withHostProcess(hostProcess): { template+: { spec+: { securityContext+: { windowsOptions+: { hostProcess: hostProcess } } } } }, '#withRunAsUserName':: d.fn(help='"The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence."', args=[d.arg(name='runAsUserName', type=d.T.string)]), withRunAsUserName(runAsUserName): { template+: { spec+: { securityContext+: { windowsOptions+: { runAsUserName: runAsUserName } } } } }, }, - '#withFsGroup':: d.fn(help="\"A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod:\\n\\n1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw----\\n\\nIf unset, the Kubelet will not modify the ownership and permissions of any volume.\"", args=[d.arg(name='fsGroup', type=d.T.integer)]), + '#withFsGroup':: d.fn(help="\"A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod:\\n\\n1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw----\\n\\nIf unset, the Kubelet will not modify the ownership and permissions of any volume. Note that this field cannot be set when spec.os.name is windows.\"", args=[d.arg(name='fsGroup', type=d.T.integer)]), withFsGroup(fsGroup): { template+: { spec+: { securityContext+: { fsGroup: fsGroup } } } }, - '#withFsGroupChangePolicy':: d.fn(help='"fsGroupChangePolicy defines behavior of changing ownership and permission of the volume before being exposed inside Pod. This field will only apply to volume types which support fsGroup based ownership(and permissions). It will have no effect on ephemeral volume types such as: secret, configmaps and emptydir. Valid values are \\"OnRootMismatch\\" and \\"Always\\". 
If not specified, \\"Always\\" is used."', args=[d.arg(name='fsGroupChangePolicy', type=d.T.string)]), + '#withFsGroupChangePolicy':: d.fn(help='"fsGroupChangePolicy defines behavior of changing ownership and permission of the volume before being exposed inside Pod. This field will only apply to volume types which support fsGroup based ownership(and permissions). It will have no effect on ephemeral volume types such as: secret, configmaps and emptydir. Valid values are \\"OnRootMismatch\\" and \\"Always\\". If not specified, \\"Always\\" is used. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='fsGroupChangePolicy', type=d.T.string)]), withFsGroupChangePolicy(fsGroupChangePolicy): { template+: { spec+: { securityContext+: { fsGroupChangePolicy: fsGroupChangePolicy } } } }, - '#withRunAsGroup':: d.fn(help='"The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container."', args=[d.arg(name='runAsGroup', type=d.T.integer)]), + '#withRunAsGroup':: d.fn(help='"The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='runAsGroup', type=d.T.integer)]), withRunAsGroup(runAsGroup): { template+: { spec+: { securityContext+: { runAsGroup: runAsGroup } } } }, '#withRunAsNonRoot':: d.fn(help='"Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence."', args=[d.arg(name='runAsNonRoot', type=d.T.boolean)]), withRunAsNonRoot(runAsNonRoot): { template+: { spec+: { securityContext+: { runAsNonRoot: runAsNonRoot } } } }, - '#withRunAsUser':: d.fn(help='"The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container."', args=[d.arg(name='runAsUser', type=d.T.integer)]), + '#withRunAsUser':: d.fn(help='"The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='runAsUser', type=d.T.integer)]), withRunAsUser(runAsUser): { template+: { spec+: { securityContext+: { runAsUser: runAsUser } } } }, - '#withSupplementalGroups':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID. 
If unspecified, no groups will be added to any container.\"", args=[d.arg(name='supplementalGroups', type=d.T.array)]), + '#withSupplementalGroups':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID, the fsGroup (if specified), and group memberships defined in the container image for the uid of the container process. If unspecified, no additional groups are added to any container. Note that group memberships defined in the container image for the uid of the container process are still effective, even if they are not included in this list. Note that this field cannot be set when spec.os.name is windows.\"", args=[d.arg(name='supplementalGroups', type=d.T.array)]), withSupplementalGroups(supplementalGroups): { template+: { spec+: { securityContext+: { supplementalGroups: if std.isArray(v=supplementalGroups) then supplementalGroups else [supplementalGroups] } } } }, - '#withSupplementalGroupsMixin':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID. If unspecified, no groups will be added to any container.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='supplementalGroups', type=d.T.array)]), + '#withSupplementalGroupsMixin':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID, the fsGroup (if specified), and group memberships defined in the container image for the uid of the container process. If unspecified, no additional groups are added to any container. Note that group memberships defined in the container image for the uid of the container process are still effective, even if they are not included in this list. Note that this field cannot be set when spec.os.name is windows.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='supplementalGroups', type=d.T.array)]), withSupplementalGroupsMixin(supplementalGroups): { template+: { spec+: { securityContext+: { supplementalGroups+: if std.isArray(v=supplementalGroups) then supplementalGroups else [supplementalGroups] } } } }, - '#withSysctls':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch."', args=[d.arg(name='sysctls', type=d.T.array)]), + '#withSysctls':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='sysctls', type=d.T.array)]), withSysctls(sysctls): { template+: { spec+: { securityContext+: { sysctls: if std.isArray(v=sysctls) then sysctls else [sysctls] } } } }, - '#withSysctlsMixin':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='sysctls', type=d.T.array)]), + '#withSysctlsMixin':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch. 
Note that this field cannot be set when spec.os.name is windows."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='sysctls', type=d.T.array)]), withSysctlsMixin(sysctls): { template+: { spec+: { securityContext+: { sysctls+: if std.isArray(v=sysctls) then sysctls else [sysctls] } } } }, }, '#withActiveDeadlineSeconds':: d.fn(help='"Optional duration in seconds the pod may be active on the node relative to StartTime before the system will actively try to mark it failed and kill associated containers. Value must be a positive integer."', args=[d.arg(name='activeDeadlineSeconds', type=d.T.integer)]), @@ -187,9 +192,9 @@ withDnsPolicy(dnsPolicy): { template+: { spec+: { dnsPolicy: dnsPolicy } } }, '#withEnableServiceLinks':: d.fn(help="\"EnableServiceLinks indicates whether information about services should be injected into pod's environment variables, matching the syntax of Docker links. Optional: Defaults to true.\"", args=[d.arg(name='enableServiceLinks', type=d.T.boolean)]), withEnableServiceLinks(enableServiceLinks): { template+: { spec+: { enableServiceLinks: enableServiceLinks } } }, - '#withEphemeralContainers':: d.fn(help="\"List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. This field is alpha-level and is only honored by servers that enable the EphemeralContainers feature.\"", args=[d.arg(name='ephemeralContainers', type=d.T.array)]), + '#withEphemeralContainers':: d.fn(help="\"List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource.\"", args=[d.arg(name='ephemeralContainers', type=d.T.array)]), withEphemeralContainers(ephemeralContainers): { template+: { spec+: { ephemeralContainers: if std.isArray(v=ephemeralContainers) then ephemeralContainers else [ephemeralContainers] } } }, - '#withEphemeralContainersMixin':: d.fn(help="\"List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. This field is alpha-level and is only honored by servers that enable the EphemeralContainers feature.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='ephemeralContainers', type=d.T.array)]), + '#withEphemeralContainersMixin':: d.fn(help="\"List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. 
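The regenerated spec object also gains a PodOS block (os.withName) and windowsOptions.withHostProcess, and several pod securityContext helpers now document that they cannot be set when spec.os.name is windows. A short sketch of composing those on a DeploymentSpec, under the same assumed import as above (reaching it as k.apps.v1.deploymentSpec rather than via deployment.spec is just a layout choice for the example):

  local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';  // assumed path
  local spec = k.apps.v1.deploymentSpec;

  // declare the pod OS explicitly; Linux-only fields such as fsGroup or sysctls
  // cannot be set when spec.os.name is windows (per the field docs above)
  spec.template.spec.os.withName('linux')
  + spec.template.spec.securityContext.withFsGroup(2000)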
In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='ephemeralContainers', type=d.T.array)]), withEphemeralContainersMixin(ephemeralContainers): { template+: { spec+: { ephemeralContainers+: if std.isArray(v=ephemeralContainers) then ephemeralContainers else [ephemeralContainers] } } }, '#withHostAliases':: d.fn(help="\"HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts file if specified. This is only valid for non-hostNetwork pods.\"", args=[d.arg(name='hostAliases', type=d.T.array)]), withHostAliases(hostAliases): { template+: { spec+: { hostAliases: if std.isArray(v=hostAliases) then hostAliases else [hostAliases] } } }, @@ -201,11 +206,13 @@ withHostNetwork(hostNetwork): { template+: { spec+: { hostNetwork: hostNetwork } } }, '#withHostPID':: d.fn(help="\"Use the host's pid namespace. Optional: Default to false.\"", args=[d.arg(name='hostPID', type=d.T.boolean)]), withHostPID(hostPID): { template+: { spec+: { hostPID: hostPID } } }, + '#withHostUsers':: d.fn(help="\"Use the host's user namespace. Optional: Default to true. If set to true or not present, the pod will be run in the host user namespace, useful for when the pod needs a feature only available to the host user namespace, such as loading a kernel module with CAP_SYS_MODULE. When set to false, a new userns is created for the pod. Setting false is useful for mitigating container breakout vulnerabilities even allowing users to run their containers as root without actually having root privileges on the host. This field is alpha-level and is only honored by servers that enable the UserNamespacesSupport feature.\"", args=[d.arg(name='hostUsers', type=d.T.boolean)]), + withHostUsers(hostUsers): { template+: { spec+: { hostUsers: hostUsers } } }, '#withHostname':: d.fn(help="\"Specifies the hostname of the Pod If not specified, the pod's hostname will be set to a system-defined value.\"", args=[d.arg(name='hostname', type=d.T.string)]), withHostname(hostname): { template+: { spec+: { hostname: hostname } } }, - '#withImagePullSecrets':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), + '#withImagePullSecrets':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), withImagePullSecrets(imagePullSecrets): { template+: { spec+: { imagePullSecrets: if std.isArray(v=imagePullSecrets) then imagePullSecrets else [imagePullSecrets] } } }, - '#withImagePullSecretsMixin':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. 
If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), + '#withImagePullSecretsMixin':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), withImagePullSecretsMixin(imagePullSecrets): { template+: { spec+: { imagePullSecrets+: if std.isArray(v=imagePullSecrets) then imagePullSecrets else [imagePullSecrets] } } }, '#withInitContainers':: d.fn(help='"List of initialization containers belonging to the pod. Init containers are executed in order prior to containers being started. If any init container fails, the pod is considered to have failed and is handled according to its restartPolicy. The name for an init container or normal container must be unique among all containers. Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes. The resourceRequirements of an init container are taken into account during scheduling by finding the highest request/limit for each resource type, and then using the max of of that value or the sum of the normal containers. Limits are applied to init containers in a similar fashion. Init containers cannot currently be added or removed. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/"', args=[d.arg(name='initContainers', type=d.T.array)]), withInitContainers(initContainers): { template+: { spec+: { initContainers: if std.isArray(v=initContainers) then initContainers else [initContainers] } } }, @@ -217,26 +224,34 @@ withNodeSelector(nodeSelector): { template+: { spec+: { nodeSelector: nodeSelector } } }, '#withNodeSelectorMixin':: d.fn(help="\"NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='nodeSelector', type=d.T.object)]), withNodeSelectorMixin(nodeSelector): { template+: { spec+: { nodeSelector+: nodeSelector } } }, - '#withOverhead':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. 
More info: https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md This field is alpha-level as of Kubernetes v1.16, and is only honored by servers that enable the PodOverhead feature."', args=[d.arg(name='overhead', type=d.T.object)]), + '#withOverhead':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md"', args=[d.arg(name='overhead', type=d.T.object)]), withOverhead(overhead): { template+: { spec+: { overhead: overhead } } }, - '#withOverheadMixin':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md This field is alpha-level as of Kubernetes v1.16, and is only honored by servers that enable the PodOverhead feature."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='overhead', type=d.T.object)]), + '#withOverheadMixin':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='overhead', type=d.T.object)]), withOverheadMixin(overhead): { template+: { spec+: { overhead+: overhead } } }, - '#withPreemptionPolicy':: d.fn(help='"PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset. This field is beta-level, gated by the NonPreemptingPriority feature-gate."', args=[d.arg(name='preemptionPolicy', type=d.T.string)]), + '#withPreemptionPolicy':: d.fn(help='"PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. 
Defaults to PreemptLowerPriority if unset."', args=[d.arg(name='preemptionPolicy', type=d.T.string)]), withPreemptionPolicy(preemptionPolicy): { template+: { spec+: { preemptionPolicy: preemptionPolicy } } }, '#withPriority':: d.fn(help='"The priority value. Various system components use this field to find the priority of the pod. When Priority Admission Controller is enabled, it prevents users from setting this field. The admission controller populates this field from PriorityClassName. The higher the value, the higher the priority."', args=[d.arg(name='priority', type=d.T.integer)]), withPriority(priority): { template+: { spec+: { priority: priority } } }, '#withPriorityClassName':: d.fn(help="\"If specified, indicates the pod's priority. \\\"system-node-critical\\\" and \\\"system-cluster-critical\\\" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default.\"", args=[d.arg(name='priorityClassName', type=d.T.string)]), withPriorityClassName(priorityClassName): { template+: { spec+: { priorityClassName: priorityClassName } } }, - '#withReadinessGates':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/0007-pod-ready%2B%2B.md"', args=[d.arg(name='readinessGates', type=d.T.array)]), + '#withReadinessGates':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates"', args=[d.arg(name='readinessGates', type=d.T.array)]), withReadinessGates(readinessGates): { template+: { spec+: { readinessGates: if std.isArray(v=readinessGates) then readinessGates else [readinessGates] } } }, - '#withReadinessGatesMixin':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/0007-pod-ready%2B%2B.md"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='readinessGates', type=d.T.array)]), + '#withReadinessGatesMixin':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='readinessGates', type=d.T.array)]), withReadinessGatesMixin(readinessGates): { template+: { spec+: { readinessGates+: if std.isArray(v=readinessGates) then readinessGates else [readinessGates] } } }, - '#withRestartPolicy':: d.fn(help='"Restart policy for all containers within the pod. One of Always, OnFailure, Never. Default to Always. 
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy"', args=[d.arg(name='restartPolicy', type=d.T.string)]), + '#withResourceClaims':: d.fn(help='"ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. The resources will be made available to those containers which consume them by name.\\n\\nThis is an alpha field and requires enabling the DynamicResourceAllocation feature gate.\\n\\nThis field is immutable."', args=[d.arg(name='resourceClaims', type=d.T.array)]), + withResourceClaims(resourceClaims): { template+: { spec+: { resourceClaims: if std.isArray(v=resourceClaims) then resourceClaims else [resourceClaims] } } }, + '#withResourceClaimsMixin':: d.fn(help='"ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. The resources will be made available to those containers which consume them by name.\\n\\nThis is an alpha field and requires enabling the DynamicResourceAllocation feature gate.\\n\\nThis field is immutable."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='resourceClaims', type=d.T.array)]), + withResourceClaimsMixin(resourceClaims): { template+: { spec+: { resourceClaims+: if std.isArray(v=resourceClaims) then resourceClaims else [resourceClaims] } } }, + '#withRestartPolicy':: d.fn(help='"Restart policy for all containers within the pod. One of Always, OnFailure, Never. In some contexts, only a subset of those values may be permitted. Default to Always. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy"', args=[d.arg(name='restartPolicy', type=d.T.string)]), withRestartPolicy(restartPolicy): { template+: { spec+: { restartPolicy: restartPolicy } } }, - '#withRuntimeClassName':: d.fn(help='"RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \\"legacy\\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md This is a beta feature as of Kubernetes v1.14."', args=[d.arg(name='runtimeClassName', type=d.T.string)]), + '#withRuntimeClassName':: d.fn(help='"RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \\"legacy\\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class"', args=[d.arg(name='runtimeClassName', type=d.T.string)]), withRuntimeClassName(runtimeClassName): { template+: { spec+: { runtimeClassName: runtimeClassName } } }, '#withSchedulerName':: d.fn(help='"If specified, the pod will be dispatched by specified scheduler. If not specified, the pod will be dispatched by default scheduler."', args=[d.arg(name='schedulerName', type=d.T.string)]), withSchedulerName(schedulerName): { template+: { spec+: { schedulerName: schedulerName } } }, + '#withSchedulingGates':: d.fn(help='"SchedulingGates is an opaque list of values that if specified will block scheduling the pod. 
If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the scheduler will not attempt to schedule the pod.\\n\\nSchedulingGates can only be set at pod creation time, and be removed only afterwards.\\n\\nThis is a beta feature enabled by the PodSchedulingReadiness feature gate."', args=[d.arg(name='schedulingGates', type=d.T.array)]), + withSchedulingGates(schedulingGates): { template+: { spec+: { schedulingGates: if std.isArray(v=schedulingGates) then schedulingGates else [schedulingGates] } } }, + '#withSchedulingGatesMixin':: d.fn(help='"SchedulingGates is an opaque list of values that if specified will block scheduling the pod. If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the scheduler will not attempt to schedule the pod.\\n\\nSchedulingGates can only be set at pod creation time, and be removed only afterwards.\\n\\nThis is a beta feature enabled by the PodSchedulingReadiness feature gate."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='schedulingGates', type=d.T.array)]), + withSchedulingGatesMixin(schedulingGates): { template+: { spec+: { schedulingGates+: if std.isArray(v=schedulingGates) then schedulingGates else [schedulingGates] } } }, '#withServiceAccount':: d.fn(help='"DeprecatedServiceAccount is a depreciated alias for ServiceAccountName. Deprecated: Use serviceAccountName instead."', args=[d.arg(name='serviceAccount', type=d.T.string)]), withServiceAccount(serviceAccount): { template+: { spec+: { serviceAccount: serviceAccount } } }, '#withServiceAccountName':: d.fn(help='"ServiceAccountName is the name of the ServiceAccount to use to run this pod. More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/"', args=[d.arg(name='serviceAccountName', type=d.T.string)]), diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apps/v1/deploymentStatus.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apps/v1/deploymentStatus.libsonnet similarity index 93% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apps/v1/deploymentStatus.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apps/v1/deploymentStatus.libsonnet index 8519e533802..d2d2673434f 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apps/v1/deploymentStatus.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apps/v1/deploymentStatus.libsonnet @@ -11,7 +11,7 @@ withConditionsMixin(conditions): { conditions+: if std.isArray(v=conditions) then conditions else [conditions] }, '#withObservedGeneration':: d.fn(help='"The generation observed by the deployment controller."', args=[d.arg(name='observedGeneration', type=d.T.integer)]), withObservedGeneration(observedGeneration): { observedGeneration: observedGeneration }, - '#withReadyReplicas':: d.fn(help='"Total number of ready pods targeted by this deployment."', args=[d.arg(name='readyReplicas', type=d.T.integer)]), + '#withReadyReplicas':: d.fn(help='"readyReplicas is the number of pods targeted by this Deployment with a Ready Condition."', args=[d.arg(name='readyReplicas', type=d.T.integer)]), withReadyReplicas(readyReplicas): { readyReplicas: readyReplicas }, '#withReplicas':: d.fn(help='"Total number of non-terminated pods targeted by this deployment (their labels match the selector)."', args=[d.arg(name='replicas', type=d.T.integer)]), withReplicas(replicas): { 
replicas: replicas }, diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apps/v1/deploymentStrategy.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apps/v1/deploymentStrategy.libsonnet similarity index 100% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apps/v1/deploymentStrategy.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apps/v1/deploymentStrategy.libsonnet diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apps/v1/main.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apps/v1/main.libsonnet similarity index 89% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apps/v1/main.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apps/v1/main.libsonnet index beecb814b3b..a450610b0fd 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apps/v1/main.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apps/v1/main.libsonnet @@ -21,6 +21,8 @@ rollingUpdateStatefulSetStrategy: (import 'rollingUpdateStatefulSetStrategy.libsonnet'), statefulSet: (import 'statefulSet.libsonnet'), statefulSetCondition: (import 'statefulSetCondition.libsonnet'), + statefulSetOrdinals: (import 'statefulSetOrdinals.libsonnet'), + statefulSetPersistentVolumeClaimRetentionPolicy: (import 'statefulSetPersistentVolumeClaimRetentionPolicy.libsonnet'), statefulSetSpec: (import 'statefulSetSpec.libsonnet'), statefulSetStatus: (import 'statefulSetStatus.libsonnet'), statefulSetUpdateStrategy: (import 'statefulSetUpdateStrategy.libsonnet'), diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apps/v1/replicaSet.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apps/v1/replicaSet.libsonnet similarity index 85% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apps/v1/replicaSet.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apps/v1/replicaSet.libsonnet index c7a7e040d5c..2dc95569424 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apps/v1/replicaSet.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apps/v1/replicaSet.libsonnet @@ -3,12 +3,10 @@ '#':: d.pkg(name='replicaSet', url='', help='"ReplicaSet ensures that a specified number of pod replicas are running at any given time."'), '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -19,21 +17,21 @@ withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. 
Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { metadata+: { generateName: generateName } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. 
More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { metadata+: { labels+: labels } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. 
An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { metadata+: { namespace: namespace } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, @@ -41,9 +39,9 @@ withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { metadata+: { uid: uid } }, }, '#new':: d.fn(help='new returns an instance of ReplicaSet', args=[d.arg(name='name', type=d.T.string)]), @@ -68,12 +66,10 @@ template: { '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { spec+: { template+: { metadata+: { annotations: annotations } } } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { spec+: { template+: { metadata+: { annotations+: annotations } } } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { spec+: { template+: { metadata+: { clusterName: clusterName } } } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { spec+: { template+: { metadata+: { creationTimestamp: creationTimestamp } } } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. 
Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -84,21 +80,21 @@ withFinalizers(finalizers): { spec+: { template+: { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } } } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { spec+: { template+: { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } } } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { spec+: { template+: { metadata+: { generateName: generateName } } } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. 
Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { spec+: { template+: { metadata+: { generation: generation } } } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { spec+: { template+: { metadata+: { labels: labels } } } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { spec+: { template+: { metadata+: { labels+: labels } } } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { spec+: { template+: { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } } } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { spec+: { template+: { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } } } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. 
More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { spec+: { template+: { metadata+: { name: name } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { spec+: { template+: { metadata+: { namespace: namespace } } } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { spec+: { template+: { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } } } }, @@ -106,9 +102,9 @@ withOwnerReferencesMixin(ownerReferences): { spec+: { template+: { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } } } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { spec+: { template+: { metadata+: { resourceVersion: resourceVersion } } } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. 
Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { spec+: { template+: { metadata+: { selfLink: selfLink } } } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { spec+: { template+: { metadata+: { uid: uid } } } }, }, '#spec':: d.obj(help='"PodSpec is a description of a pod."'), @@ -167,6 +163,11 @@ '#withSearchesMixin':: d.fn(help='"A list of DNS search domains for host-name lookup. This will be appended to the base search paths generated from DNSPolicy. Duplicated search paths will be removed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='searches', type=d.T.array)]), withSearchesMixin(searches): { spec+: { template+: { spec+: { dnsConfig+: { searches+: if std.isArray(v=searches) then searches else [searches] } } } } }, }, + '#os':: d.obj(help='"PodOS defines the OS parameters of a pod."'), + os: { + '#withName':: d.fn(help='"Name is the name of the operating system. The currently supported values are linux and windows. Additional value may be defined in future and can be one of: https://github.com/opencontainers/runtime-spec/blob/master/config.md#platform-specific-configuration Clients should expect to handle additional values and treat unrecognized values in this field as os: null"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { spec+: { template+: { spec+: { os+: { name: name } } } } }, + }, '#securityContext':: d.obj(help='"PodSecurityContext holds pod-level security attributes and common container settings. Some fields are also present in container.securityContext. Field values of container.securityContext take precedence over field values of PodSecurityContext."'), securityContext: { '#seLinuxOptions':: d.obj(help='"SELinuxOptions are the labels to be applied to the container"'), @@ -182,7 +183,7 @@ }, '#seccompProfile':: d.obj(help="\"SeccompProfile defines a pod/container's seccomp profile settings. Only one profile source may be set.\""), seccompProfile: { - '#withLocalhostProfile':: d.fn(help="\"localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must only be set if type is \\\"Localhost\\\".\"", args=[d.arg(name='localhostProfile', type=d.T.string)]), + '#withLocalhostProfile':: d.fn(help="\"localhostProfile indicates a profile defined in a file on the node should be used. 
The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must be set if type is \\\"Localhost\\\". Must NOT be set for any other type.\"", args=[d.arg(name='localhostProfile', type=d.T.string)]), withLocalhostProfile(localhostProfile): { spec+: { template+: { spec+: { securityContext+: { seccompProfile+: { localhostProfile: localhostProfile } } } } } }, '#withType':: d.fn(help='"type indicates which kind of seccomp profile will be applied. Valid options are:\\n\\nLocalhost - a profile defined in a file on the node should be used. RuntimeDefault - the container runtime default profile should be used. Unconfined - no profile should be applied."', args=[d.arg(name='type', type=d.T.string)]), withType(type): { spec+: { template+: { spec+: { securityContext+: { seccompProfile+: { type: type } } } } } }, @@ -193,26 +194,28 @@ withGmsaCredentialSpec(gmsaCredentialSpec): { spec+: { template+: { spec+: { securityContext+: { windowsOptions+: { gmsaCredentialSpec: gmsaCredentialSpec } } } } } }, '#withGmsaCredentialSpecName':: d.fn(help='"GMSACredentialSpecName is the name of the GMSA credential spec to use."', args=[d.arg(name='gmsaCredentialSpecName', type=d.T.string)]), withGmsaCredentialSpecName(gmsaCredentialSpecName): { spec+: { template+: { spec+: { securityContext+: { windowsOptions+: { gmsaCredentialSpecName: gmsaCredentialSpecName } } } } } }, + '#withHostProcess':: d.fn(help="\"HostProcess determines if a container should be run as a 'Host Process' container. All of a Pod's containers must have the same effective HostProcess value (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). In addition, if HostProcess is true then HostNetwork must also be set to true.\"", args=[d.arg(name='hostProcess', type=d.T.boolean)]), + withHostProcess(hostProcess): { spec+: { template+: { spec+: { securityContext+: { windowsOptions+: { hostProcess: hostProcess } } } } } }, '#withRunAsUserName':: d.fn(help='"The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence."', args=[d.arg(name='runAsUserName', type=d.T.string)]), withRunAsUserName(runAsUserName): { spec+: { template+: { spec+: { securityContext+: { windowsOptions+: { runAsUserName: runAsUserName } } } } } }, }, - '#withFsGroup':: d.fn(help="\"A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod:\\n\\n1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw----\\n\\nIf unset, the Kubelet will not modify the ownership and permissions of any volume.\"", args=[d.arg(name='fsGroup', type=d.T.integer)]), + '#withFsGroup':: d.fn(help="\"A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod:\\n\\n1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw----\\n\\nIf unset, the Kubelet will not modify the ownership and permissions of any volume. 
Note that this field cannot be set when spec.os.name is windows.\"", args=[d.arg(name='fsGroup', type=d.T.integer)]), withFsGroup(fsGroup): { spec+: { template+: { spec+: { securityContext+: { fsGroup: fsGroup } } } } }, - '#withFsGroupChangePolicy':: d.fn(help='"fsGroupChangePolicy defines behavior of changing ownership and permission of the volume before being exposed inside Pod. This field will only apply to volume types which support fsGroup based ownership(and permissions). It will have no effect on ephemeral volume types such as: secret, configmaps and emptydir. Valid values are \\"OnRootMismatch\\" and \\"Always\\". If not specified, \\"Always\\" is used."', args=[d.arg(name='fsGroupChangePolicy', type=d.T.string)]), + '#withFsGroupChangePolicy':: d.fn(help='"fsGroupChangePolicy defines behavior of changing ownership and permission of the volume before being exposed inside Pod. This field will only apply to volume types which support fsGroup based ownership(and permissions). It will have no effect on ephemeral volume types such as: secret, configmaps and emptydir. Valid values are \\"OnRootMismatch\\" and \\"Always\\". If not specified, \\"Always\\" is used. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='fsGroupChangePolicy', type=d.T.string)]), withFsGroupChangePolicy(fsGroupChangePolicy): { spec+: { template+: { spec+: { securityContext+: { fsGroupChangePolicy: fsGroupChangePolicy } } } } }, - '#withRunAsGroup':: d.fn(help='"The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container."', args=[d.arg(name='runAsGroup', type=d.T.integer)]), + '#withRunAsGroup':: d.fn(help='"The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='runAsGroup', type=d.T.integer)]), withRunAsGroup(runAsGroup): { spec+: { template+: { spec+: { securityContext+: { runAsGroup: runAsGroup } } } } }, '#withRunAsNonRoot':: d.fn(help='"Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence."', args=[d.arg(name='runAsNonRoot', type=d.T.boolean)]), withRunAsNonRoot(runAsNonRoot): { spec+: { template+: { spec+: { securityContext+: { runAsNonRoot: runAsNonRoot } } } } }, - '#withRunAsUser':: d.fn(help='"The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container."', args=[d.arg(name='runAsUser', type=d.T.integer)]), + '#withRunAsUser':: d.fn(help='"The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. 
If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='runAsUser', type=d.T.integer)]), withRunAsUser(runAsUser): { spec+: { template+: { spec+: { securityContext+: { runAsUser: runAsUser } } } } }, - '#withSupplementalGroups':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID. If unspecified, no groups will be added to any container.\"", args=[d.arg(name='supplementalGroups', type=d.T.array)]), + '#withSupplementalGroups':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID, the fsGroup (if specified), and group memberships defined in the container image for the uid of the container process. If unspecified, no additional groups are added to any container. Note that group memberships defined in the container image for the uid of the container process are still effective, even if they are not included in this list. Note that this field cannot be set when spec.os.name is windows.\"", args=[d.arg(name='supplementalGroups', type=d.T.array)]), withSupplementalGroups(supplementalGroups): { spec+: { template+: { spec+: { securityContext+: { supplementalGroups: if std.isArray(v=supplementalGroups) then supplementalGroups else [supplementalGroups] } } } } }, - '#withSupplementalGroupsMixin':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID. If unspecified, no groups will be added to any container.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='supplementalGroups', type=d.T.array)]), + '#withSupplementalGroupsMixin':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID, the fsGroup (if specified), and group memberships defined in the container image for the uid of the container process. If unspecified, no additional groups are added to any container. Note that group memberships defined in the container image for the uid of the container process are still effective, even if they are not included in this list. Note that this field cannot be set when spec.os.name is windows.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='supplementalGroups', type=d.T.array)]), withSupplementalGroupsMixin(supplementalGroups): { spec+: { template+: { spec+: { securityContext+: { supplementalGroups+: if std.isArray(v=supplementalGroups) then supplementalGroups else [supplementalGroups] } } } } }, - '#withSysctls':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch."', args=[d.arg(name='sysctls', type=d.T.array)]), + '#withSysctls':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='sysctls', type=d.T.array)]), withSysctls(sysctls): { spec+: { template+: { spec+: { securityContext+: { sysctls: if std.isArray(v=sysctls) then sysctls else [sysctls] } } } } }, - '#withSysctlsMixin':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. 
Pods with unsupported sysctls (by the container runtime) might fail to launch."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='sysctls', type=d.T.array)]), + '#withSysctlsMixin':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch. Note that this field cannot be set when spec.os.name is windows."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='sysctls', type=d.T.array)]), withSysctlsMixin(sysctls): { spec+: { template+: { spec+: { securityContext+: { sysctls+: if std.isArray(v=sysctls) then sysctls else [sysctls] } } } } }, }, '#withActiveDeadlineSeconds':: d.fn(help='"Optional duration in seconds the pod may be active on the node relative to StartTime before the system will actively try to mark it failed and kill associated containers. Value must be a positive integer."', args=[d.arg(name='activeDeadlineSeconds', type=d.T.integer)]), @@ -227,9 +230,9 @@ withDnsPolicy(dnsPolicy): { spec+: { template+: { spec+: { dnsPolicy: dnsPolicy } } } }, '#withEnableServiceLinks':: d.fn(help="\"EnableServiceLinks indicates whether information about services should be injected into pod's environment variables, matching the syntax of Docker links. Optional: Defaults to true.\"", args=[d.arg(name='enableServiceLinks', type=d.T.boolean)]), withEnableServiceLinks(enableServiceLinks): { spec+: { template+: { spec+: { enableServiceLinks: enableServiceLinks } } } }, - '#withEphemeralContainers':: d.fn(help="\"List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. This field is alpha-level and is only honored by servers that enable the EphemeralContainers feature.\"", args=[d.arg(name='ephemeralContainers', type=d.T.array)]), + '#withEphemeralContainers':: d.fn(help="\"List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource.\"", args=[d.arg(name='ephemeralContainers', type=d.T.array)]), withEphemeralContainers(ephemeralContainers): { spec+: { template+: { spec+: { ephemeralContainers: if std.isArray(v=ephemeralContainers) then ephemeralContainers else [ephemeralContainers] } } } }, - '#withEphemeralContainersMixin':: d.fn(help="\"List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. This field is alpha-level and is only honored by servers that enable the EphemeralContainers feature.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='ephemeralContainers', type=d.T.array)]), + '#withEphemeralContainersMixin':: d.fn(help="\"List of ephemeral containers run in this pod. 
Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='ephemeralContainers', type=d.T.array)]), withEphemeralContainersMixin(ephemeralContainers): { spec+: { template+: { spec+: { ephemeralContainers+: if std.isArray(v=ephemeralContainers) then ephemeralContainers else [ephemeralContainers] } } } }, '#withHostAliases':: d.fn(help="\"HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts file if specified. This is only valid for non-hostNetwork pods.\"", args=[d.arg(name='hostAliases', type=d.T.array)]), withHostAliases(hostAliases): { spec+: { template+: { spec+: { hostAliases: if std.isArray(v=hostAliases) then hostAliases else [hostAliases] } } } }, @@ -241,11 +244,13 @@ withHostNetwork(hostNetwork): { spec+: { template+: { spec+: { hostNetwork: hostNetwork } } } }, '#withHostPID':: d.fn(help="\"Use the host's pid namespace. Optional: Default to false.\"", args=[d.arg(name='hostPID', type=d.T.boolean)]), withHostPID(hostPID): { spec+: { template+: { spec+: { hostPID: hostPID } } } }, + '#withHostUsers':: d.fn(help="\"Use the host's user namespace. Optional: Default to true. If set to true or not present, the pod will be run in the host user namespace, useful for when the pod needs a feature only available to the host user namespace, such as loading a kernel module with CAP_SYS_MODULE. When set to false, a new userns is created for the pod. Setting false is useful for mitigating container breakout vulnerabilities even allowing users to run their containers as root without actually having root privileges on the host. This field is alpha-level and is only honored by servers that enable the UserNamespacesSupport feature.\"", args=[d.arg(name='hostUsers', type=d.T.boolean)]), + withHostUsers(hostUsers): { spec+: { template+: { spec+: { hostUsers: hostUsers } } } }, '#withHostname':: d.fn(help="\"Specifies the hostname of the Pod If not specified, the pod's hostname will be set to a system-defined value.\"", args=[d.arg(name='hostname', type=d.T.string)]), withHostname(hostname): { spec+: { template+: { spec+: { hostname: hostname } } } }, - '#withImagePullSecrets':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), + '#withImagePullSecrets':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. 
More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), withImagePullSecrets(imagePullSecrets): { spec+: { template+: { spec+: { imagePullSecrets: if std.isArray(v=imagePullSecrets) then imagePullSecrets else [imagePullSecrets] } } } }, - '#withImagePullSecretsMixin':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), + '#withImagePullSecretsMixin':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), withImagePullSecretsMixin(imagePullSecrets): { spec+: { template+: { spec+: { imagePullSecrets+: if std.isArray(v=imagePullSecrets) then imagePullSecrets else [imagePullSecrets] } } } }, '#withInitContainers':: d.fn(help='"List of initialization containers belonging to the pod. Init containers are executed in order prior to containers being started. If any init container fails, the pod is considered to have failed and is handled according to its restartPolicy. The name for an init container or normal container must be unique among all containers. Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes. The resourceRequirements of an init container are taken into account during scheduling by finding the highest request/limit for each resource type, and then using the max of of that value or the sum of the normal containers. Limits are applied to init containers in a similar fashion. Init containers cannot currently be added or removed. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/"', args=[d.arg(name='initContainers', type=d.T.array)]), withInitContainers(initContainers): { spec+: { template+: { spec+: { initContainers: if std.isArray(v=initContainers) then initContainers else [initContainers] } } } }, @@ -257,26 +262,34 @@ withNodeSelector(nodeSelector): { spec+: { template+: { spec+: { nodeSelector: nodeSelector } } } }, '#withNodeSelectorMixin':: d.fn(help="\"NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='nodeSelector', type=d.T.object)]), withNodeSelectorMixin(nodeSelector): { spec+: { template+: { spec+: { nodeSelector+: nodeSelector } } } }, - '#withOverhead':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. 
This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md This field is alpha-level as of Kubernetes v1.16, and is only honored by servers that enable the PodOverhead feature."', args=[d.arg(name='overhead', type=d.T.object)]), + '#withOverhead':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md"', args=[d.arg(name='overhead', type=d.T.object)]), withOverhead(overhead): { spec+: { template+: { spec+: { overhead: overhead } } } }, - '#withOverheadMixin':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md This field is alpha-level as of Kubernetes v1.16, and is only honored by servers that enable the PodOverhead feature."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='overhead', type=d.T.object)]), + '#withOverheadMixin':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. 
More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='overhead', type=d.T.object)]), withOverheadMixin(overhead): { spec+: { template+: { spec+: { overhead+: overhead } } } }, - '#withPreemptionPolicy':: d.fn(help='"PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset. This field is beta-level, gated by the NonPreemptingPriority feature-gate."', args=[d.arg(name='preemptionPolicy', type=d.T.string)]), + '#withPreemptionPolicy':: d.fn(help='"PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset."', args=[d.arg(name='preemptionPolicy', type=d.T.string)]), withPreemptionPolicy(preemptionPolicy): { spec+: { template+: { spec+: { preemptionPolicy: preemptionPolicy } } } }, '#withPriority':: d.fn(help='"The priority value. Various system components use this field to find the priority of the pod. When Priority Admission Controller is enabled, it prevents users from setting this field. The admission controller populates this field from PriorityClassName. The higher the value, the higher the priority."', args=[d.arg(name='priority', type=d.T.integer)]), withPriority(priority): { spec+: { template+: { spec+: { priority: priority } } } }, '#withPriorityClassName':: d.fn(help="\"If specified, indicates the pod's priority. \\\"system-node-critical\\\" and \\\"system-cluster-critical\\\" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default.\"", args=[d.arg(name='priorityClassName', type=d.T.string)]), withPriorityClassName(priorityClassName): { spec+: { template+: { spec+: { priorityClassName: priorityClassName } } } }, - '#withReadinessGates':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/0007-pod-ready%2B%2B.md"', args=[d.arg(name='readinessGates', type=d.T.array)]), + '#withReadinessGates':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates"', args=[d.arg(name='readinessGates', type=d.T.array)]), withReadinessGates(readinessGates): { spec+: { template+: { spec+: { readinessGates: if std.isArray(v=readinessGates) then readinessGates else [readinessGates] } } } }, - '#withReadinessGatesMixin':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. 
A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/0007-pod-ready%2B%2B.md"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='readinessGates', type=d.T.array)]), + '#withReadinessGatesMixin':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='readinessGates', type=d.T.array)]), withReadinessGatesMixin(readinessGates): { spec+: { template+: { spec+: { readinessGates+: if std.isArray(v=readinessGates) then readinessGates else [readinessGates] } } } }, - '#withRestartPolicy':: d.fn(help='"Restart policy for all containers within the pod. One of Always, OnFailure, Never. Default to Always. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy"', args=[d.arg(name='restartPolicy', type=d.T.string)]), + '#withResourceClaims':: d.fn(help='"ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. The resources will be made available to those containers which consume them by name.\\n\\nThis is an alpha field and requires enabling the DynamicResourceAllocation feature gate.\\n\\nThis field is immutable."', args=[d.arg(name='resourceClaims', type=d.T.array)]), + withResourceClaims(resourceClaims): { spec+: { template+: { spec+: { resourceClaims: if std.isArray(v=resourceClaims) then resourceClaims else [resourceClaims] } } } }, + '#withResourceClaimsMixin':: d.fn(help='"ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. The resources will be made available to those containers which consume them by name.\\n\\nThis is an alpha field and requires enabling the DynamicResourceAllocation feature gate.\\n\\nThis field is immutable."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='resourceClaims', type=d.T.array)]), + withResourceClaimsMixin(resourceClaims): { spec+: { template+: { spec+: { resourceClaims+: if std.isArray(v=resourceClaims) then resourceClaims else [resourceClaims] } } } }, + '#withRestartPolicy':: d.fn(help='"Restart policy for all containers within the pod. One of Always, OnFailure, Never. In some contexts, only a subset of those values may be permitted. Default to Always. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy"', args=[d.arg(name='restartPolicy', type=d.T.string)]), withRestartPolicy(restartPolicy): { spec+: { template+: { spec+: { restartPolicy: restartPolicy } } } }, - '#withRuntimeClassName':: d.fn(help='"RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \\"legacy\\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. 
More info: https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md This is a beta feature as of Kubernetes v1.14."', args=[d.arg(name='runtimeClassName', type=d.T.string)]), + '#withRuntimeClassName':: d.fn(help='"RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \\"legacy\\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class"', args=[d.arg(name='runtimeClassName', type=d.T.string)]), withRuntimeClassName(runtimeClassName): { spec+: { template+: { spec+: { runtimeClassName: runtimeClassName } } } }, '#withSchedulerName':: d.fn(help='"If specified, the pod will be dispatched by specified scheduler. If not specified, the pod will be dispatched by default scheduler."', args=[d.arg(name='schedulerName', type=d.T.string)]), withSchedulerName(schedulerName): { spec+: { template+: { spec+: { schedulerName: schedulerName } } } }, + '#withSchedulingGates':: d.fn(help='"SchedulingGates is an opaque list of values that if specified will block scheduling the pod. If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the scheduler will not attempt to schedule the pod.\\n\\nSchedulingGates can only be set at pod creation time, and be removed only afterwards.\\n\\nThis is a beta feature enabled by the PodSchedulingReadiness feature gate."', args=[d.arg(name='schedulingGates', type=d.T.array)]), + withSchedulingGates(schedulingGates): { spec+: { template+: { spec+: { schedulingGates: if std.isArray(v=schedulingGates) then schedulingGates else [schedulingGates] } } } }, + '#withSchedulingGatesMixin':: d.fn(help='"SchedulingGates is an opaque list of values that if specified will block scheduling the pod. If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the scheduler will not attempt to schedule the pod.\\n\\nSchedulingGates can only be set at pod creation time, and be removed only afterwards.\\n\\nThis is a beta feature enabled by the PodSchedulingReadiness feature gate."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='schedulingGates', type=d.T.array)]), + withSchedulingGatesMixin(schedulingGates): { spec+: { template+: { spec+: { schedulingGates+: if std.isArray(v=schedulingGates) then schedulingGates else [schedulingGates] } } } }, '#withServiceAccount':: d.fn(help='"DeprecatedServiceAccount is a depreciated alias for ServiceAccountName. Deprecated: Use serviceAccountName instead."', args=[d.arg(name='serviceAccount', type=d.T.string)]), withServiceAccount(serviceAccount): { spec+: { template+: { spec+: { serviceAccount: serviceAccount } } } }, '#withServiceAccountName':: d.fn(help='"ServiceAccountName is the name of the ServiceAccount to use to run this pod. 
More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/"', args=[d.arg(name='serviceAccountName', type=d.T.string)]), diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apps/v1/replicaSetCondition.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apps/v1/replicaSetCondition.libsonnet similarity index 100% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apps/v1/replicaSetCondition.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apps/v1/replicaSetCondition.libsonnet diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apps/v1/replicaSetSpec.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apps/v1/replicaSetSpec.libsonnet similarity index 85% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apps/v1/replicaSetSpec.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apps/v1/replicaSetSpec.libsonnet index db4677bb6b2..b172fef6266 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apps/v1/replicaSetSpec.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apps/v1/replicaSetSpec.libsonnet @@ -16,12 +16,10 @@ template: { '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { template+: { metadata+: { annotations: annotations } } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { template+: { metadata+: { annotations+: annotations } } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. 
This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { template+: { metadata+: { clusterName: clusterName } } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { template+: { metadata+: { creationTimestamp: creationTimestamp } } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -32,21 +30,21 @@ withFinalizers(finalizers): { template+: { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { template+: { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. 
If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { template+: { metadata+: { generateName: generateName } } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { template+: { metadata+: { generation: generation } } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { template+: { metadata+: { labels: labels } } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { template+: { metadata+: { labels+: labels } } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { template+: { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. 
A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { template+: { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { template+: { metadata+: { name: name } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { template+: { metadata+: { namespace: namespace } } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { template+: { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } } }, @@ -54,9 +52,9 @@ withOwnerReferencesMixin(ownerReferences): { template+: { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. 
May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { template+: { metadata+: { resourceVersion: resourceVersion } } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { template+: { metadata+: { selfLink: selfLink } } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { template+: { metadata+: { uid: uid } } }, }, '#spec':: d.obj(help='"PodSpec is a description of a pod."'), @@ -115,6 +113,11 @@ '#withSearchesMixin':: d.fn(help='"A list of DNS search domains for host-name lookup. This will be appended to the base search paths generated from DNSPolicy. Duplicated search paths will be removed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='searches', type=d.T.array)]), withSearchesMixin(searches): { template+: { spec+: { dnsConfig+: { searches+: if std.isArray(v=searches) then searches else [searches] } } } }, }, + '#os':: d.obj(help='"PodOS defines the OS parameters of a pod."'), + os: { + '#withName':: d.fn(help='"Name is the name of the operating system. The currently supported values are linux and windows. Additional value may be defined in future and can be one of: https://github.com/opencontainers/runtime-spec/blob/master/config.md#platform-specific-configuration Clients should expect to handle additional values and treat unrecognized values in this field as os: null"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { template+: { spec+: { os+: { name: name } } } }, + }, '#securityContext':: d.obj(help='"PodSecurityContext holds pod-level security attributes and common container settings. Some fields are also present in container.securityContext. 
Field values of container.securityContext take precedence over field values of PodSecurityContext."'), securityContext: { '#seLinuxOptions':: d.obj(help='"SELinuxOptions are the labels to be applied to the container"'), @@ -130,7 +133,7 @@ }, '#seccompProfile':: d.obj(help="\"SeccompProfile defines a pod/container's seccomp profile settings. Only one profile source may be set.\""), seccompProfile: { - '#withLocalhostProfile':: d.fn(help="\"localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must only be set if type is \\\"Localhost\\\".\"", args=[d.arg(name='localhostProfile', type=d.T.string)]), + '#withLocalhostProfile':: d.fn(help="\"localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must be set if type is \\\"Localhost\\\". Must NOT be set for any other type.\"", args=[d.arg(name='localhostProfile', type=d.T.string)]), withLocalhostProfile(localhostProfile): { template+: { spec+: { securityContext+: { seccompProfile+: { localhostProfile: localhostProfile } } } } }, '#withType':: d.fn(help='"type indicates which kind of seccomp profile will be applied. Valid options are:\\n\\nLocalhost - a profile defined in a file on the node should be used. RuntimeDefault - the container runtime default profile should be used. Unconfined - no profile should be applied."', args=[d.arg(name='type', type=d.T.string)]), withType(type): { template+: { spec+: { securityContext+: { seccompProfile+: { type: type } } } } }, @@ -141,26 +144,28 @@ withGmsaCredentialSpec(gmsaCredentialSpec): { template+: { spec+: { securityContext+: { windowsOptions+: { gmsaCredentialSpec: gmsaCredentialSpec } } } } }, '#withGmsaCredentialSpecName':: d.fn(help='"GMSACredentialSpecName is the name of the GMSA credential spec to use."', args=[d.arg(name='gmsaCredentialSpecName', type=d.T.string)]), withGmsaCredentialSpecName(gmsaCredentialSpecName): { template+: { spec+: { securityContext+: { windowsOptions+: { gmsaCredentialSpecName: gmsaCredentialSpecName } } } } }, + '#withHostProcess':: d.fn(help="\"HostProcess determines if a container should be run as a 'Host Process' container. All of a Pod's containers must have the same effective HostProcess value (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). In addition, if HostProcess is true then HostNetwork must also be set to true.\"", args=[d.arg(name='hostProcess', type=d.T.boolean)]), + withHostProcess(hostProcess): { template+: { spec+: { securityContext+: { windowsOptions+: { hostProcess: hostProcess } } } } }, '#withRunAsUserName':: d.fn(help='"The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence."', args=[d.arg(name='runAsUserName', type=d.T.string)]), withRunAsUserName(runAsUserName): { template+: { spec+: { securityContext+: { windowsOptions+: { runAsUserName: runAsUserName } } } } }, }, - '#withFsGroup':: d.fn(help="\"A special supplemental group that applies to all containers in a pod. 
Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod:\\n\\n1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw----\\n\\nIf unset, the Kubelet will not modify the ownership and permissions of any volume.\"", args=[d.arg(name='fsGroup', type=d.T.integer)]), + '#withFsGroup':: d.fn(help="\"A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod:\\n\\n1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw----\\n\\nIf unset, the Kubelet will not modify the ownership and permissions of any volume. Note that this field cannot be set when spec.os.name is windows.\"", args=[d.arg(name='fsGroup', type=d.T.integer)]), withFsGroup(fsGroup): { template+: { spec+: { securityContext+: { fsGroup: fsGroup } } } }, - '#withFsGroupChangePolicy':: d.fn(help='"fsGroupChangePolicy defines behavior of changing ownership and permission of the volume before being exposed inside Pod. This field will only apply to volume types which support fsGroup based ownership(and permissions). It will have no effect on ephemeral volume types such as: secret, configmaps and emptydir. Valid values are \\"OnRootMismatch\\" and \\"Always\\". If not specified, \\"Always\\" is used."', args=[d.arg(name='fsGroupChangePolicy', type=d.T.string)]), + '#withFsGroupChangePolicy':: d.fn(help='"fsGroupChangePolicy defines behavior of changing ownership and permission of the volume before being exposed inside Pod. This field will only apply to volume types which support fsGroup based ownership(and permissions). It will have no effect on ephemeral volume types such as: secret, configmaps and emptydir. Valid values are \\"OnRootMismatch\\" and \\"Always\\". If not specified, \\"Always\\" is used. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='fsGroupChangePolicy', type=d.T.string)]), withFsGroupChangePolicy(fsGroupChangePolicy): { template+: { spec+: { securityContext+: { fsGroupChangePolicy: fsGroupChangePolicy } } } }, - '#withRunAsGroup':: d.fn(help='"The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container."', args=[d.arg(name='runAsGroup', type=d.T.integer)]), + '#withRunAsGroup':: d.fn(help='"The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='runAsGroup', type=d.T.integer)]), withRunAsGroup(runAsGroup): { template+: { spec+: { securityContext+: { runAsGroup: runAsGroup } } } }, '#withRunAsNonRoot':: d.fn(help='"Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in SecurityContext. 
If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence."', args=[d.arg(name='runAsNonRoot', type=d.T.boolean)]), withRunAsNonRoot(runAsNonRoot): { template+: { spec+: { securityContext+: { runAsNonRoot: runAsNonRoot } } } }, - '#withRunAsUser':: d.fn(help='"The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container."', args=[d.arg(name='runAsUser', type=d.T.integer)]), + '#withRunAsUser':: d.fn(help='"The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='runAsUser', type=d.T.integer)]), withRunAsUser(runAsUser): { template+: { spec+: { securityContext+: { runAsUser: runAsUser } } } }, - '#withSupplementalGroups':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID. If unspecified, no groups will be added to any container.\"", args=[d.arg(name='supplementalGroups', type=d.T.array)]), + '#withSupplementalGroups':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID, the fsGroup (if specified), and group memberships defined in the container image for the uid of the container process. If unspecified, no additional groups are added to any container. Note that group memberships defined in the container image for the uid of the container process are still effective, even if they are not included in this list. Note that this field cannot be set when spec.os.name is windows.\"", args=[d.arg(name='supplementalGroups', type=d.T.array)]), withSupplementalGroups(supplementalGroups): { template+: { spec+: { securityContext+: { supplementalGroups: if std.isArray(v=supplementalGroups) then supplementalGroups else [supplementalGroups] } } } }, - '#withSupplementalGroupsMixin':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID. If unspecified, no groups will be added to any container.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='supplementalGroups', type=d.T.array)]), + '#withSupplementalGroupsMixin':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID, the fsGroup (if specified), and group memberships defined in the container image for the uid of the container process. If unspecified, no additional groups are added to any container. Note that group memberships defined in the container image for the uid of the container process are still effective, even if they are not included in this list. 
Note that this field cannot be set when spec.os.name is windows.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='supplementalGroups', type=d.T.array)]), withSupplementalGroupsMixin(supplementalGroups): { template+: { spec+: { securityContext+: { supplementalGroups+: if std.isArray(v=supplementalGroups) then supplementalGroups else [supplementalGroups] } } } }, - '#withSysctls':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch."', args=[d.arg(name='sysctls', type=d.T.array)]), + '#withSysctls':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='sysctls', type=d.T.array)]), withSysctls(sysctls): { template+: { spec+: { securityContext+: { sysctls: if std.isArray(v=sysctls) then sysctls else [sysctls] } } } }, - '#withSysctlsMixin':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='sysctls', type=d.T.array)]), + '#withSysctlsMixin':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch. Note that this field cannot be set when spec.os.name is windows."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='sysctls', type=d.T.array)]), withSysctlsMixin(sysctls): { template+: { spec+: { securityContext+: { sysctls+: if std.isArray(v=sysctls) then sysctls else [sysctls] } } } }, }, '#withActiveDeadlineSeconds':: d.fn(help='"Optional duration in seconds the pod may be active on the node relative to StartTime before the system will actively try to mark it failed and kill associated containers. Value must be a positive integer."', args=[d.arg(name='activeDeadlineSeconds', type=d.T.integer)]), @@ -175,9 +180,9 @@ withDnsPolicy(dnsPolicy): { template+: { spec+: { dnsPolicy: dnsPolicy } } }, '#withEnableServiceLinks':: d.fn(help="\"EnableServiceLinks indicates whether information about services should be injected into pod's environment variables, matching the syntax of Docker links. Optional: Defaults to true.\"", args=[d.arg(name='enableServiceLinks', type=d.T.boolean)]), withEnableServiceLinks(enableServiceLinks): { template+: { spec+: { enableServiceLinks: enableServiceLinks } } }, - '#withEphemeralContainers':: d.fn(help="\"List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. This field is alpha-level and is only honored by servers that enable the EphemeralContainers feature.\"", args=[d.arg(name='ephemeralContainers', type=d.T.array)]), + '#withEphemeralContainers':: d.fn(help="\"List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. 
In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource.\"", args=[d.arg(name='ephemeralContainers', type=d.T.array)]), withEphemeralContainers(ephemeralContainers): { template+: { spec+: { ephemeralContainers: if std.isArray(v=ephemeralContainers) then ephemeralContainers else [ephemeralContainers] } } }, - '#withEphemeralContainersMixin':: d.fn(help="\"List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. This field is alpha-level and is only honored by servers that enable the EphemeralContainers feature.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='ephemeralContainers', type=d.T.array)]), + '#withEphemeralContainersMixin':: d.fn(help="\"List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='ephemeralContainers', type=d.T.array)]), withEphemeralContainersMixin(ephemeralContainers): { template+: { spec+: { ephemeralContainers+: if std.isArray(v=ephemeralContainers) then ephemeralContainers else [ephemeralContainers] } } }, '#withHostAliases':: d.fn(help="\"HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts file if specified. This is only valid for non-hostNetwork pods.\"", args=[d.arg(name='hostAliases', type=d.T.array)]), withHostAliases(hostAliases): { template+: { spec+: { hostAliases: if std.isArray(v=hostAliases) then hostAliases else [hostAliases] } } }, @@ -189,11 +194,13 @@ withHostNetwork(hostNetwork): { template+: { spec+: { hostNetwork: hostNetwork } } }, '#withHostPID':: d.fn(help="\"Use the host's pid namespace. Optional: Default to false.\"", args=[d.arg(name='hostPID', type=d.T.boolean)]), withHostPID(hostPID): { template+: { spec+: { hostPID: hostPID } } }, + '#withHostUsers':: d.fn(help="\"Use the host's user namespace. Optional: Default to true. If set to true or not present, the pod will be run in the host user namespace, useful for when the pod needs a feature only available to the host user namespace, such as loading a kernel module with CAP_SYS_MODULE. When set to false, a new userns is created for the pod. Setting false is useful for mitigating container breakout vulnerabilities even allowing users to run their containers as root without actually having root privileges on the host. 
This field is alpha-level and is only honored by servers that enable the UserNamespacesSupport feature.\"", args=[d.arg(name='hostUsers', type=d.T.boolean)]), + withHostUsers(hostUsers): { template+: { spec+: { hostUsers: hostUsers } } }, '#withHostname':: d.fn(help="\"Specifies the hostname of the Pod If not specified, the pod's hostname will be set to a system-defined value.\"", args=[d.arg(name='hostname', type=d.T.string)]), withHostname(hostname): { template+: { spec+: { hostname: hostname } } }, - '#withImagePullSecrets':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), + '#withImagePullSecrets':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), withImagePullSecrets(imagePullSecrets): { template+: { spec+: { imagePullSecrets: if std.isArray(v=imagePullSecrets) then imagePullSecrets else [imagePullSecrets] } } }, - '#withImagePullSecretsMixin':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), + '#withImagePullSecretsMixin':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), withImagePullSecretsMixin(imagePullSecrets): { template+: { spec+: { imagePullSecrets+: if std.isArray(v=imagePullSecrets) then imagePullSecrets else [imagePullSecrets] } } }, '#withInitContainers':: d.fn(help='"List of initialization containers belonging to the pod. Init containers are executed in order prior to containers being started. If any init container fails, the pod is considered to have failed and is handled according to its restartPolicy. The name for an init container or normal container must be unique among all containers. Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes. 
The resourceRequirements of an init container are taken into account during scheduling by finding the highest request/limit for each resource type, and then using the max of of that value or the sum of the normal containers. Limits are applied to init containers in a similar fashion. Init containers cannot currently be added or removed. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/"', args=[d.arg(name='initContainers', type=d.T.array)]), withInitContainers(initContainers): { template+: { spec+: { initContainers: if std.isArray(v=initContainers) then initContainers else [initContainers] } } }, @@ -205,26 +212,34 @@ withNodeSelector(nodeSelector): { template+: { spec+: { nodeSelector: nodeSelector } } }, '#withNodeSelectorMixin':: d.fn(help="\"NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='nodeSelector', type=d.T.object)]), withNodeSelectorMixin(nodeSelector): { template+: { spec+: { nodeSelector+: nodeSelector } } }, - '#withOverhead':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md This field is alpha-level as of Kubernetes v1.16, and is only honored by servers that enable the PodOverhead feature."', args=[d.arg(name='overhead', type=d.T.object)]), + '#withOverhead':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md"', args=[d.arg(name='overhead', type=d.T.object)]), withOverhead(overhead): { template+: { spec+: { overhead: overhead } } }, - '#withOverheadMixin':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. 
If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md This field is alpha-level as of Kubernetes v1.16, and is only honored by servers that enable the PodOverhead feature."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='overhead', type=d.T.object)]), + '#withOverheadMixin':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='overhead', type=d.T.object)]), withOverheadMixin(overhead): { template+: { spec+: { overhead+: overhead } } }, - '#withPreemptionPolicy':: d.fn(help='"PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset. This field is beta-level, gated by the NonPreemptingPriority feature-gate."', args=[d.arg(name='preemptionPolicy', type=d.T.string)]), + '#withPreemptionPolicy':: d.fn(help='"PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset."', args=[d.arg(name='preemptionPolicy', type=d.T.string)]), withPreemptionPolicy(preemptionPolicy): { template+: { spec+: { preemptionPolicy: preemptionPolicy } } }, '#withPriority':: d.fn(help='"The priority value. Various system components use this field to find the priority of the pod. When Priority Admission Controller is enabled, it prevents users from setting this field. The admission controller populates this field from PriorityClassName. The higher the value, the higher the priority."', args=[d.arg(name='priority', type=d.T.integer)]), withPriority(priority): { template+: { spec+: { priority: priority } } }, '#withPriorityClassName':: d.fn(help="\"If specified, indicates the pod's priority. \\\"system-node-critical\\\" and \\\"system-cluster-critical\\\" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default.\"", args=[d.arg(name='priorityClassName', type=d.T.string)]), withPriorityClassName(priorityClassName): { template+: { spec+: { priorityClassName: priorityClassName } } }, - '#withReadinessGates':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. 
A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/0007-pod-ready%2B%2B.md"', args=[d.arg(name='readinessGates', type=d.T.array)]), + '#withReadinessGates':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates"', args=[d.arg(name='readinessGates', type=d.T.array)]), withReadinessGates(readinessGates): { template+: { spec+: { readinessGates: if std.isArray(v=readinessGates) then readinessGates else [readinessGates] } } }, - '#withReadinessGatesMixin':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/0007-pod-ready%2B%2B.md"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='readinessGates', type=d.T.array)]), + '#withReadinessGatesMixin':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='readinessGates', type=d.T.array)]), withReadinessGatesMixin(readinessGates): { template+: { spec+: { readinessGates+: if std.isArray(v=readinessGates) then readinessGates else [readinessGates] } } }, - '#withRestartPolicy':: d.fn(help='"Restart policy for all containers within the pod. One of Always, OnFailure, Never. Default to Always. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy"', args=[d.arg(name='restartPolicy', type=d.T.string)]), + '#withResourceClaims':: d.fn(help='"ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. The resources will be made available to those containers which consume them by name.\\n\\nThis is an alpha field and requires enabling the DynamicResourceAllocation feature gate.\\n\\nThis field is immutable."', args=[d.arg(name='resourceClaims', type=d.T.array)]), + withResourceClaims(resourceClaims): { template+: { spec+: { resourceClaims: if std.isArray(v=resourceClaims) then resourceClaims else [resourceClaims] } } }, + '#withResourceClaimsMixin':: d.fn(help='"ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. The resources will be made available to those containers which consume them by name.\\n\\nThis is an alpha field and requires enabling the DynamicResourceAllocation feature gate.\\n\\nThis field is immutable."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='resourceClaims', type=d.T.array)]), + withResourceClaimsMixin(resourceClaims): { template+: { spec+: { resourceClaims+: if std.isArray(v=resourceClaims) then resourceClaims else [resourceClaims] } } }, + '#withRestartPolicy':: d.fn(help='"Restart policy for all containers within the pod. One of Always, OnFailure, Never. 
In some contexts, only a subset of those values may be permitted. Default to Always. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy"', args=[d.arg(name='restartPolicy', type=d.T.string)]), withRestartPolicy(restartPolicy): { template+: { spec+: { restartPolicy: restartPolicy } } }, - '#withRuntimeClassName':: d.fn(help='"RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \\"legacy\\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md This is a beta feature as of Kubernetes v1.14."', args=[d.arg(name='runtimeClassName', type=d.T.string)]), + '#withRuntimeClassName':: d.fn(help='"RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \\"legacy\\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class"', args=[d.arg(name='runtimeClassName', type=d.T.string)]), withRuntimeClassName(runtimeClassName): { template+: { spec+: { runtimeClassName: runtimeClassName } } }, '#withSchedulerName':: d.fn(help='"If specified, the pod will be dispatched by specified scheduler. If not specified, the pod will be dispatched by default scheduler."', args=[d.arg(name='schedulerName', type=d.T.string)]), withSchedulerName(schedulerName): { template+: { spec+: { schedulerName: schedulerName } } }, + '#withSchedulingGates':: d.fn(help='"SchedulingGates is an opaque list of values that if specified will block scheduling the pod. If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the scheduler will not attempt to schedule the pod.\\n\\nSchedulingGates can only be set at pod creation time, and be removed only afterwards.\\n\\nThis is a beta feature enabled by the PodSchedulingReadiness feature gate."', args=[d.arg(name='schedulingGates', type=d.T.array)]), + withSchedulingGates(schedulingGates): { template+: { spec+: { schedulingGates: if std.isArray(v=schedulingGates) then schedulingGates else [schedulingGates] } } }, + '#withSchedulingGatesMixin':: d.fn(help='"SchedulingGates is an opaque list of values that if specified will block scheduling the pod. If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the scheduler will not attempt to schedule the pod.\\n\\nSchedulingGates can only be set at pod creation time, and be removed only afterwards.\\n\\nThis is a beta feature enabled by the PodSchedulingReadiness feature gate."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='schedulingGates', type=d.T.array)]), + withSchedulingGatesMixin(schedulingGates): { template+: { spec+: { schedulingGates+: if std.isArray(v=schedulingGates) then schedulingGates else [schedulingGates] } } }, '#withServiceAccount':: d.fn(help='"DeprecatedServiceAccount is a depreciated alias for ServiceAccountName. 
Deprecated: Use serviceAccountName instead."', args=[d.arg(name='serviceAccount', type=d.T.string)]), withServiceAccount(serviceAccount): { template+: { spec+: { serviceAccount: serviceAccount } } }, '#withServiceAccountName':: d.fn(help='"ServiceAccountName is the name of the ServiceAccount to use to run this pod. More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/"', args=[d.arg(name='serviceAccountName', type=d.T.string)]), diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apps/v1/replicaSetStatus.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apps/v1/replicaSetStatus.libsonnet similarity index 83% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apps/v1/replicaSetStatus.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apps/v1/replicaSetStatus.libsonnet index b0702d1e06a..9d762ad0cb0 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apps/v1/replicaSetStatus.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apps/v1/replicaSetStatus.libsonnet @@ -11,9 +11,9 @@ withFullyLabeledReplicas(fullyLabeledReplicas): { fullyLabeledReplicas: fullyLabeledReplicas }, '#withObservedGeneration':: d.fn(help='"ObservedGeneration reflects the generation of the most recently observed ReplicaSet."', args=[d.arg(name='observedGeneration', type=d.T.integer)]), withObservedGeneration(observedGeneration): { observedGeneration: observedGeneration }, - '#withReadyReplicas':: d.fn(help='"The number of ready replicas for this replica set."', args=[d.arg(name='readyReplicas', type=d.T.integer)]), + '#withReadyReplicas':: d.fn(help='"readyReplicas is the number of pods targeted by this ReplicaSet with a Ready Condition."', args=[d.arg(name='readyReplicas', type=d.T.integer)]), withReadyReplicas(readyReplicas): { readyReplicas: readyReplicas }, - '#withReplicas':: d.fn(help='"Replicas is the most recently oberved number of replicas. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller"', args=[d.arg(name='replicas', type=d.T.integer)]), + '#withReplicas':: d.fn(help='"Replicas is the most recently observed number of replicas. 
More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller"', args=[d.arg(name='replicas', type=d.T.integer)]), withReplicas(replicas): { replicas: replicas }, '#mixin': 'ignore', mixin: self, diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apps/v1/rollingUpdateDaemonSet.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apps/v1/rollingUpdateDaemonSet.libsonnet similarity index 100% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apps/v1/rollingUpdateDaemonSet.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apps/v1/rollingUpdateDaemonSet.libsonnet diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apps/v1/rollingUpdateDeployment.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apps/v1/rollingUpdateDeployment.libsonnet similarity index 100% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apps/v1/rollingUpdateDeployment.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apps/v1/rollingUpdateDeployment.libsonnet diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apps/v1/rollingUpdateStatefulSetStrategy.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apps/v1/rollingUpdateStatefulSetStrategy.libsonnet new file mode 100644 index 00000000000..ecc7a1867cd --- /dev/null +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apps/v1/rollingUpdateStatefulSetStrategy.libsonnet @@ -0,0 +1,10 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='rollingUpdateStatefulSetStrategy', url='', help='"RollingUpdateStatefulSetStrategy is used to communicate parameter for RollingUpdateStatefulSetStrategyType."'), + '#withMaxUnavailable':: d.fn(help='"IntOrString is a type that can hold an int32 or a string. When used in JSON or YAML marshalling and unmarshalling, it produces or consumes the inner type. This allows you to have, for example, a JSON field that can accept a name or number."', args=[d.arg(name='maxUnavailable', type=d.T.string)]), + withMaxUnavailable(maxUnavailable): { maxUnavailable: maxUnavailable }, + '#withPartition':: d.fn(help='"Partition indicates the ordinal at which the StatefulSet should be partitioned for updates. During a rolling update, all pods from ordinal Replicas-1 to Partition are updated. All pods from ordinal Partition-1 to 0 remain untouched. This is helpful in being able to do a canary based deployment. 
The default value is 0."', args=[d.arg(name='partition', type=d.T.integer)]), + withPartition(partition): { partition: partition }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apps/v1/statefulSet.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apps/v1/statefulSet.libsonnet similarity index 83% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apps/v1/statefulSet.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apps/v1/statefulSet.libsonnet index acf08a3d275..e1f5369fd24 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apps/v1/statefulSet.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apps/v1/statefulSet.libsonnet @@ -1,14 +1,12 @@ { local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='statefulSet', url='', help='"StatefulSet represents a set of pods with consistent identities. Identities are defined as:\\n - Network: A single stable DNS and hostname.\\n - Storage: As many VolumeClaims as requested.\\nThe StatefulSet guarantees that a given network identity will always map to the same storage identity."'), + '#':: d.pkg(name='statefulSet', url='', help='"StatefulSet represents a set of pods with consistent identities. Identities are defined as:\\n - Network: A single stable DNS and hostname.\\n - Storage: As many VolumeClaims as requested.\\n\\nThe StatefulSet guarantees that a given network identity will always map to the same storage identity."'), '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -19,21 +17,21 @@ withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. 
The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { metadata+: { generateName: generateName } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { metadata+: { labels+: labels } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. 
This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { metadata+: { namespace: namespace } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. 
There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, @@ -41,9 +39,9 @@ withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { metadata+: { uid: uid } }, }, '#new':: d.fn(help='new returns an instance of StatefulSet', args=[d.arg(name='name', type=d.T.string)]), @@ -53,6 +51,18 @@ } + self.metadata.withName(name=name), '#spec':: d.obj(help='"A StatefulSetSpec is the specification of a StatefulSet."'), spec: { + '#ordinals':: d.obj(help='"StatefulSetOrdinals describes the policy used for replica ordinal assignment in this StatefulSet."'), + ordinals: { + '#withStart':: d.fn(help="\"start is the number representing the first replica's index. It may be used to number replicas from an alternate index (eg: 1-indexed) over the default 0-indexed names, or to orchestrate progressive movement of replicas from one StatefulSet to another. If set, replica indices will be in the range:\\n [.spec.ordinals.start, .spec.ordinals.start + .spec.replicas).\\nIf unset, defaults to 0. 
Replica indices will be in the range:\\n [0, .spec.replicas).\"", args=[d.arg(name='start', type=d.T.integer)]), + withStart(start): { spec+: { ordinals+: { start: start } } }, + }, + '#persistentVolumeClaimRetentionPolicy':: d.obj(help='"StatefulSetPersistentVolumeClaimRetentionPolicy describes the policy used for PVCs created from the StatefulSet VolumeClaimTemplates."'), + persistentVolumeClaimRetentionPolicy: { + '#withWhenDeleted':: d.fn(help='"WhenDeleted specifies what happens to PVCs created from StatefulSet VolumeClaimTemplates when the StatefulSet is deleted. The default policy of `Retain` causes PVCs to not be affected by StatefulSet deletion. The `Delete` policy causes those PVCs to be deleted."', args=[d.arg(name='whenDeleted', type=d.T.string)]), + withWhenDeleted(whenDeleted): { spec+: { persistentVolumeClaimRetentionPolicy+: { whenDeleted: whenDeleted } } }, + '#withWhenScaled':: d.fn(help='"WhenScaled specifies what happens to PVCs created from StatefulSet VolumeClaimTemplates when the StatefulSet is scaled down. The default policy of `Retain` causes PVCs to not be affected by a scaledown. The `Delete` policy causes the associated PVCs for any excess pods above the replica count to be deleted."', args=[d.arg(name='whenScaled', type=d.T.string)]), + withWhenScaled(whenScaled): { spec+: { persistentVolumeClaimRetentionPolicy+: { whenScaled: whenScaled } } }, + }, '#selector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), selector: { '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), @@ -68,12 +78,10 @@ template: { '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { spec+: { template+: { metadata+: { annotations: annotations } } } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. 
They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { spec+: { template+: { metadata+: { annotations+: annotations } } } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { spec+: { template+: { metadata+: { clusterName: clusterName } } } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { spec+: { template+: { metadata+: { creationTimestamp: creationTimestamp } } } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -84,21 +92,21 @@ withFinalizers(finalizers): { spec+: { template+: { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } } } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { spec+: { template+: { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } } } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. 
The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { spec+: { template+: { metadata+: { generateName: generateName } } } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { spec+: { template+: { metadata+: { generation: generation } } } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { spec+: { template+: { metadata+: { labels: labels } } } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { spec+: { template+: { metadata+: { labels+: labels } } } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { spec+: { template+: { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } } } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { spec+: { template+: { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } } } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { spec+: { template+: { metadata+: { name: name } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { spec+: { template+: { metadata+: { namespace: namespace } } } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { spec+: { template+: { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } } } }, @@ -106,9 +114,9 @@ withOwnerReferencesMixin(ownerReferences): { spec+: { template+: { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } } } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { spec+: { template+: { metadata+: { resourceVersion: resourceVersion } } } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { spec+: { template+: { metadata+: { selfLink: selfLink } } } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { spec+: { template+: { metadata+: { uid: uid } } } }, }, '#spec':: d.obj(help='"PodSpec is a description of a pod."'), @@ -167,6 +175,11 @@ '#withSearchesMixin':: d.fn(help='"A list of DNS search domains for host-name lookup. This will be appended to the base search paths generated from DNSPolicy. 
Duplicated search paths will be removed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='searches', type=d.T.array)]), withSearchesMixin(searches): { spec+: { template+: { spec+: { dnsConfig+: { searches+: if std.isArray(v=searches) then searches else [searches] } } } } }, }, + '#os':: d.obj(help='"PodOS defines the OS parameters of a pod."'), + os: { + '#withName':: d.fn(help='"Name is the name of the operating system. The currently supported values are linux and windows. Additional value may be defined in future and can be one of: https://github.com/opencontainers/runtime-spec/blob/master/config.md#platform-specific-configuration Clients should expect to handle additional values and treat unrecognized values in this field as os: null"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { spec+: { template+: { spec+: { os+: { name: name } } } } }, + }, '#securityContext':: d.obj(help='"PodSecurityContext holds pod-level security attributes and common container settings. Some fields are also present in container.securityContext. Field values of container.securityContext take precedence over field values of PodSecurityContext."'), securityContext: { '#seLinuxOptions':: d.obj(help='"SELinuxOptions are the labels to be applied to the container"'), @@ -182,7 +195,7 @@ }, '#seccompProfile':: d.obj(help="\"SeccompProfile defines a pod/container's seccomp profile settings. Only one profile source may be set.\""), seccompProfile: { - '#withLocalhostProfile':: d.fn(help="\"localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must only be set if type is \\\"Localhost\\\".\"", args=[d.arg(name='localhostProfile', type=d.T.string)]), + '#withLocalhostProfile':: d.fn(help="\"localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must be set if type is \\\"Localhost\\\". Must NOT be set for any other type.\"", args=[d.arg(name='localhostProfile', type=d.T.string)]), withLocalhostProfile(localhostProfile): { spec+: { template+: { spec+: { securityContext+: { seccompProfile+: { localhostProfile: localhostProfile } } } } } }, '#withType':: d.fn(help='"type indicates which kind of seccomp profile will be applied. Valid options are:\\n\\nLocalhost - a profile defined in a file on the node should be used. RuntimeDefault - the container runtime default profile should be used. 
Unconfined - no profile should be applied."', args=[d.arg(name='type', type=d.T.string)]), withType(type): { spec+: { template+: { spec+: { securityContext+: { seccompProfile+: { type: type } } } } } }, @@ -193,26 +206,28 @@ withGmsaCredentialSpec(gmsaCredentialSpec): { spec+: { template+: { spec+: { securityContext+: { windowsOptions+: { gmsaCredentialSpec: gmsaCredentialSpec } } } } } }, '#withGmsaCredentialSpecName':: d.fn(help='"GMSACredentialSpecName is the name of the GMSA credential spec to use."', args=[d.arg(name='gmsaCredentialSpecName', type=d.T.string)]), withGmsaCredentialSpecName(gmsaCredentialSpecName): { spec+: { template+: { spec+: { securityContext+: { windowsOptions+: { gmsaCredentialSpecName: gmsaCredentialSpecName } } } } } }, + '#withHostProcess':: d.fn(help="\"HostProcess determines if a container should be run as a 'Host Process' container. All of a Pod's containers must have the same effective HostProcess value (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). In addition, if HostProcess is true then HostNetwork must also be set to true.\"", args=[d.arg(name='hostProcess', type=d.T.boolean)]), + withHostProcess(hostProcess): { spec+: { template+: { spec+: { securityContext+: { windowsOptions+: { hostProcess: hostProcess } } } } } }, '#withRunAsUserName':: d.fn(help='"The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence."', args=[d.arg(name='runAsUserName', type=d.T.string)]), withRunAsUserName(runAsUserName): { spec+: { template+: { spec+: { securityContext+: { windowsOptions+: { runAsUserName: runAsUserName } } } } } }, }, - '#withFsGroup':: d.fn(help="\"A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod:\\n\\n1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw----\\n\\nIf unset, the Kubelet will not modify the ownership and permissions of any volume.\"", args=[d.arg(name='fsGroup', type=d.T.integer)]), + '#withFsGroup':: d.fn(help="\"A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod:\\n\\n1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw----\\n\\nIf unset, the Kubelet will not modify the ownership and permissions of any volume. Note that this field cannot be set when spec.os.name is windows.\"", args=[d.arg(name='fsGroup', type=d.T.integer)]), withFsGroup(fsGroup): { spec+: { template+: { spec+: { securityContext+: { fsGroup: fsGroup } } } } }, - '#withFsGroupChangePolicy':: d.fn(help='"fsGroupChangePolicy defines behavior of changing ownership and permission of the volume before being exposed inside Pod. This field will only apply to volume types which support fsGroup based ownership(and permissions). It will have no effect on ephemeral volume types such as: secret, configmaps and emptydir. Valid values are \\"OnRootMismatch\\" and \\"Always\\". 
If not specified, \\"Always\\" is used."', args=[d.arg(name='fsGroupChangePolicy', type=d.T.string)]), + '#withFsGroupChangePolicy':: d.fn(help='"fsGroupChangePolicy defines behavior of changing ownership and permission of the volume before being exposed inside Pod. This field will only apply to volume types which support fsGroup based ownership(and permissions). It will have no effect on ephemeral volume types such as: secret, configmaps and emptydir. Valid values are \\"OnRootMismatch\\" and \\"Always\\". If not specified, \\"Always\\" is used. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='fsGroupChangePolicy', type=d.T.string)]), withFsGroupChangePolicy(fsGroupChangePolicy): { spec+: { template+: { spec+: { securityContext+: { fsGroupChangePolicy: fsGroupChangePolicy } } } } }, - '#withRunAsGroup':: d.fn(help='"The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container."', args=[d.arg(name='runAsGroup', type=d.T.integer)]), + '#withRunAsGroup':: d.fn(help='"The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='runAsGroup', type=d.T.integer)]), withRunAsGroup(runAsGroup): { spec+: { template+: { spec+: { securityContext+: { runAsGroup: runAsGroup } } } } }, '#withRunAsNonRoot':: d.fn(help='"Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence."', args=[d.arg(name='runAsNonRoot', type=d.T.boolean)]), withRunAsNonRoot(runAsNonRoot): { spec+: { template+: { spec+: { securityContext+: { runAsNonRoot: runAsNonRoot } } } } }, - '#withRunAsUser':: d.fn(help='"The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container."', args=[d.arg(name='runAsUser', type=d.T.integer)]), + '#withRunAsUser':: d.fn(help='"The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='runAsUser', type=d.T.integer)]), withRunAsUser(runAsUser): { spec+: { template+: { spec+: { securityContext+: { runAsUser: runAsUser } } } } }, - '#withSupplementalGroups':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID. 
If unspecified, no groups will be added to any container.\"", args=[d.arg(name='supplementalGroups', type=d.T.array)]), + '#withSupplementalGroups':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID, the fsGroup (if specified), and group memberships defined in the container image for the uid of the container process. If unspecified, no additional groups are added to any container. Note that group memberships defined in the container image for the uid of the container process are still effective, even if they are not included in this list. Note that this field cannot be set when spec.os.name is windows.\"", args=[d.arg(name='supplementalGroups', type=d.T.array)]), withSupplementalGroups(supplementalGroups): { spec+: { template+: { spec+: { securityContext+: { supplementalGroups: if std.isArray(v=supplementalGroups) then supplementalGroups else [supplementalGroups] } } } } }, - '#withSupplementalGroupsMixin':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID. If unspecified, no groups will be added to any container.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='supplementalGroups', type=d.T.array)]), + '#withSupplementalGroupsMixin':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID, the fsGroup (if specified), and group memberships defined in the container image for the uid of the container process. If unspecified, no additional groups are added to any container. Note that group memberships defined in the container image for the uid of the container process are still effective, even if they are not included in this list. Note that this field cannot be set when spec.os.name is windows.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='supplementalGroups', type=d.T.array)]), withSupplementalGroupsMixin(supplementalGroups): { spec+: { template+: { spec+: { securityContext+: { supplementalGroups+: if std.isArray(v=supplementalGroups) then supplementalGroups else [supplementalGroups] } } } } }, - '#withSysctls':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch."', args=[d.arg(name='sysctls', type=d.T.array)]), + '#withSysctls':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='sysctls', type=d.T.array)]), withSysctls(sysctls): { spec+: { template+: { spec+: { securityContext+: { sysctls: if std.isArray(v=sysctls) then sysctls else [sysctls] } } } } }, - '#withSysctlsMixin':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='sysctls', type=d.T.array)]), + '#withSysctlsMixin':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch. 
Note that this field cannot be set when spec.os.name is windows."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='sysctls', type=d.T.array)]), withSysctlsMixin(sysctls): { spec+: { template+: { spec+: { securityContext+: { sysctls+: if std.isArray(v=sysctls) then sysctls else [sysctls] } } } } }, }, '#withActiveDeadlineSeconds':: d.fn(help='"Optional duration in seconds the pod may be active on the node relative to StartTime before the system will actively try to mark it failed and kill associated containers. Value must be a positive integer."', args=[d.arg(name='activeDeadlineSeconds', type=d.T.integer)]), @@ -227,9 +242,9 @@ withDnsPolicy(dnsPolicy): { spec+: { template+: { spec+: { dnsPolicy: dnsPolicy } } } }, '#withEnableServiceLinks':: d.fn(help="\"EnableServiceLinks indicates whether information about services should be injected into pod's environment variables, matching the syntax of Docker links. Optional: Defaults to true.\"", args=[d.arg(name='enableServiceLinks', type=d.T.boolean)]), withEnableServiceLinks(enableServiceLinks): { spec+: { template+: { spec+: { enableServiceLinks: enableServiceLinks } } } }, - '#withEphemeralContainers':: d.fn(help="\"List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. This field is alpha-level and is only honored by servers that enable the EphemeralContainers feature.\"", args=[d.arg(name='ephemeralContainers', type=d.T.array)]), + '#withEphemeralContainers':: d.fn(help="\"List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource.\"", args=[d.arg(name='ephemeralContainers', type=d.T.array)]), withEphemeralContainers(ephemeralContainers): { spec+: { template+: { spec+: { ephemeralContainers: if std.isArray(v=ephemeralContainers) then ephemeralContainers else [ephemeralContainers] } } } }, - '#withEphemeralContainersMixin':: d.fn(help="\"List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. This field is alpha-level and is only honored by servers that enable the EphemeralContainers feature.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='ephemeralContainers', type=d.T.array)]), + '#withEphemeralContainersMixin':: d.fn(help="\"List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. 
In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='ephemeralContainers', type=d.T.array)]), withEphemeralContainersMixin(ephemeralContainers): { spec+: { template+: { spec+: { ephemeralContainers+: if std.isArray(v=ephemeralContainers) then ephemeralContainers else [ephemeralContainers] } } } }, '#withHostAliases':: d.fn(help="\"HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts file if specified. This is only valid for non-hostNetwork pods.\"", args=[d.arg(name='hostAliases', type=d.T.array)]), withHostAliases(hostAliases): { spec+: { template+: { spec+: { hostAliases: if std.isArray(v=hostAliases) then hostAliases else [hostAliases] } } } }, @@ -241,11 +256,13 @@ withHostNetwork(hostNetwork): { spec+: { template+: { spec+: { hostNetwork: hostNetwork } } } }, '#withHostPID':: d.fn(help="\"Use the host's pid namespace. Optional: Default to false.\"", args=[d.arg(name='hostPID', type=d.T.boolean)]), withHostPID(hostPID): { spec+: { template+: { spec+: { hostPID: hostPID } } } }, + '#withHostUsers':: d.fn(help="\"Use the host's user namespace. Optional: Default to true. If set to true or not present, the pod will be run in the host user namespace, useful for when the pod needs a feature only available to the host user namespace, such as loading a kernel module with CAP_SYS_MODULE. When set to false, a new userns is created for the pod. Setting false is useful for mitigating container breakout vulnerabilities even allowing users to run their containers as root without actually having root privileges on the host. This field is alpha-level and is only honored by servers that enable the UserNamespacesSupport feature.\"", args=[d.arg(name='hostUsers', type=d.T.boolean)]), + withHostUsers(hostUsers): { spec+: { template+: { spec+: { hostUsers: hostUsers } } } }, '#withHostname':: d.fn(help="\"Specifies the hostname of the Pod If not specified, the pod's hostname will be set to a system-defined value.\"", args=[d.arg(name='hostname', type=d.T.string)]), withHostname(hostname): { spec+: { template+: { spec+: { hostname: hostname } } } }, - '#withImagePullSecrets':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), + '#withImagePullSecrets':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. 
More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), withImagePullSecrets(imagePullSecrets): { spec+: { template+: { spec+: { imagePullSecrets: if std.isArray(v=imagePullSecrets) then imagePullSecrets else [imagePullSecrets] } } } }, - '#withImagePullSecretsMixin':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), + '#withImagePullSecretsMixin':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), withImagePullSecretsMixin(imagePullSecrets): { spec+: { template+: { spec+: { imagePullSecrets+: if std.isArray(v=imagePullSecrets) then imagePullSecrets else [imagePullSecrets] } } } }, '#withInitContainers':: d.fn(help='"List of initialization containers belonging to the pod. Init containers are executed in order prior to containers being started. If any init container fails, the pod is considered to have failed and is handled according to its restartPolicy. The name for an init container or normal container must be unique among all containers. Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes. The resourceRequirements of an init container are taken into account during scheduling by finding the highest request/limit for each resource type, and then using the max of of that value or the sum of the normal containers. Limits are applied to init containers in a similar fashion. Init containers cannot currently be added or removed. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/"', args=[d.arg(name='initContainers', type=d.T.array)]), withInitContainers(initContainers): { spec+: { template+: { spec+: { initContainers: if std.isArray(v=initContainers) then initContainers else [initContainers] } } } }, @@ -257,26 +274,34 @@ withNodeSelector(nodeSelector): { spec+: { template+: { spec+: { nodeSelector: nodeSelector } } } }, '#withNodeSelectorMixin':: d.fn(help="\"NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='nodeSelector', type=d.T.object)]), withNodeSelectorMixin(nodeSelector): { spec+: { template+: { spec+: { nodeSelector+: nodeSelector } } } }, - '#withOverhead':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. 
This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md This field is alpha-level as of Kubernetes v1.16, and is only honored by servers that enable the PodOverhead feature."', args=[d.arg(name='overhead', type=d.T.object)]), + '#withOverhead':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md"', args=[d.arg(name='overhead', type=d.T.object)]), withOverhead(overhead): { spec+: { template+: { spec+: { overhead: overhead } } } }, - '#withOverheadMixin':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md This field is alpha-level as of Kubernetes v1.16, and is only honored by servers that enable the PodOverhead feature."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='overhead', type=d.T.object)]), + '#withOverheadMixin':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. 
More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='overhead', type=d.T.object)]), withOverheadMixin(overhead): { spec+: { template+: { spec+: { overhead+: overhead } } } }, - '#withPreemptionPolicy':: d.fn(help='"PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset. This field is beta-level, gated by the NonPreemptingPriority feature-gate."', args=[d.arg(name='preemptionPolicy', type=d.T.string)]), + '#withPreemptionPolicy':: d.fn(help='"PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset."', args=[d.arg(name='preemptionPolicy', type=d.T.string)]), withPreemptionPolicy(preemptionPolicy): { spec+: { template+: { spec+: { preemptionPolicy: preemptionPolicy } } } }, '#withPriority':: d.fn(help='"The priority value. Various system components use this field to find the priority of the pod. When Priority Admission Controller is enabled, it prevents users from setting this field. The admission controller populates this field from PriorityClassName. The higher the value, the higher the priority."', args=[d.arg(name='priority', type=d.T.integer)]), withPriority(priority): { spec+: { template+: { spec+: { priority: priority } } } }, '#withPriorityClassName':: d.fn(help="\"If specified, indicates the pod's priority. \\\"system-node-critical\\\" and \\\"system-cluster-critical\\\" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default.\"", args=[d.arg(name='priorityClassName', type=d.T.string)]), withPriorityClassName(priorityClassName): { spec+: { template+: { spec+: { priorityClassName: priorityClassName } } } }, - '#withReadinessGates':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/0007-pod-ready%2B%2B.md"', args=[d.arg(name='readinessGates', type=d.T.array)]), + '#withReadinessGates':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates"', args=[d.arg(name='readinessGates', type=d.T.array)]), withReadinessGates(readinessGates): { spec+: { template+: { spec+: { readinessGates: if std.isArray(v=readinessGates) then readinessGates else [readinessGates] } } } }, - '#withReadinessGatesMixin':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. 
A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/0007-pod-ready%2B%2B.md"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='readinessGates', type=d.T.array)]), + '#withReadinessGatesMixin':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='readinessGates', type=d.T.array)]), withReadinessGatesMixin(readinessGates): { spec+: { template+: { spec+: { readinessGates+: if std.isArray(v=readinessGates) then readinessGates else [readinessGates] } } } }, - '#withRestartPolicy':: d.fn(help='"Restart policy for all containers within the pod. One of Always, OnFailure, Never. Default to Always. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy"', args=[d.arg(name='restartPolicy', type=d.T.string)]), + '#withResourceClaims':: d.fn(help='"ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. The resources will be made available to those containers which consume them by name.\\n\\nThis is an alpha field and requires enabling the DynamicResourceAllocation feature gate.\\n\\nThis field is immutable."', args=[d.arg(name='resourceClaims', type=d.T.array)]), + withResourceClaims(resourceClaims): { spec+: { template+: { spec+: { resourceClaims: if std.isArray(v=resourceClaims) then resourceClaims else [resourceClaims] } } } }, + '#withResourceClaimsMixin':: d.fn(help='"ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. The resources will be made available to those containers which consume them by name.\\n\\nThis is an alpha field and requires enabling the DynamicResourceAllocation feature gate.\\n\\nThis field is immutable."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='resourceClaims', type=d.T.array)]), + withResourceClaimsMixin(resourceClaims): { spec+: { template+: { spec+: { resourceClaims+: if std.isArray(v=resourceClaims) then resourceClaims else [resourceClaims] } } } }, + '#withRestartPolicy':: d.fn(help='"Restart policy for all containers within the pod. One of Always, OnFailure, Never. In some contexts, only a subset of those values may be permitted. Default to Always. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy"', args=[d.arg(name='restartPolicy', type=d.T.string)]), withRestartPolicy(restartPolicy): { spec+: { template+: { spec+: { restartPolicy: restartPolicy } } } }, - '#withRuntimeClassName':: d.fn(help='"RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \\"legacy\\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. 
More info: https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md This is a beta feature as of Kubernetes v1.14."', args=[d.arg(name='runtimeClassName', type=d.T.string)]), + '#withRuntimeClassName':: d.fn(help='"RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \\"legacy\\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class"', args=[d.arg(name='runtimeClassName', type=d.T.string)]), withRuntimeClassName(runtimeClassName): { spec+: { template+: { spec+: { runtimeClassName: runtimeClassName } } } }, '#withSchedulerName':: d.fn(help='"If specified, the pod will be dispatched by specified scheduler. If not specified, the pod will be dispatched by default scheduler."', args=[d.arg(name='schedulerName', type=d.T.string)]), withSchedulerName(schedulerName): { spec+: { template+: { spec+: { schedulerName: schedulerName } } } }, + '#withSchedulingGates':: d.fn(help='"SchedulingGates is an opaque list of values that if specified will block scheduling the pod. If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the scheduler will not attempt to schedule the pod.\\n\\nSchedulingGates can only be set at pod creation time, and be removed only afterwards.\\n\\nThis is a beta feature enabled by the PodSchedulingReadiness feature gate."', args=[d.arg(name='schedulingGates', type=d.T.array)]), + withSchedulingGates(schedulingGates): { spec+: { template+: { spec+: { schedulingGates: if std.isArray(v=schedulingGates) then schedulingGates else [schedulingGates] } } } }, + '#withSchedulingGatesMixin':: d.fn(help='"SchedulingGates is an opaque list of values that if specified will block scheduling the pod. If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the scheduler will not attempt to schedule the pod.\\n\\nSchedulingGates can only be set at pod creation time, and be removed only afterwards.\\n\\nThis is a beta feature enabled by the PodSchedulingReadiness feature gate."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='schedulingGates', type=d.T.array)]), + withSchedulingGatesMixin(schedulingGates): { spec+: { template+: { spec+: { schedulingGates+: if std.isArray(v=schedulingGates) then schedulingGates else [schedulingGates] } } } }, '#withServiceAccount':: d.fn(help='"DeprecatedServiceAccount is a depreciated alias for ServiceAccountName. Deprecated: Use serviceAccountName instead."', args=[d.arg(name='serviceAccount', type=d.T.string)]), withServiceAccount(serviceAccount): { spec+: { template+: { spec+: { serviceAccount: serviceAccount } } } }, '#withServiceAccountName':: d.fn(help='"ServiceAccountName is the name of the ServiceAccount to use to run this pod. More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/"', args=[d.arg(name='serviceAccountName', type=d.T.string)]), @@ -307,12 +332,16 @@ updateStrategy: { '#rollingUpdate':: d.obj(help='"RollingUpdateStatefulSetStrategy is used to communicate parameter for RollingUpdateStatefulSetStrategyType."'), rollingUpdate: { - '#withPartition':: d.fn(help='"Partition indicates the ordinal at which the StatefulSet should be partitioned. 
Default value is 0."', args=[d.arg(name='partition', type=d.T.integer)]), + '#withMaxUnavailable':: d.fn(help='"IntOrString is a type that can hold an int32 or a string. When used in JSON or YAML marshalling and unmarshalling, it produces or consumes the inner type. This allows you to have, for example, a JSON field that can accept a name or number."', args=[d.arg(name='maxUnavailable', type=d.T.string)]), + withMaxUnavailable(maxUnavailable): { spec+: { updateStrategy+: { rollingUpdate+: { maxUnavailable: maxUnavailable } } } }, + '#withPartition':: d.fn(help='"Partition indicates the ordinal at which the StatefulSet should be partitioned for updates. During a rolling update, all pods from ordinal Replicas-1 to Partition are updated. All pods from ordinal Partition-1 to 0 remain untouched. This is helpful in being able to do a canary based deployment. The default value is 0."', args=[d.arg(name='partition', type=d.T.integer)]), withPartition(partition): { spec+: { updateStrategy+: { rollingUpdate+: { partition: partition } } } }, }, '#withType':: d.fn(help='"Type indicates the type of the StatefulSetUpdateStrategy. Default is RollingUpdate."', args=[d.arg(name='type', type=d.T.string)]), withType(type): { spec+: { updateStrategy+: { type: type } } }, }, + '#withMinReadySeconds':: d.fn(help='"Minimum number of seconds for which a newly created pod should be ready without any of its container crashing for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)"', args=[d.arg(name='minReadySeconds', type=d.T.integer)]), + withMinReadySeconds(minReadySeconds): { spec+: { minReadySeconds: minReadySeconds } }, '#withPodManagementPolicy':: d.fn(help='"podManagementPolicy controls how pods are created during initial scale up, when replacing pods on nodes, or when scaling down. The default policy is `OrderedReady`, where pods are created in increasing order (pod-0, then pod-1, etc) and the controller will wait until each pod is ready before continuing. When scaling down, the pods are removed in the opposite order. The alternative policy is `Parallel` which will create pods in parallel to match the desired scale without waiting, and on scale down will delete all pods at once."', args=[d.arg(name='podManagementPolicy', type=d.T.string)]), withPodManagementPolicy(podManagementPolicy): { spec+: { podManagementPolicy: podManagementPolicy } }, '#withReplicas':: d.fn(help='"replicas is the desired number of replicas of the given Template. These are replicas in the sense that they are instantiations of the same Template, but individual replicas also have a consistent identity. 
If unspecified, defaults to 1."', args=[d.arg(name='replicas', type=d.T.integer)]), diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apps/v1/statefulSetCondition.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apps/v1/statefulSetCondition.libsonnet similarity index 100% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apps/v1/statefulSetCondition.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apps/v1/statefulSetCondition.libsonnet diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apps/v1/statefulSetOrdinals.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apps/v1/statefulSetOrdinals.libsonnet new file mode 100644 index 00000000000..d1fe8ab45ec --- /dev/null +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apps/v1/statefulSetOrdinals.libsonnet @@ -0,0 +1,8 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='statefulSetOrdinals', url='', help='"StatefulSetOrdinals describes the policy used for replica ordinal assignment in this StatefulSet."'), + '#withStart':: d.fn(help="\"start is the number representing the first replica's index. It may be used to number replicas from an alternate index (eg: 1-indexed) over the default 0-indexed names, or to orchestrate progressive movement of replicas from one StatefulSet to another. If set, replica indices will be in the range:\\n [.spec.ordinals.start, .spec.ordinals.start + .spec.replicas).\\nIf unset, defaults to 0. Replica indices will be in the range:\\n [0, .spec.replicas).\"", args=[d.arg(name='start', type=d.T.integer)]), + withStart(start): { start: start }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apps/v1/statefulSetPersistentVolumeClaimRetentionPolicy.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apps/v1/statefulSetPersistentVolumeClaimRetentionPolicy.libsonnet new file mode 100644 index 00000000000..024ca485364 --- /dev/null +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apps/v1/statefulSetPersistentVolumeClaimRetentionPolicy.libsonnet @@ -0,0 +1,10 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='statefulSetPersistentVolumeClaimRetentionPolicy', url='', help='"StatefulSetPersistentVolumeClaimRetentionPolicy describes the policy used for PVCs created from the StatefulSet VolumeClaimTemplates."'), + '#withWhenDeleted':: d.fn(help='"WhenDeleted specifies what happens to PVCs created from StatefulSet VolumeClaimTemplates when the StatefulSet is deleted. The default policy of `Retain` causes PVCs to not be affected by StatefulSet deletion. The `Delete` policy causes those PVCs to be deleted."', args=[d.arg(name='whenDeleted', type=d.T.string)]), + withWhenDeleted(whenDeleted): { whenDeleted: whenDeleted }, + '#withWhenScaled':: d.fn(help='"WhenScaled specifies what happens to PVCs created from StatefulSet VolumeClaimTemplates when the StatefulSet is scaled down. The default policy of `Retain` causes PVCs to not be affected by a scaledown. 
The `Delete` policy causes the associated PVCs for any excess pods above the replica count to be deleted."', args=[d.arg(name='whenScaled', type=d.T.string)]), + withWhenScaled(whenScaled): { whenScaled: whenScaled }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apps/v1/statefulSetSpec.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apps/v1/statefulSetSpec.libsonnet similarity index 82% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apps/v1/statefulSetSpec.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apps/v1/statefulSetSpec.libsonnet index 074efb59057..95caa6c4202 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apps/v1/statefulSetSpec.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apps/v1/statefulSetSpec.libsonnet @@ -1,6 +1,18 @@ { local d = (import 'doc-util/main.libsonnet'), '#':: d.pkg(name='statefulSetSpec', url='', help='"A StatefulSetSpec is the specification of a StatefulSet."'), + '#ordinals':: d.obj(help='"StatefulSetOrdinals describes the policy used for replica ordinal assignment in this StatefulSet."'), + ordinals: { + '#withStart':: d.fn(help="\"start is the number representing the first replica's index. It may be used to number replicas from an alternate index (eg: 1-indexed) over the default 0-indexed names, or to orchestrate progressive movement of replicas from one StatefulSet to another. If set, replica indices will be in the range:\\n [.spec.ordinals.start, .spec.ordinals.start + .spec.replicas).\\nIf unset, defaults to 0. Replica indices will be in the range:\\n [0, .spec.replicas).\"", args=[d.arg(name='start', type=d.T.integer)]), + withStart(start): { ordinals+: { start: start } }, + }, + '#persistentVolumeClaimRetentionPolicy':: d.obj(help='"StatefulSetPersistentVolumeClaimRetentionPolicy describes the policy used for PVCs created from the StatefulSet VolumeClaimTemplates."'), + persistentVolumeClaimRetentionPolicy: { + '#withWhenDeleted':: d.fn(help='"WhenDeleted specifies what happens to PVCs created from StatefulSet VolumeClaimTemplates when the StatefulSet is deleted. The default policy of `Retain` causes PVCs to not be affected by StatefulSet deletion. The `Delete` policy causes those PVCs to be deleted."', args=[d.arg(name='whenDeleted', type=d.T.string)]), + withWhenDeleted(whenDeleted): { persistentVolumeClaimRetentionPolicy+: { whenDeleted: whenDeleted } }, + '#withWhenScaled':: d.fn(help='"WhenScaled specifies what happens to PVCs created from StatefulSet VolumeClaimTemplates when the StatefulSet is scaled down. The default policy of `Retain` causes PVCs to not be affected by a scaledown. The `Delete` policy causes the associated PVCs for any excess pods above the replica count to be deleted."', args=[d.arg(name='whenScaled', type=d.T.string)]), + withWhenScaled(whenScaled): { persistentVolumeClaimRetentionPolicy+: { whenScaled: whenScaled } }, + }, '#selector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), selector: { '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. 
The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), @@ -16,12 +28,10 @@ template: { '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { template+: { metadata+: { annotations: annotations } } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { template+: { metadata+: { annotations+: annotations } } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { template+: { metadata+: { clusterName: clusterName } } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { template+: { metadata+: { creationTimestamp: creationTimestamp } } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -32,21 +42,21 @@ withFinalizers(finalizers): { template+: { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. 
Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { template+: { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { template+: { metadata+: { generateName: generateName } } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { template+: { metadata+: { generation: generation } } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. 
More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { template+: { metadata+: { labels: labels } } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { template+: { metadata+: { labels+: labels } } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { template+: { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { template+: { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { template+: { metadata+: { name: name } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { template+: { metadata+: { namespace: namespace } } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { template+: { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } } }, @@ -54,9 +64,9 @@ withOwnerReferencesMixin(ownerReferences): { template+: { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { template+: { metadata+: { resourceVersion: resourceVersion } } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { template+: { metadata+: { selfLink: selfLink } } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. 
It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { template+: { metadata+: { uid: uid } } }, }, '#spec':: d.obj(help='"PodSpec is a description of a pod."'), @@ -115,6 +125,11 @@ '#withSearchesMixin':: d.fn(help='"A list of DNS search domains for host-name lookup. This will be appended to the base search paths generated from DNSPolicy. Duplicated search paths will be removed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='searches', type=d.T.array)]), withSearchesMixin(searches): { template+: { spec+: { dnsConfig+: { searches+: if std.isArray(v=searches) then searches else [searches] } } } }, }, + '#os':: d.obj(help='"PodOS defines the OS parameters of a pod."'), + os: { + '#withName':: d.fn(help='"Name is the name of the operating system. The currently supported values are linux and windows. Additional value may be defined in future and can be one of: https://github.com/opencontainers/runtime-spec/blob/master/config.md#platform-specific-configuration Clients should expect to handle additional values and treat unrecognized values in this field as os: null"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { template+: { spec+: { os+: { name: name } } } }, + }, '#securityContext':: d.obj(help='"PodSecurityContext holds pod-level security attributes and common container settings. Some fields are also present in container.securityContext. Field values of container.securityContext take precedence over field values of PodSecurityContext."'), securityContext: { '#seLinuxOptions':: d.obj(help='"SELinuxOptions are the labels to be applied to the container"'), @@ -130,7 +145,7 @@ }, '#seccompProfile':: d.obj(help="\"SeccompProfile defines a pod/container's seccomp profile settings. Only one profile source may be set.\""), seccompProfile: { - '#withLocalhostProfile':: d.fn(help="\"localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must only be set if type is \\\"Localhost\\\".\"", args=[d.arg(name='localhostProfile', type=d.T.string)]), + '#withLocalhostProfile':: d.fn(help="\"localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must be set if type is \\\"Localhost\\\". Must NOT be set for any other type.\"", args=[d.arg(name='localhostProfile', type=d.T.string)]), withLocalhostProfile(localhostProfile): { template+: { spec+: { securityContext+: { seccompProfile+: { localhostProfile: localhostProfile } } } } }, '#withType':: d.fn(help='"type indicates which kind of seccomp profile will be applied. 
Valid options are:\\n\\nLocalhost - a profile defined in a file on the node should be used. RuntimeDefault - the container runtime default profile should be used. Unconfined - no profile should be applied."', args=[d.arg(name='type', type=d.T.string)]), withType(type): { template+: { spec+: { securityContext+: { seccompProfile+: { type: type } } } } }, @@ -141,26 +156,28 @@ withGmsaCredentialSpec(gmsaCredentialSpec): { template+: { spec+: { securityContext+: { windowsOptions+: { gmsaCredentialSpec: gmsaCredentialSpec } } } } }, '#withGmsaCredentialSpecName':: d.fn(help='"GMSACredentialSpecName is the name of the GMSA credential spec to use."', args=[d.arg(name='gmsaCredentialSpecName', type=d.T.string)]), withGmsaCredentialSpecName(gmsaCredentialSpecName): { template+: { spec+: { securityContext+: { windowsOptions+: { gmsaCredentialSpecName: gmsaCredentialSpecName } } } } }, + '#withHostProcess':: d.fn(help="\"HostProcess determines if a container should be run as a 'Host Process' container. All of a Pod's containers must have the same effective HostProcess value (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). In addition, if HostProcess is true then HostNetwork must also be set to true.\"", args=[d.arg(name='hostProcess', type=d.T.boolean)]), + withHostProcess(hostProcess): { template+: { spec+: { securityContext+: { windowsOptions+: { hostProcess: hostProcess } } } } }, '#withRunAsUserName':: d.fn(help='"The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence."', args=[d.arg(name='runAsUserName', type=d.T.string)]), withRunAsUserName(runAsUserName): { template+: { spec+: { securityContext+: { windowsOptions+: { runAsUserName: runAsUserName } } } } }, }, - '#withFsGroup':: d.fn(help="\"A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod:\\n\\n1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw----\\n\\nIf unset, the Kubelet will not modify the ownership and permissions of any volume.\"", args=[d.arg(name='fsGroup', type=d.T.integer)]), + '#withFsGroup':: d.fn(help="\"A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod:\\n\\n1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw----\\n\\nIf unset, the Kubelet will not modify the ownership and permissions of any volume. Note that this field cannot be set when spec.os.name is windows.\"", args=[d.arg(name='fsGroup', type=d.T.integer)]), withFsGroup(fsGroup): { template+: { spec+: { securityContext+: { fsGroup: fsGroup } } } }, - '#withFsGroupChangePolicy':: d.fn(help='"fsGroupChangePolicy defines behavior of changing ownership and permission of the volume before being exposed inside Pod. This field will only apply to volume types which support fsGroup based ownership(and permissions). It will have no effect on ephemeral volume types such as: secret, configmaps and emptydir. 
Valid values are \\"OnRootMismatch\\" and \\"Always\\". If not specified, \\"Always\\" is used."', args=[d.arg(name='fsGroupChangePolicy', type=d.T.string)]), + '#withFsGroupChangePolicy':: d.fn(help='"fsGroupChangePolicy defines behavior of changing ownership and permission of the volume before being exposed inside Pod. This field will only apply to volume types which support fsGroup based ownership(and permissions). It will have no effect on ephemeral volume types such as: secret, configmaps and emptydir. Valid values are \\"OnRootMismatch\\" and \\"Always\\". If not specified, \\"Always\\" is used. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='fsGroupChangePolicy', type=d.T.string)]), withFsGroupChangePolicy(fsGroupChangePolicy): { template+: { spec+: { securityContext+: { fsGroupChangePolicy: fsGroupChangePolicy } } } }, - '#withRunAsGroup':: d.fn(help='"The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container."', args=[d.arg(name='runAsGroup', type=d.T.integer)]), + '#withRunAsGroup':: d.fn(help='"The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='runAsGroup', type=d.T.integer)]), withRunAsGroup(runAsGroup): { template+: { spec+: { securityContext+: { runAsGroup: runAsGroup } } } }, '#withRunAsNonRoot':: d.fn(help='"Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence."', args=[d.arg(name='runAsNonRoot', type=d.T.boolean)]), withRunAsNonRoot(runAsNonRoot): { template+: { spec+: { securityContext+: { runAsNonRoot: runAsNonRoot } } } }, - '#withRunAsUser':: d.fn(help='"The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container."', args=[d.arg(name='runAsUser', type=d.T.integer)]), + '#withRunAsUser':: d.fn(help='"The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='runAsUser', type=d.T.integer)]), withRunAsUser(runAsUser): { template+: { spec+: { securityContext+: { runAsUser: runAsUser } } } }, - '#withSupplementalGroups':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID. 
If unspecified, no groups will be added to any container.\"", args=[d.arg(name='supplementalGroups', type=d.T.array)]), + '#withSupplementalGroups':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID, the fsGroup (if specified), and group memberships defined in the container image for the uid of the container process. If unspecified, no additional groups are added to any container. Note that group memberships defined in the container image for the uid of the container process are still effective, even if they are not included in this list. Note that this field cannot be set when spec.os.name is windows.\"", args=[d.arg(name='supplementalGroups', type=d.T.array)]), withSupplementalGroups(supplementalGroups): { template+: { spec+: { securityContext+: { supplementalGroups: if std.isArray(v=supplementalGroups) then supplementalGroups else [supplementalGroups] } } } }, - '#withSupplementalGroupsMixin':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID. If unspecified, no groups will be added to any container.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='supplementalGroups', type=d.T.array)]), + '#withSupplementalGroupsMixin':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID, the fsGroup (if specified), and group memberships defined in the container image for the uid of the container process. If unspecified, no additional groups are added to any container. Note that group memberships defined in the container image for the uid of the container process are still effective, even if they are not included in this list. Note that this field cannot be set when spec.os.name is windows.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='supplementalGroups', type=d.T.array)]), withSupplementalGroupsMixin(supplementalGroups): { template+: { spec+: { securityContext+: { supplementalGroups+: if std.isArray(v=supplementalGroups) then supplementalGroups else [supplementalGroups] } } } }, - '#withSysctls':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch."', args=[d.arg(name='sysctls', type=d.T.array)]), + '#withSysctls':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='sysctls', type=d.T.array)]), withSysctls(sysctls): { template+: { spec+: { securityContext+: { sysctls: if std.isArray(v=sysctls) then sysctls else [sysctls] } } } }, - '#withSysctlsMixin':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='sysctls', type=d.T.array)]), + '#withSysctlsMixin':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch. 
Note that this field cannot be set when spec.os.name is windows."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='sysctls', type=d.T.array)]), withSysctlsMixin(sysctls): { template+: { spec+: { securityContext+: { sysctls+: if std.isArray(v=sysctls) then sysctls else [sysctls] } } } }, }, '#withActiveDeadlineSeconds':: d.fn(help='"Optional duration in seconds the pod may be active on the node relative to StartTime before the system will actively try to mark it failed and kill associated containers. Value must be a positive integer."', args=[d.arg(name='activeDeadlineSeconds', type=d.T.integer)]), @@ -175,9 +192,9 @@ withDnsPolicy(dnsPolicy): { template+: { spec+: { dnsPolicy: dnsPolicy } } }, '#withEnableServiceLinks':: d.fn(help="\"EnableServiceLinks indicates whether information about services should be injected into pod's environment variables, matching the syntax of Docker links. Optional: Defaults to true.\"", args=[d.arg(name='enableServiceLinks', type=d.T.boolean)]), withEnableServiceLinks(enableServiceLinks): { template+: { spec+: { enableServiceLinks: enableServiceLinks } } }, - '#withEphemeralContainers':: d.fn(help="\"List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. This field is alpha-level and is only honored by servers that enable the EphemeralContainers feature.\"", args=[d.arg(name='ephemeralContainers', type=d.T.array)]), + '#withEphemeralContainers':: d.fn(help="\"List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource.\"", args=[d.arg(name='ephemeralContainers', type=d.T.array)]), withEphemeralContainers(ephemeralContainers): { template+: { spec+: { ephemeralContainers: if std.isArray(v=ephemeralContainers) then ephemeralContainers else [ephemeralContainers] } } }, - '#withEphemeralContainersMixin':: d.fn(help="\"List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. This field is alpha-level and is only honored by servers that enable the EphemeralContainers feature.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='ephemeralContainers', type=d.T.array)]), + '#withEphemeralContainersMixin':: d.fn(help="\"List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. 
In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='ephemeralContainers', type=d.T.array)]), withEphemeralContainersMixin(ephemeralContainers): { template+: { spec+: { ephemeralContainers+: if std.isArray(v=ephemeralContainers) then ephemeralContainers else [ephemeralContainers] } } }, '#withHostAliases':: d.fn(help="\"HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts file if specified. This is only valid for non-hostNetwork pods.\"", args=[d.arg(name='hostAliases', type=d.T.array)]), withHostAliases(hostAliases): { template+: { spec+: { hostAliases: if std.isArray(v=hostAliases) then hostAliases else [hostAliases] } } }, @@ -189,11 +206,13 @@ withHostNetwork(hostNetwork): { template+: { spec+: { hostNetwork: hostNetwork } } }, '#withHostPID':: d.fn(help="\"Use the host's pid namespace. Optional: Default to false.\"", args=[d.arg(name='hostPID', type=d.T.boolean)]), withHostPID(hostPID): { template+: { spec+: { hostPID: hostPID } } }, + '#withHostUsers':: d.fn(help="\"Use the host's user namespace. Optional: Default to true. If set to true or not present, the pod will be run in the host user namespace, useful for when the pod needs a feature only available to the host user namespace, such as loading a kernel module with CAP_SYS_MODULE. When set to false, a new userns is created for the pod. Setting false is useful for mitigating container breakout vulnerabilities even allowing users to run their containers as root without actually having root privileges on the host. This field is alpha-level and is only honored by servers that enable the UserNamespacesSupport feature.\"", args=[d.arg(name='hostUsers', type=d.T.boolean)]), + withHostUsers(hostUsers): { template+: { spec+: { hostUsers: hostUsers } } }, '#withHostname':: d.fn(help="\"Specifies the hostname of the Pod If not specified, the pod's hostname will be set to a system-defined value.\"", args=[d.arg(name='hostname', type=d.T.string)]), withHostname(hostname): { template+: { spec+: { hostname: hostname } } }, - '#withImagePullSecrets':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), + '#withImagePullSecrets':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), withImagePullSecrets(imagePullSecrets): { template+: { spec+: { imagePullSecrets: if std.isArray(v=imagePullSecrets) then imagePullSecrets else [imagePullSecrets] } } }, - '#withImagePullSecretsMixin':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. 
If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), + '#withImagePullSecretsMixin':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), withImagePullSecretsMixin(imagePullSecrets): { template+: { spec+: { imagePullSecrets+: if std.isArray(v=imagePullSecrets) then imagePullSecrets else [imagePullSecrets] } } }, '#withInitContainers':: d.fn(help='"List of initialization containers belonging to the pod. Init containers are executed in order prior to containers being started. If any init container fails, the pod is considered to have failed and is handled according to its restartPolicy. The name for an init container or normal container must be unique among all containers. Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes. The resourceRequirements of an init container are taken into account during scheduling by finding the highest request/limit for each resource type, and then using the max of of that value or the sum of the normal containers. Limits are applied to init containers in a similar fashion. Init containers cannot currently be added or removed. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/"', args=[d.arg(name='initContainers', type=d.T.array)]), withInitContainers(initContainers): { template+: { spec+: { initContainers: if std.isArray(v=initContainers) then initContainers else [initContainers] } } }, @@ -205,26 +224,34 @@ withNodeSelector(nodeSelector): { template+: { spec+: { nodeSelector: nodeSelector } } }, '#withNodeSelectorMixin':: d.fn(help="\"NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='nodeSelector', type=d.T.object)]), withNodeSelectorMixin(nodeSelector): { template+: { spec+: { nodeSelector+: nodeSelector } } }, - '#withOverhead':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. 
More info: https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md This field is alpha-level as of Kubernetes v1.16, and is only honored by servers that enable the PodOverhead feature."', args=[d.arg(name='overhead', type=d.T.object)]), + '#withOverhead':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md"', args=[d.arg(name='overhead', type=d.T.object)]), withOverhead(overhead): { template+: { spec+: { overhead: overhead } } }, - '#withOverheadMixin':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md This field is alpha-level as of Kubernetes v1.16, and is only honored by servers that enable the PodOverhead feature."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='overhead', type=d.T.object)]), + '#withOverheadMixin':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='overhead', type=d.T.object)]), withOverheadMixin(overhead): { template+: { spec+: { overhead+: overhead } } }, - '#withPreemptionPolicy':: d.fn(help='"PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset. This field is beta-level, gated by the NonPreemptingPriority feature-gate."', args=[d.arg(name='preemptionPolicy', type=d.T.string)]), + '#withPreemptionPolicy':: d.fn(help='"PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. 
Defaults to PreemptLowerPriority if unset."', args=[d.arg(name='preemptionPolicy', type=d.T.string)]), withPreemptionPolicy(preemptionPolicy): { template+: { spec+: { preemptionPolicy: preemptionPolicy } } }, '#withPriority':: d.fn(help='"The priority value. Various system components use this field to find the priority of the pod. When Priority Admission Controller is enabled, it prevents users from setting this field. The admission controller populates this field from PriorityClassName. The higher the value, the higher the priority."', args=[d.arg(name='priority', type=d.T.integer)]), withPriority(priority): { template+: { spec+: { priority: priority } } }, '#withPriorityClassName':: d.fn(help="\"If specified, indicates the pod's priority. \\\"system-node-critical\\\" and \\\"system-cluster-critical\\\" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default.\"", args=[d.arg(name='priorityClassName', type=d.T.string)]), withPriorityClassName(priorityClassName): { template+: { spec+: { priorityClassName: priorityClassName } } }, - '#withReadinessGates':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/0007-pod-ready%2B%2B.md"', args=[d.arg(name='readinessGates', type=d.T.array)]), + '#withReadinessGates':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates"', args=[d.arg(name='readinessGates', type=d.T.array)]), withReadinessGates(readinessGates): { template+: { spec+: { readinessGates: if std.isArray(v=readinessGates) then readinessGates else [readinessGates] } } }, - '#withReadinessGatesMixin':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/0007-pod-ready%2B%2B.md"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='readinessGates', type=d.T.array)]), + '#withReadinessGatesMixin':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='readinessGates', type=d.T.array)]), withReadinessGatesMixin(readinessGates): { template+: { spec+: { readinessGates+: if std.isArray(v=readinessGates) then readinessGates else [readinessGates] } } }, - '#withRestartPolicy':: d.fn(help='"Restart policy for all containers within the pod. One of Always, OnFailure, Never. Default to Always. 
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy"', args=[d.arg(name='restartPolicy', type=d.T.string)]), + '#withResourceClaims':: d.fn(help='"ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. The resources will be made available to those containers which consume them by name.\\n\\nThis is an alpha field and requires enabling the DynamicResourceAllocation feature gate.\\n\\nThis field is immutable."', args=[d.arg(name='resourceClaims', type=d.T.array)]), + withResourceClaims(resourceClaims): { template+: { spec+: { resourceClaims: if std.isArray(v=resourceClaims) then resourceClaims else [resourceClaims] } } }, + '#withResourceClaimsMixin':: d.fn(help='"ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. The resources will be made available to those containers which consume them by name.\\n\\nThis is an alpha field and requires enabling the DynamicResourceAllocation feature gate.\\n\\nThis field is immutable."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='resourceClaims', type=d.T.array)]), + withResourceClaimsMixin(resourceClaims): { template+: { spec+: { resourceClaims+: if std.isArray(v=resourceClaims) then resourceClaims else [resourceClaims] } } }, + '#withRestartPolicy':: d.fn(help='"Restart policy for all containers within the pod. One of Always, OnFailure, Never. In some contexts, only a subset of those values may be permitted. Default to Always. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy"', args=[d.arg(name='restartPolicy', type=d.T.string)]), withRestartPolicy(restartPolicy): { template+: { spec+: { restartPolicy: restartPolicy } } }, - '#withRuntimeClassName':: d.fn(help='"RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \\"legacy\\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md This is a beta feature as of Kubernetes v1.14."', args=[d.arg(name='runtimeClassName', type=d.T.string)]), + '#withRuntimeClassName':: d.fn(help='"RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \\"legacy\\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class"', args=[d.arg(name='runtimeClassName', type=d.T.string)]), withRuntimeClassName(runtimeClassName): { template+: { spec+: { runtimeClassName: runtimeClassName } } }, '#withSchedulerName':: d.fn(help='"If specified, the pod will be dispatched by specified scheduler. If not specified, the pod will be dispatched by default scheduler."', args=[d.arg(name='schedulerName', type=d.T.string)]), withSchedulerName(schedulerName): { template+: { spec+: { schedulerName: schedulerName } } }, + '#withSchedulingGates':: d.fn(help='"SchedulingGates is an opaque list of values that if specified will block scheduling the pod. 
If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the scheduler will not attempt to schedule the pod.\\n\\nSchedulingGates can only be set at pod creation time, and be removed only afterwards.\\n\\nThis is a beta feature enabled by the PodSchedulingReadiness feature gate."', args=[d.arg(name='schedulingGates', type=d.T.array)]), + withSchedulingGates(schedulingGates): { template+: { spec+: { schedulingGates: if std.isArray(v=schedulingGates) then schedulingGates else [schedulingGates] } } }, + '#withSchedulingGatesMixin':: d.fn(help='"SchedulingGates is an opaque list of values that if specified will block scheduling the pod. If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the scheduler will not attempt to schedule the pod.\\n\\nSchedulingGates can only be set at pod creation time, and be removed only afterwards.\\n\\nThis is a beta feature enabled by the PodSchedulingReadiness feature gate."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='schedulingGates', type=d.T.array)]), + withSchedulingGatesMixin(schedulingGates): { template+: { spec+: { schedulingGates+: if std.isArray(v=schedulingGates) then schedulingGates else [schedulingGates] } } }, '#withServiceAccount':: d.fn(help='"DeprecatedServiceAccount is a depreciated alias for ServiceAccountName. Deprecated: Use serviceAccountName instead."', args=[d.arg(name='serviceAccount', type=d.T.string)]), withServiceAccount(serviceAccount): { template+: { spec+: { serviceAccount: serviceAccount } } }, '#withServiceAccountName':: d.fn(help='"ServiceAccountName is the name of the ServiceAccount to use to run this pod. More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/"', args=[d.arg(name='serviceAccountName', type=d.T.string)]), @@ -255,12 +282,16 @@ updateStrategy: { '#rollingUpdate':: d.obj(help='"RollingUpdateStatefulSetStrategy is used to communicate parameter for RollingUpdateStatefulSetStrategyType."'), rollingUpdate: { - '#withPartition':: d.fn(help='"Partition indicates the ordinal at which the StatefulSet should be partitioned. Default value is 0."', args=[d.arg(name='partition', type=d.T.integer)]), + '#withMaxUnavailable':: d.fn(help='"IntOrString is a type that can hold an int32 or a string. When used in JSON or YAML marshalling and unmarshalling, it produces or consumes the inner type. This allows you to have, for example, a JSON field that can accept a name or number."', args=[d.arg(name='maxUnavailable', type=d.T.string)]), + withMaxUnavailable(maxUnavailable): { updateStrategy+: { rollingUpdate+: { maxUnavailable: maxUnavailable } } }, + '#withPartition':: d.fn(help='"Partition indicates the ordinal at which the StatefulSet should be partitioned for updates. During a rolling update, all pods from ordinal Replicas-1 to Partition are updated. All pods from ordinal Partition-1 to 0 remain untouched. This is helpful in being able to do a canary based deployment. The default value is 0."', args=[d.arg(name='partition', type=d.T.integer)]), withPartition(partition): { updateStrategy+: { rollingUpdate+: { partition: partition } } }, }, '#withType':: d.fn(help='"Type indicates the type of the StatefulSetUpdateStrategy. 
Default is RollingUpdate."', args=[d.arg(name='type', type=d.T.string)]), withType(type): { updateStrategy+: { type: type } }, }, + '#withMinReadySeconds':: d.fn(help='"Minimum number of seconds for which a newly created pod should be ready without any of its container crashing for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)"', args=[d.arg(name='minReadySeconds', type=d.T.integer)]), + withMinReadySeconds(minReadySeconds): { minReadySeconds: minReadySeconds }, '#withPodManagementPolicy':: d.fn(help='"podManagementPolicy controls how pods are created during initial scale up, when replacing pods on nodes, or when scaling down. The default policy is `OrderedReady`, where pods are created in increasing order (pod-0, then pod-1, etc) and the controller will wait until each pod is ready before continuing. When scaling down, the pods are removed in the opposite order. The alternative policy is `Parallel` which will create pods in parallel to match the desired scale without waiting, and on scale down will delete all pods at once."', args=[d.arg(name='podManagementPolicy', type=d.T.string)]), withPodManagementPolicy(podManagementPolicy): { podManagementPolicy: podManagementPolicy }, '#withReplicas':: d.fn(help='"replicas is the desired number of replicas of the given Template. These are replicas in the sense that they are instantiations of the same Template, but individual replicas also have a consistent identity. If unspecified, defaults to 1."', args=[d.arg(name='replicas', type=d.T.integer)]), diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apps/v1/statefulSetStatus.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apps/v1/statefulSetStatus.libsonnet similarity index 88% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apps/v1/statefulSetStatus.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apps/v1/statefulSetStatus.libsonnet index 50134d57c7e..3271d603af8 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apps/v1/statefulSetStatus.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apps/v1/statefulSetStatus.libsonnet @@ -1,6 +1,8 @@ { local d = (import 'doc-util/main.libsonnet'), '#':: d.pkg(name='statefulSetStatus', url='', help='"StatefulSetStatus represents the current state of a StatefulSet."'), + '#withAvailableReplicas':: d.fn(help='"Total number of available pods (ready for at least minReadySeconds) targeted by this statefulset."', args=[d.arg(name='availableReplicas', type=d.T.integer)]), + withAvailableReplicas(availableReplicas): { availableReplicas: availableReplicas }, '#withCollisionCount':: d.fn(help='"collisionCount is the count of hash collisions for the StatefulSet. 
The StatefulSet controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ControllerRevision."', args=[d.arg(name='collisionCount', type=d.T.integer)]), withCollisionCount(collisionCount): { collisionCount: collisionCount }, '#withConditions':: d.fn(help="\"Represents the latest available observations of a statefulset's current state.\"", args=[d.arg(name='conditions', type=d.T.array)]), @@ -13,7 +15,7 @@ withCurrentRevision(currentRevision): { currentRevision: currentRevision }, '#withObservedGeneration':: d.fn(help="\"observedGeneration is the most recent generation observed for this StatefulSet. It corresponds to the StatefulSet's generation, which is updated on mutation by the API Server.\"", args=[d.arg(name='observedGeneration', type=d.T.integer)]), withObservedGeneration(observedGeneration): { observedGeneration: observedGeneration }, - '#withReadyReplicas':: d.fn(help='"readyReplicas is the number of Pods created by the StatefulSet controller that have a Ready Condition."', args=[d.arg(name='readyReplicas', type=d.T.integer)]), + '#withReadyReplicas':: d.fn(help='"readyReplicas is the number of pods created for this StatefulSet with a Ready Condition."', args=[d.arg(name='readyReplicas', type=d.T.integer)]), withReadyReplicas(readyReplicas): { readyReplicas: readyReplicas }, '#withReplicas':: d.fn(help='"replicas is the number of Pods created by the StatefulSet controller."', args=[d.arg(name='replicas', type=d.T.integer)]), withReplicas(replicas): { replicas: replicas }, diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apps/v1/statefulSetUpdateStrategy.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apps/v1/statefulSetUpdateStrategy.libsonnet similarity index 52% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apps/v1/statefulSetUpdateStrategy.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apps/v1/statefulSetUpdateStrategy.libsonnet index 765e934fc3f..de3704706f9 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apps/v1/statefulSetUpdateStrategy.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apps/v1/statefulSetUpdateStrategy.libsonnet @@ -3,7 +3,9 @@ '#':: d.pkg(name='statefulSetUpdateStrategy', url='', help='"StatefulSetUpdateStrategy indicates the strategy that the StatefulSet controller will use to perform updates. It includes any additional parameters necessary to perform the update for the indicated strategy."'), '#rollingUpdate':: d.obj(help='"RollingUpdateStatefulSetStrategy is used to communicate parameter for RollingUpdateStatefulSetStrategyType."'), rollingUpdate: { - '#withPartition':: d.fn(help='"Partition indicates the ordinal at which the StatefulSet should be partitioned. Default value is 0."', args=[d.arg(name='partition', type=d.T.integer)]), + '#withMaxUnavailable':: d.fn(help='"IntOrString is a type that can hold an int32 or a string. When used in JSON or YAML marshalling and unmarshalling, it produces or consumes the inner type. 
This allows you to have, for example, a JSON field that can accept a name or number."', args=[d.arg(name='maxUnavailable', type=d.T.string)]), + withMaxUnavailable(maxUnavailable): { rollingUpdate+: { maxUnavailable: maxUnavailable } }, + '#withPartition':: d.fn(help='"Partition indicates the ordinal at which the StatefulSet should be partitioned for updates. During a rolling update, all pods from ordinal Replicas-1 to Partition are updated. All pods from ordinal Partition-1 to 0 remain untouched. This is helpful in being able to do a canary based deployment. The default value is 0."', args=[d.arg(name='partition', type=d.T.integer)]), withPartition(partition): { rollingUpdate+: { partition: partition } }, }, '#withType':: d.fn(help='"Type indicates the type of the StatefulSetUpdateStrategy. Default is RollingUpdate."', args=[d.arg(name='type', type=d.T.string)]), diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authentication/main.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authentication/main.libsonnet similarity index 79% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authentication/main.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authentication/main.libsonnet index 79f614c9bef..91f176810b8 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authentication/main.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authentication/main.libsonnet @@ -2,5 +2,6 @@ local d = (import 'doc-util/main.libsonnet'), '#':: d.pkg(name='authentication', url='', help=''), v1: (import 'v1/main.libsonnet'), + v1alpha1: (import 'v1alpha1/main.libsonnet'), v1beta1: (import 'v1beta1/main.libsonnet'), } diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authentication/v1/boundObjectReference.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authentication/v1/boundObjectReference.libsonnet similarity index 100% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authentication/v1/boundObjectReference.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authentication/v1/boundObjectReference.libsonnet diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authentication/v1/main.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authentication/v1/main.libsonnet similarity index 80% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authentication/v1/main.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authentication/v1/main.libsonnet index 05cedc80c76..7953f90a2a7 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authentication/v1/main.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authentication/v1/main.libsonnet @@ -2,6 +2,8 @@ local d = (import 'doc-util/main.libsonnet'), '#':: d.pkg(name='v1', url='', help=''), boundObjectReference: (import 'boundObjectReference.libsonnet'), + selfSubjectReview: (import 'selfSubjectReview.libsonnet'), + selfSubjectReviewStatus: (import 'selfSubjectReviewStatus.libsonnet'), tokenRequest: (import 'tokenRequest.libsonnet'), tokenRequestSpec: (import 
'tokenRequestSpec.libsonnet'), tokenRequestStatus: (import 'tokenRequestStatus.libsonnet'), diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authentication/v1/selfSubjectReview.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authentication/v1/selfSubjectReview.libsonnet new file mode 100644 index 00000000000..f213497b811 --- /dev/null +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authentication/v1/selfSubjectReview.libsonnet @@ -0,0 +1,54 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='selfSubjectReview', url='', help='"SelfSubjectReview contains the user information that the kube-apiserver has about the user making this request. When using impersonation, users will receive the user info of the user being impersonated. If impersonation or request header authentication is used, any extra keys will have their case ignored and returned as lowercase."'), + '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), + metadata: { + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + withAnnotations(annotations): { metadata+: { annotations: annotations } }, + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, + '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), + withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, + '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), + withDeletionGracePeriodSeconds(deletionGracePeriodSeconds): { metadata+: { deletionGracePeriodSeconds: deletionGracePeriodSeconds } }, + '#withDeletionTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='deletionTimestamp', type=d.T.string)]), + withDeletionTimestamp(deletionTimestamp): { metadata+: { deletionTimestamp: deletionTimestamp } }, + '#withFinalizers':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. 
If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."', args=[d.arg(name='finalizers', type=d.T.array)]), + withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, + '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), + withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + withGenerateName(generateName): { metadata+: { generateName: generateName } }, + '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), + withGeneration(generation): { metadata+: { generation: generation } }, + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), + withLabels(labels): { metadata+: { labels: labels } }, + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + withLabelsMixin(labels): { metadata+: { labels+: labels } }, + '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), + withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, + '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), + withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { metadata+: { name: name } }, + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + withNamespace(namespace): { metadata+: { namespace: namespace } }, + '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. 
There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), + withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, + '#withOwnerReferencesMixin':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ownerReferences', type=d.T.array)]), + withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, + '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), + withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), + withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), + withUid(uid): { metadata+: { uid: uid } }, + }, + '#new':: d.fn(help='new returns an instance of SelfSubjectReview', args=[d.arg(name='name', type=d.T.string)]), + new(name): { + apiVersion: 'authentication.k8s.io/v1', + kind: 'SelfSubjectReview', + } + self.metadata.withName(name=name), + '#mixin': 'ignore', + mixin: self, +} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authentication/v1/selfSubjectReviewStatus.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authentication/v1/selfSubjectReviewStatus.libsonnet new file mode 100644 index 00000000000..a50f9fad1b4 --- /dev/null +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authentication/v1/selfSubjectReviewStatus.libsonnet @@ -0,0 +1,21 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='selfSubjectReviewStatus', url='', help='"SelfSubjectReviewStatus is filled by the kube-apiserver and sent back to a user."'), + '#userInfo':: d.obj(help='"UserInfo holds the information about the user needed to implement the user.Info interface."'), + userInfo: { + '#withExtra':: d.fn(help='"Any additional information provided by the authenticator."', args=[d.arg(name='extra', type=d.T.object)]), + withExtra(extra): { userInfo+: { extra: extra } }, + '#withExtraMixin':: d.fn(help='"Any additional information provided by the authenticator."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='extra', type=d.T.object)]), + withExtraMixin(extra): { userInfo+: { extra+: extra } }, + '#withGroups':: d.fn(help='"The names of groups this user is a part of."', args=[d.arg(name='groups', type=d.T.array)]), + withGroups(groups): { userInfo+: { groups: if std.isArray(v=groups) then groups else [groups] } }, + '#withGroupsMixin':: d.fn(help='"The names of groups this user is a part of."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='groups', type=d.T.array)]), + withGroupsMixin(groups): { userInfo+: { groups+: if std.isArray(v=groups) then groups else [groups] } }, + '#withUid':: d.fn(help='"A unique value that identifies this user across time. 
If this user is deleted and another user by the same name is added, they will have different UIDs."', args=[d.arg(name='uid', type=d.T.string)]), + withUid(uid): { userInfo+: { uid: uid } }, + '#withUsername':: d.fn(help='"The name that uniquely identifies this user among all active users."', args=[d.arg(name='username', type=d.T.string)]), + withUsername(username): { userInfo+: { username: username } }, + }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authentication/v1/tokenRequest.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authentication/v1/tokenRequest.libsonnet similarity index 80% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authentication/v1/tokenRequest.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authentication/v1/tokenRequest.libsonnet index 97c0634dd51..bb17efa3821 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authentication/v1/tokenRequest.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authentication/v1/tokenRequest.libsonnet @@ -3,12 +3,10 @@ '#':: d.pkg(name='tokenRequest', url='', help='"TokenRequest requests a token for a given service account."'), '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. 
This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -19,21 +17,21 @@ withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. 
This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { metadata+: { generateName: generateName } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { metadata+: { labels+: labels } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". 
The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { metadata+: { namespace: namespace } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, @@ -41,9 +39,9 @@ withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. 
They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { metadata+: { uid: uid } }, }, '#new':: d.fn(help='new returns an instance of TokenRequest', args=[d.arg(name='name', type=d.T.string)]), @@ -64,9 +62,9 @@ '#withUid':: d.fn(help='"UID of the referent."', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { spec+: { boundObjectRef+: { uid: uid } } }, }, - '#withAudiences':: d.fn(help='"Audiences are the intendend audiences of the token. A recipient of a token must identitfy themself with an identifier in the list of audiences of the token, and otherwise should reject the token. A token issued for multiple audiences may be used to authenticate against any of the audiences listed but implies a high degree of trust between the target audiences."', args=[d.arg(name='audiences', type=d.T.array)]), + '#withAudiences':: d.fn(help='"Audiences are the intendend audiences of the token. A recipient of a token must identify themself with an identifier in the list of audiences of the token, and otherwise should reject the token. A token issued for multiple audiences may be used to authenticate against any of the audiences listed but implies a high degree of trust between the target audiences."', args=[d.arg(name='audiences', type=d.T.array)]), withAudiences(audiences): { spec+: { audiences: if std.isArray(v=audiences) then audiences else [audiences] } }, - '#withAudiencesMixin':: d.fn(help='"Audiences are the intendend audiences of the token. A recipient of a token must identitfy themself with an identifier in the list of audiences of the token, and otherwise should reject the token. 
A token issued for multiple audiences may be used to authenticate against any of the audiences listed but implies a high degree of trust between the target audiences."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='audiences', type=d.T.array)]), + '#withAudiencesMixin':: d.fn(help='"Audiences are the intendend audiences of the token. A recipient of a token must identify themself with an identifier in the list of audiences of the token, and otherwise should reject the token. A token issued for multiple audiences may be used to authenticate against any of the audiences listed but implies a high degree of trust between the target audiences."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='audiences', type=d.T.array)]), withAudiencesMixin(audiences): { spec+: { audiences+: if std.isArray(v=audiences) then audiences else [audiences] } }, '#withExpirationSeconds':: d.fn(help="\"ExpirationSeconds is the requested duration of validity of the request. The token issuer may return a token with a different validity duration so a client needs to check the 'expiration' field in a response.\"", args=[d.arg(name='expirationSeconds', type=d.T.integer)]), withExpirationSeconds(expirationSeconds): { spec+: { expirationSeconds: expirationSeconds } }, diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authentication/v1/tokenRequestSpec.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authentication/v1/tokenRequestSpec.libsonnet similarity index 68% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authentication/v1/tokenRequestSpec.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authentication/v1/tokenRequestSpec.libsonnet index 534a6fb1be6..5666b2da721 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authentication/v1/tokenRequestSpec.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authentication/v1/tokenRequestSpec.libsonnet @@ -12,9 +12,9 @@ '#withUid':: d.fn(help='"UID of the referent."', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { boundObjectRef+: { uid: uid } }, }, - '#withAudiences':: d.fn(help='"Audiences are the intendend audiences of the token. A recipient of a token must identitfy themself with an identifier in the list of audiences of the token, and otherwise should reject the token. A token issued for multiple audiences may be used to authenticate against any of the audiences listed but implies a high degree of trust between the target audiences."', args=[d.arg(name='audiences', type=d.T.array)]), + '#withAudiences':: d.fn(help='"Audiences are the intendend audiences of the token. A recipient of a token must identify themself with an identifier in the list of audiences of the token, and otherwise should reject the token. A token issued for multiple audiences may be used to authenticate against any of the audiences listed but implies a high degree of trust between the target audiences."', args=[d.arg(name='audiences', type=d.T.array)]), withAudiences(audiences): { audiences: if std.isArray(v=audiences) then audiences else [audiences] }, - '#withAudiencesMixin':: d.fn(help='"Audiences are the intendend audiences of the token. 
A recipient of a token must identitfy themself with an identifier in the list of audiences of the token, and otherwise should reject the token. A token issued for multiple audiences may be used to authenticate against any of the audiences listed but implies a high degree of trust between the target audiences."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='audiences', type=d.T.array)]), + '#withAudiencesMixin':: d.fn(help='"Audiences are the intendend audiences of the token. A recipient of a token must identify themself with an identifier in the list of audiences of the token, and otherwise should reject the token. A token issued for multiple audiences may be used to authenticate against any of the audiences listed but implies a high degree of trust between the target audiences."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='audiences', type=d.T.array)]), withAudiencesMixin(audiences): { audiences+: if std.isArray(v=audiences) then audiences else [audiences] }, '#withExpirationSeconds':: d.fn(help="\"ExpirationSeconds is the requested duration of validity of the request. The token issuer may return a token with a different validity duration so a client needs to check the 'expiration' field in a response.\"", args=[d.arg(name='expirationSeconds', type=d.T.integer)]), withExpirationSeconds(expirationSeconds): { expirationSeconds: expirationSeconds }, diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authentication/v1/tokenRequestStatus.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authentication/v1/tokenRequestStatus.libsonnet similarity index 100% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authentication/v1/tokenRequestStatus.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authentication/v1/tokenRequestStatus.libsonnet diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authentication/v1/tokenReview.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authentication/v1/tokenReview.libsonnet similarity index 85% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authentication/v1/tokenReview.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authentication/v1/tokenReview.libsonnet index c23b439d80c..e91e9bccf47 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authentication/v1/tokenReview.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authentication/v1/tokenReview.libsonnet @@ -3,12 +3,10 @@ '#':: d.pkg(name='tokenReview', url='', help='"TokenReview attempts to authenticate a token to a known user. Note: TokenReview requests may be cached by the webhook token authenticator plugin in the kube-apiserver."'), '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. 
More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -19,21 +17,21 @@ withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. 
If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { metadata+: { generateName: generateName } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { metadata+: { labels+: labels } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. 
Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { metadata+: { namespace: namespace } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, @@ -41,9 +39,9 @@ withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. 
Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { metadata+: { uid: uid } }, }, '#new':: d.fn(help='new returns an instance of TokenReview', args=[d.arg(name='name', type=d.T.string)]), diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authentication/v1/tokenReviewSpec.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authentication/v1/tokenReviewSpec.libsonnet similarity index 100% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authentication/v1/tokenReviewSpec.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authentication/v1/tokenReviewSpec.libsonnet diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authentication/v1/tokenReviewStatus.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authentication/v1/tokenReviewStatus.libsonnet similarity index 100% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authentication/v1/tokenReviewStatus.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authentication/v1/tokenReviewStatus.libsonnet diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authentication/v1/userInfo.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authentication/v1/userInfo.libsonnet similarity index 100% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authentication/v1/userInfo.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authentication/v1/userInfo.libsonnet diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authentication/v1alpha1/main.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authentication/v1alpha1/main.libsonnet new file mode 100644 index 00000000000..a21668c4edb --- /dev/null +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authentication/v1alpha1/main.libsonnet @@ -0,0 +1,6 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='v1alpha1', url='', help=''), + selfSubjectReview: (import 'selfSubjectReview.libsonnet'), + selfSubjectReviewStatus: (import 'selfSubjectReviewStatus.libsonnet'), +} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authentication/v1alpha1/selfSubjectReview.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authentication/v1alpha1/selfSubjectReview.libsonnet new file mode 100644 index 00000000000..9e2551741b5 --- /dev/null +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authentication/v1alpha1/selfSubjectReview.libsonnet @@ -0,0 +1,54 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='selfSubjectReview', url='', help='"SelfSubjectReview contains the user information that the kube-apiserver has about the user making this request. When using impersonation, users will receive the user info of the user being impersonated. 
If impersonation or request header authentication is used, any extra keys will have their case ignored and returned as lowercase."'), + '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), + metadata: { + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + withAnnotations(annotations): { metadata+: { annotations: annotations } }, + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, + '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), + withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, + '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), + withDeletionGracePeriodSeconds(deletionGracePeriodSeconds): { metadata+: { deletionGracePeriodSeconds: deletionGracePeriodSeconds } }, + '#withDeletionTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='deletionTimestamp', type=d.T.string)]), + withDeletionTimestamp(deletionTimestamp): { metadata+: { deletionTimestamp: deletionTimestamp } }, + '#withFinalizers':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. 
Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."', args=[d.arg(name='finalizers', type=d.T.array)]), + withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, + '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), + withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + withGenerateName(generateName): { metadata+: { generateName: generateName } }, + '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), + withGeneration(generation): { metadata+: { generation: generation } }, + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), + withLabels(labels): { metadata+: { labels: labels } }, + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + withLabelsMixin(labels): { metadata+: { labels+: labels } }, + '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), + withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, + '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), + withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { metadata+: { name: name } }, + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + withNamespace(namespace): { metadata+: { namespace: namespace } }, + '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), + withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, + '#withOwnerReferencesMixin':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. 
If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ownerReferences', type=d.T.array)]), + withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, + '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), + withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), + withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), + withUid(uid): { metadata+: { uid: uid } }, + }, + '#new':: d.fn(help='new returns an instance of SelfSubjectReview', args=[d.arg(name='name', type=d.T.string)]), + new(name): { + apiVersion: 'authentication.k8s.io/v1alpha1', + kind: 'SelfSubjectReview', + } + self.metadata.withName(name=name), + '#mixin': 'ignore', + mixin: self, +} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authentication/v1alpha1/selfSubjectReviewStatus.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authentication/v1alpha1/selfSubjectReviewStatus.libsonnet new file mode 100644 index 00000000000..a50f9fad1b4 --- /dev/null +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authentication/v1alpha1/selfSubjectReviewStatus.libsonnet @@ -0,0 +1,21 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='selfSubjectReviewStatus', url='', help='"SelfSubjectReviewStatus is filled by the kube-apiserver and sent back to a user."'), + '#userInfo':: d.obj(help='"UserInfo holds the information about the user needed to implement the user.Info interface."'), + userInfo: { + '#withExtra':: d.fn(help='"Any additional information provided by the authenticator."', args=[d.arg(name='extra', type=d.T.object)]), + withExtra(extra): { userInfo+: { extra: extra } }, + '#withExtraMixin':: d.fn(help='"Any additional information provided by the authenticator."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='extra', type=d.T.object)]), + withExtraMixin(extra): { userInfo+: { extra+: extra } }, + '#withGroups':: d.fn(help='"The names of groups this user is a part of."', args=[d.arg(name='groups', type=d.T.array)]), + withGroups(groups): { userInfo+: { groups: if std.isArray(v=groups) then groups else [groups] } }, + '#withGroupsMixin':: d.fn(help='"The names of groups this user is a part of."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='groups', type=d.T.array)]), + withGroupsMixin(groups): { userInfo+: { groups+: if std.isArray(v=groups) then groups else [groups] } }, + '#withUid':: d.fn(help='"A unique value that identifies this user across time. 
If this user is deleted and another user by the same name is added, they will have different UIDs."', args=[d.arg(name='uid', type=d.T.string)]), + withUid(uid): { userInfo+: { uid: uid } }, + '#withUsername':: d.fn(help='"The name that uniquely identifies this user among all active users."', args=[d.arg(name='username', type=d.T.string)]), + withUsername(username): { userInfo+: { username: username } }, + }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authentication/v1beta1/main.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authentication/v1beta1/main.libsonnet new file mode 100644 index 00000000000..4200edfc4c3 --- /dev/null +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authentication/v1beta1/main.libsonnet @@ -0,0 +1,6 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='v1beta1', url='', help=''), + selfSubjectReview: (import 'selfSubjectReview.libsonnet'), + selfSubjectReviewStatus: (import 'selfSubjectReviewStatus.libsonnet'), +} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authentication/v1beta1/selfSubjectReview.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authentication/v1beta1/selfSubjectReview.libsonnet new file mode 100644 index 00000000000..2fadc08a1af --- /dev/null +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authentication/v1beta1/selfSubjectReview.libsonnet @@ -0,0 +1,54 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='selfSubjectReview', url='', help='"SelfSubjectReview contains the user information that the kube-apiserver has about the user making this request. When using impersonation, users will receive the user info of the user being impersonated. If impersonation or request header authentication is used, any extra keys will have their case ignored and returned as lowercase."'), + '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), + metadata: { + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + withAnnotations(annotations): { metadata+: { annotations: annotations } }, + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, + '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. 
Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), + withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, + '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), + withDeletionGracePeriodSeconds(deletionGracePeriodSeconds): { metadata+: { deletionGracePeriodSeconds: deletionGracePeriodSeconds } }, + '#withDeletionTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='deletionTimestamp', type=d.T.string)]), + withDeletionTimestamp(deletionTimestamp): { metadata+: { deletionTimestamp: deletionTimestamp } }, + '#withFinalizers':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."', args=[d.arg(name='finalizers', type=d.T.array)]), + withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, + '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), + withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. 
If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + withGenerateName(generateName): { metadata+: { generateName: generateName } }, + '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), + withGeneration(generation): { metadata+: { generation: generation } }, + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), + withLabels(labels): { metadata+: { labels: labels } }, + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + withLabelsMixin(labels): { metadata+: { labels+: labels } }, + '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), + withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, + '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), + withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { metadata+: { name: name } }, + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + withNamespace(namespace): { metadata+: { namespace: namespace } }, + '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), + withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, + '#withOwnerReferencesMixin':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ownerReferences', type=d.T.array)]), + withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, + '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), + withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), + withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), + withUid(uid): { metadata+: { uid: uid } }, + }, + '#new':: d.fn(help='new returns an instance of SelfSubjectReview', args=[d.arg(name='name', type=d.T.string)]), + new(name): { + apiVersion: 'authentication.k8s.io/v1beta1', + kind: 'SelfSubjectReview', + } + self.metadata.withName(name=name), + '#mixin': 'ignore', + mixin: self, +} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authentication/v1beta1/selfSubjectReviewStatus.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authentication/v1beta1/selfSubjectReviewStatus.libsonnet new file mode 100644 index 00000000000..a50f9fad1b4 --- /dev/null +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authentication/v1beta1/selfSubjectReviewStatus.libsonnet @@ -0,0 +1,21 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='selfSubjectReviewStatus', url='', help='"SelfSubjectReviewStatus is filled by the kube-apiserver and sent back to a user."'), + '#userInfo':: d.obj(help='"UserInfo holds the information about the user needed to implement the user.Info interface."'), + userInfo: { + '#withExtra':: d.fn(help='"Any additional information provided by the authenticator."', args=[d.arg(name='extra', type=d.T.object)]), + withExtra(extra): { userInfo+: { extra: extra } }, + '#withExtraMixin':: d.fn(help='"Any additional information provided by the authenticator."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='extra', type=d.T.object)]), + withExtraMixin(extra): { userInfo+: { extra+: extra } }, + '#withGroups':: d.fn(help='"The names of groups this user is a part of."', args=[d.arg(name='groups', type=d.T.array)]), + withGroups(groups): { userInfo+: { groups: if std.isArray(v=groups) then groups else [groups] } }, + '#withGroupsMixin':: d.fn(help='"The names of groups this user is a part of."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='groups', type=d.T.array)]), + withGroupsMixin(groups): { userInfo+: { groups+: if std.isArray(v=groups) then groups else [groups] } }, + '#withUid':: d.fn(help='"A unique value that identifies this user across time. 
If this user is deleted and another user by the same name is added, they will have different UIDs."', args=[d.arg(name='uid', type=d.T.string)]), + withUid(uid): { userInfo+: { uid: uid } }, + '#withUsername':: d.fn(help='"The name that uniquely identifies this user among all active users."', args=[d.arg(name='username', type=d.T.string)]), + withUsername(username): { userInfo+: { username: username } }, + }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/main.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authorization/main.libsonnet similarity index 75% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/main.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authorization/main.libsonnet index c56c20bb553..3f444d121b0 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/main.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authorization/main.libsonnet @@ -2,5 +2,4 @@ local d = (import 'doc-util/main.libsonnet'), '#':: d.pkg(name='authorization', url='', help=''), v1: (import 'v1/main.libsonnet'), - v1beta1: (import 'v1beta1/main.libsonnet'), } diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1/localSubjectAccessReview.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authorization/v1/localSubjectAccessReview.libsonnet similarity index 87% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1/localSubjectAccessReview.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authorization/v1/localSubjectAccessReview.libsonnet index 3c0b160e00a..78cc42d028c 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1/localSubjectAccessReview.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authorization/v1/localSubjectAccessReview.libsonnet @@ -3,12 +3,10 @@ '#':: d.pkg(name='localSubjectAccessReview', url='', help='"LocalSubjectAccessReview checks whether or not a user or group can perform an action in a given namespace. Having a namespace scoped resource makes it much easier to grant namespace scoped policy that includes permissions checking."'), '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -19,21 +17,21 @@ withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. 
Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { metadata+: { generateName: generateName } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. 
More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { metadata+: { labels+: labels } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. 
An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { metadata+: { namespace: namespace } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, @@ -41,9 +39,9 @@ withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { metadata+: { uid: uid } }, }, '#new':: d.fn(help='new returns an instance of LocalSubjectAccessReview', args=[d.arg(name='name', type=d.T.string)]), diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1/main.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authorization/v1/main.libsonnet similarity index 100% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1/main.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authorization/v1/main.libsonnet diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1/nonResourceAttributes.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authorization/v1/nonResourceAttributes.libsonnet similarity index 100% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1/nonResourceAttributes.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authorization/v1/nonResourceAttributes.libsonnet diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1/nonResourceRule.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authorization/v1/nonResourceRule.libsonnet similarity index 100% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1/nonResourceRule.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authorization/v1/nonResourceRule.libsonnet diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1/resourceAttributes.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authorization/v1/resourceAttributes.libsonnet similarity index 100% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1/resourceAttributes.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authorization/v1/resourceAttributes.libsonnet diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1/resourceRule.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authorization/v1/resourceRule.libsonnet similarity index 100% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1/resourceRule.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authorization/v1/resourceRule.libsonnet diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1/selfSubjectAccessReview.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authorization/v1/selfSubjectAccessReview.libsonnet similarity index 86% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1/selfSubjectAccessReview.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authorization/v1/selfSubjectAccessReview.libsonnet index 4ea9baedc76..d8fc6e1c910 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1/selfSubjectAccessReview.libsonnet +++ 
b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authorization/v1/selfSubjectAccessReview.libsonnet @@ -3,12 +3,10 @@ '#':: d.pkg(name='selfSubjectAccessReview', url='', help='"SelfSubjectAccessReview checks whether or the current user can perform an action. Not filling in a spec.namespace means \\"in all namespaces\\". Self is a special case, because users should always be able to check whether they can perform an action"'), '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. 
Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -19,21 +17,21 @@ withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { metadata+: { generateName: generateName } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. 
Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { metadata+: { labels+: labels } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. 
Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { metadata+: { namespace: namespace } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, @@ -41,9 +39,9 @@ withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. 
Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { metadata+: { uid: uid } }, }, '#new':: d.fn(help='new returns an instance of SelfSubjectAccessReview', args=[d.arg(name='name', type=d.T.string)]), diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1/selfSubjectAccessReviewSpec.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authorization/v1/selfSubjectAccessReviewSpec.libsonnet similarity index 100% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1/selfSubjectAccessReviewSpec.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authorization/v1/selfSubjectAccessReviewSpec.libsonnet diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1/selfSubjectRulesReview.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authorization/v1/selfSubjectRulesReview.libsonnet similarity index 84% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1/selfSubjectRulesReview.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authorization/v1/selfSubjectRulesReview.libsonnet index 5fddc7f72a2..a92ebdd26c2 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1/selfSubjectRulesReview.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authorization/v1/selfSubjectRulesReview.libsonnet @@ -3,12 +3,10 @@ '#':: d.pkg(name='selfSubjectRulesReview', url='', help="\"SelfSubjectRulesReview enumerates the set of actions the current user can perform within a namespace. The returned list of actions may be incomplete depending on the server's authorization mode, and any errors experienced during the evaluation. SelfSubjectRulesReview should be used by UIs to show/hide actions, or to quickly let an end user reason about their permissions. It should NOT Be used by external systems to drive authorization decisions as this raises confused deputy, cache lifetime/revocation, and correctness concerns. 
SubjectAccessReview, and LocalAccessReview are the correct way to defer authorization decisions to the API server.\""), '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -19,21 +17,21 @@ withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. 
If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { metadata+: { generateName: generateName } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. 
More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { metadata+: { labels+: labels } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { metadata+: { namespace: namespace } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, @@ -41,9 +39,9 @@ withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. 
It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { metadata+: { uid: uid } }, }, '#new':: d.fn(help='new returns an instance of SelfSubjectRulesReview', args=[d.arg(name='name', type=d.T.string)]), @@ -51,7 +49,7 @@ apiVersion: 'authorization.k8s.io/v1', kind: 'SelfSubjectRulesReview', } + self.metadata.withName(name=name), - '#spec':: d.obj(help=''), + '#spec':: d.obj(help='"SelfSubjectRulesReviewSpec defines the specification for SelfSubjectRulesReview."'), spec: { '#withNamespace':: d.fn(help='"Namespace to evaluate rules for. Required."', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { spec+: { namespace: namespace } }, diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1beta1/selfSubjectRulesReviewSpec.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authorization/v1/selfSubjectRulesReviewSpec.libsonnet similarity index 64% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1beta1/selfSubjectRulesReviewSpec.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authorization/v1/selfSubjectRulesReviewSpec.libsonnet index 901f17b4af6..d38eb6ffaea 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1beta1/selfSubjectRulesReviewSpec.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authorization/v1/selfSubjectRulesReviewSpec.libsonnet @@ -1,6 +1,6 @@ { local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='selfSubjectRulesReviewSpec', url='', help=''), + '#':: d.pkg(name='selfSubjectRulesReviewSpec', url='', help='"SelfSubjectRulesReviewSpec defines the specification for SelfSubjectRulesReview."'), '#withNamespace':: d.fn(help='"Namespace to evaluate rules for. 
Required."', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { namespace: namespace }, '#mixin': 'ignore', diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1/subjectAccessReview.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authorization/v1/subjectAccessReview.libsonnet similarity index 87% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1/subjectAccessReview.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authorization/v1/subjectAccessReview.libsonnet index 9f02e24779a..0f9e2dfde47 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1/subjectAccessReview.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authorization/v1/subjectAccessReview.libsonnet @@ -3,12 +3,10 @@ '#':: d.pkg(name='subjectAccessReview', url='', help='"SubjectAccessReview checks whether or not a user or group can perform an action."'), '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. 
This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -19,21 +17,21 @@ withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. 
This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { metadata+: { generateName: generateName } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { metadata+: { labels+: labels } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". 
The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { metadata+: { namespace: namespace } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, @@ -41,9 +39,9 @@ withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. 
They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { metadata+: { uid: uid } }, }, '#new':: d.fn(help='new returns an instance of SubjectAccessReview', args=[d.arg(name='name', type=d.T.string)]), diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1/subjectAccessReviewSpec.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authorization/v1/subjectAccessReviewSpec.libsonnet similarity index 100% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1/subjectAccessReviewSpec.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authorization/v1/subjectAccessReviewSpec.libsonnet diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1/subjectAccessReviewStatus.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authorization/v1/subjectAccessReviewStatus.libsonnet similarity index 100% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1/subjectAccessReviewStatus.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authorization/v1/subjectAccessReviewStatus.libsonnet diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1/subjectRulesReviewStatus.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authorization/v1/subjectRulesReviewStatus.libsonnet similarity index 100% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1/subjectRulesReviewStatus.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authorization/v1/subjectRulesReviewStatus.libsonnet diff --git 
a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/main.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/main.libsonnet similarity index 60% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/main.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/main.libsonnet index 69a8e08c746..d93fba44a27 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/main.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/main.libsonnet @@ -2,6 +2,5 @@ local d = (import 'doc-util/main.libsonnet'), '#':: d.pkg(name='autoscaling', url='', help=''), v1: (import 'v1/main.libsonnet'), - v2beta1: (import 'v2beta1/main.libsonnet'), - v2beta2: (import 'v2beta2/main.libsonnet'), + v2: (import 'v2/main.libsonnet'), } diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v1/crossVersionObjectReference.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v1/crossVersionObjectReference.libsonnet new file mode 100644 index 00000000000..04c0e544aa8 --- /dev/null +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v1/crossVersionObjectReference.libsonnet @@ -0,0 +1,10 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='crossVersionObjectReference', url='', help='"CrossVersionObjectReference contains enough information to let you identify the referred resource."'), + '#withKind':: d.fn(help='"kind is the kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds"', args=[d.arg(name='kind', type=d.T.string)]), + withKind(kind): { kind: kind }, + '#withName':: d.fn(help='"name is the name of the referent; More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { name: name }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v1/horizontalPodAutoscaler.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v1/horizontalPodAutoscaler.libsonnet similarity index 79% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v1/horizontalPodAutoscaler.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v1/horizontalPodAutoscaler.libsonnet index e44700dbf7e..8af49e01cb2 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v1/horizontalPodAutoscaler.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v1/horizontalPodAutoscaler.libsonnet @@ -3,12 +3,10 @@ '#':: d.pkg(name='horizontalPodAutoscaler', url='', help='"configuration of a horizontal pod autoscaler."'), '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. 
More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -19,21 +17,21 @@ withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. 
If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { metadata+: { generateName: generateName } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { metadata+: { labels+: labels } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. 
Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { metadata+: { namespace: namespace } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, @@ -41,9 +39,9 @@ withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. 
Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { metadata+: { uid: uid } }, }, '#new':: d.fn(help='new returns an instance of HorizontalPodAutoscaler', args=[d.arg(name='name', type=d.T.string)]), @@ -55,18 +53,18 @@ spec: { '#scaleTargetRef':: d.obj(help='"CrossVersionObjectReference contains enough information to let you identify the referred resource."'), scaleTargetRef: { - '#withApiVersion':: d.fn(help='"API version of the referent"', args=[d.arg(name='apiVersion', type=d.T.string)]), + '#withApiVersion':: d.fn(help='"apiVersion is the API version of the referent"', args=[d.arg(name='apiVersion', type=d.T.string)]), withApiVersion(apiVersion): { spec+: { scaleTargetRef+: { apiVersion: apiVersion } } }, - '#withKind':: d.fn(help='"Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds\\', args=[d.arg(name='kind', type=d.T.string)]), + '#withKind':: d.fn(help='"kind is the kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds"', args=[d.arg(name='kind', type=d.T.string)]), withKind(kind): { spec+: { scaleTargetRef+: { kind: kind } } }, - '#withName':: d.fn(help='"Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"name is the name of the referent; More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { spec+: { scaleTargetRef+: { name: name } } }, }, - '#withMaxReplicas':: d.fn(help='"upper limit for the number of pods that can be set by the autoscaler; cannot be smaller than MinReplicas."', args=[d.arg(name='maxReplicas', type=d.T.integer)]), + '#withMaxReplicas':: d.fn(help='"maxReplicas is the upper limit for the number of pods that can be set by the autoscaler; cannot be smaller than MinReplicas."', args=[d.arg(name='maxReplicas', type=d.T.integer)]), withMaxReplicas(maxReplicas): { spec+: { maxReplicas: maxReplicas } }, '#withMinReplicas':: d.fn(help='"minReplicas is the lower limit for the number of replicas to which the autoscaler can scale down. It defaults to 1 pod. minReplicas is allowed to be 0 if the alpha feature gate HPAScaleToZero is enabled and at least one Object or External metric is configured. 
Scaling is active as long as at least one metric value is available."', args=[d.arg(name='minReplicas', type=d.T.integer)]), withMinReplicas(minReplicas): { spec+: { minReplicas: minReplicas } }, - '#withTargetCPUUtilizationPercentage':: d.fn(help='"target average CPU utilization (represented as a percentage of requested CPU) over all the pods; if not specified the default autoscaling policy will be used."', args=[d.arg(name='targetCPUUtilizationPercentage', type=d.T.integer)]), + '#withTargetCPUUtilizationPercentage':: d.fn(help='"targetCPUUtilizationPercentage is the target average CPU utilization (represented as a percentage of requested CPU) over all the pods; if not specified the default autoscaling policy will be used."', args=[d.arg(name='targetCPUUtilizationPercentage', type=d.T.integer)]), withTargetCPUUtilizationPercentage(targetCPUUtilizationPercentage): { spec+: { targetCPUUtilizationPercentage: targetCPUUtilizationPercentage } }, }, '#mixin': 'ignore', diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v1/horizontalPodAutoscalerSpec.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v1/horizontalPodAutoscalerSpec.libsonnet similarity index 53% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v1/horizontalPodAutoscalerSpec.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v1/horizontalPodAutoscalerSpec.libsonnet index 21610e5124c..c23f8a26d76 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v1/horizontalPodAutoscalerSpec.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v1/horizontalPodAutoscalerSpec.libsonnet @@ -3,18 +3,18 @@ '#':: d.pkg(name='horizontalPodAutoscalerSpec', url='', help='"specification of a horizontal pod autoscaler."'), '#scaleTargetRef':: d.obj(help='"CrossVersionObjectReference contains enough information to let you identify the referred resource."'), scaleTargetRef: { - '#withApiVersion':: d.fn(help='"API version of the referent"', args=[d.arg(name='apiVersion', type=d.T.string)]), + '#withApiVersion':: d.fn(help='"apiVersion is the API version of the referent"', args=[d.arg(name='apiVersion', type=d.T.string)]), withApiVersion(apiVersion): { scaleTargetRef+: { apiVersion: apiVersion } }, - '#withKind':: d.fn(help='"Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds\\', args=[d.arg(name='kind', type=d.T.string)]), + '#withKind':: d.fn(help='"kind is the kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds"', args=[d.arg(name='kind', type=d.T.string)]), withKind(kind): { scaleTargetRef+: { kind: kind } }, - '#withName':: d.fn(help='"Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"name is the name of the referent; More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { scaleTargetRef+: { name: name } }, }, - '#withMaxReplicas':: d.fn(help='"upper limit for the number of pods that can be set by the autoscaler; cannot be smaller than MinReplicas."', args=[d.arg(name='maxReplicas', 
type=d.T.integer)]), + '#withMaxReplicas':: d.fn(help='"maxReplicas is the upper limit for the number of pods that can be set by the autoscaler; cannot be smaller than MinReplicas."', args=[d.arg(name='maxReplicas', type=d.T.integer)]), withMaxReplicas(maxReplicas): { maxReplicas: maxReplicas }, '#withMinReplicas':: d.fn(help='"minReplicas is the lower limit for the number of replicas to which the autoscaler can scale down. It defaults to 1 pod. minReplicas is allowed to be 0 if the alpha feature gate HPAScaleToZero is enabled and at least one Object or External metric is configured. Scaling is active as long as at least one metric value is available."', args=[d.arg(name='minReplicas', type=d.T.integer)]), withMinReplicas(minReplicas): { minReplicas: minReplicas }, - '#withTargetCPUUtilizationPercentage':: d.fn(help='"target average CPU utilization (represented as a percentage of requested CPU) over all the pods; if not specified the default autoscaling policy will be used."', args=[d.arg(name='targetCPUUtilizationPercentage', type=d.T.integer)]), + '#withTargetCPUUtilizationPercentage':: d.fn(help='"targetCPUUtilizationPercentage is the target average CPU utilization (represented as a percentage of requested CPU) over all the pods; if not specified the default autoscaling policy will be used."', args=[d.arg(name='targetCPUUtilizationPercentage', type=d.T.integer)]), withTargetCPUUtilizationPercentage(targetCPUUtilizationPercentage): { targetCPUUtilizationPercentage: targetCPUUtilizationPercentage }, '#mixin': 'ignore', mixin: self, diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v1/horizontalPodAutoscalerStatus.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v1/horizontalPodAutoscalerStatus.libsonnet similarity index 51% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v1/horizontalPodAutoscalerStatus.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v1/horizontalPodAutoscalerStatus.libsonnet index 97bab8e1e7f..d65ef26bddc 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v1/horizontalPodAutoscalerStatus.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v1/horizontalPodAutoscalerStatus.libsonnet @@ -1,15 +1,15 @@ { local d = (import 'doc-util/main.libsonnet'), '#':: d.pkg(name='horizontalPodAutoscalerStatus', url='', help='"current status of a horizontal pod autoscaler"'), - '#withCurrentCPUUtilizationPercentage':: d.fn(help='"current average CPU utilization over all pods, represented as a percentage of requested CPU, e.g. 70 means that an average pod is using now 70% of its requested CPU."', args=[d.arg(name='currentCPUUtilizationPercentage', type=d.T.integer)]), + '#withCurrentCPUUtilizationPercentage':: d.fn(help='"currentCPUUtilizationPercentage is the current average CPU utilization over all pods, represented as a percentage of requested CPU, e.g. 
70 means that an average pod is using now 70% of its requested CPU."', args=[d.arg(name='currentCPUUtilizationPercentage', type=d.T.integer)]), withCurrentCPUUtilizationPercentage(currentCPUUtilizationPercentage): { currentCPUUtilizationPercentage: currentCPUUtilizationPercentage }, - '#withCurrentReplicas':: d.fn(help='"current number of replicas of pods managed by this autoscaler."', args=[d.arg(name='currentReplicas', type=d.T.integer)]), + '#withCurrentReplicas':: d.fn(help='"currentReplicas is the current number of replicas of pods managed by this autoscaler."', args=[d.arg(name='currentReplicas', type=d.T.integer)]), withCurrentReplicas(currentReplicas): { currentReplicas: currentReplicas }, - '#withDesiredReplicas':: d.fn(help='"desired number of replicas of pods managed by this autoscaler."', args=[d.arg(name='desiredReplicas', type=d.T.integer)]), + '#withDesiredReplicas':: d.fn(help='"desiredReplicas is the desired number of replicas of pods managed by this autoscaler."', args=[d.arg(name='desiredReplicas', type=d.T.integer)]), withDesiredReplicas(desiredReplicas): { desiredReplicas: desiredReplicas }, '#withLastScaleTime':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='lastScaleTime', type=d.T.string)]), withLastScaleTime(lastScaleTime): { lastScaleTime: lastScaleTime }, - '#withObservedGeneration':: d.fn(help='"most recent generation observed by this autoscaler."', args=[d.arg(name='observedGeneration', type=d.T.integer)]), + '#withObservedGeneration':: d.fn(help='"observedGeneration is the most recent generation observed by this autoscaler."', args=[d.arg(name='observedGeneration', type=d.T.integer)]), withObservedGeneration(observedGeneration): { observedGeneration: observedGeneration }, '#mixin': 'ignore', mixin: self, diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v1/main.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v1/main.libsonnet similarity index 100% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v1/main.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v1/main.libsonnet diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v1/scale.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v1/scale.libsonnet similarity index 82% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v1/scale.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v1/scale.libsonnet index 62241a0c3dd..3227c261fe6 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v1/scale.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v1/scale.libsonnet @@ -3,12 +3,10 @@ '#':: d.pkg(name='scale', url='', help='"Scale represents a scaling request for a resource."'), '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve 
arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -19,21 +17,21 @@ withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. 
If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { metadata+: { generateName: generateName } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { metadata+: { labels+: labels } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. 
Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { metadata+: { namespace: namespace } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, @@ -41,9 +39,9 @@ withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. 
Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { metadata+: { uid: uid } }, }, '#new':: d.fn(help='new returns an instance of Scale', args=[d.arg(name='name', type=d.T.string)]), @@ -53,7 +51,7 @@ } + self.metadata.withName(name=name), '#spec':: d.obj(help='"ScaleSpec describes the attributes of a scale subresource."'), spec: { - '#withReplicas':: d.fn(help='"desired number of instances for the scaled object."', args=[d.arg(name='replicas', type=d.T.integer)]), + '#withReplicas':: d.fn(help='"replicas is the desired number of instances for the scaled object."', args=[d.arg(name='replicas', type=d.T.integer)]), withReplicas(replicas): { spec+: { replicas: replicas } }, }, '#mixin': 'ignore', diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v1/scaleSpec.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v1/scaleSpec.libsonnet similarity index 62% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v1/scaleSpec.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v1/scaleSpec.libsonnet index a3c6898d409..129ad5527b7 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v1/scaleSpec.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v1/scaleSpec.libsonnet @@ -1,7 +1,7 @@ { local d = (import 'doc-util/main.libsonnet'), '#':: d.pkg(name='scaleSpec', url='', help='"ScaleSpec describes the attributes of a scale subresource."'), - '#withReplicas':: d.fn(help='"desired number of instances for the scaled object."', args=[d.arg(name='replicas', type=d.T.integer)]), + '#withReplicas':: d.fn(help='"replicas is the desired number of instances for the scaled object."', args=[d.arg(name='replicas', type=d.T.integer)]), withReplicas(replicas): { replicas: replicas }, '#mixin': 'ignore', mixin: self, diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v1/scaleStatus.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v1/scaleStatus.libsonnet new file mode 100644 index 00000000000..28af9c0cc5b --- /dev/null +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v1/scaleStatus.libsonnet @@ -0,0 +1,10 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='scaleStatus', url='', help='"ScaleStatus represents the current status of a scale subresource."'), + '#withReplicas':: d.fn(help='"replicas is the actual number of observed instances of the scaled object."', args=[d.arg(name='replicas', type=d.T.integer)]), + withReplicas(replicas): { replicas: replicas }, + '#withSelector':: d.fn(help='"selector is the label query over pods that should match the replicas count. This is same as the label selector but in the string format to avoid introspection by clients. The string will be in the same format as the query-param syntax. 
More info about label selectors: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/"', args=[d.arg(name='selector', type=d.T.string)]), + withSelector(selector): { selector: selector }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/containerResourceMetricSource.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/containerResourceMetricSource.libsonnet new file mode 100644 index 00000000000..9887c4c9403 --- /dev/null +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/containerResourceMetricSource.libsonnet @@ -0,0 +1,21 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='containerResourceMetricSource', url='', help='"ContainerResourceMetricSource indicates how to scale on a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). The values will be averaged together before being compared to the target. Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \\"pods\\" source. Only one \\"target\\" type should be set."'), + '#target':: d.obj(help='"MetricTarget defines the target value, average value, or average utilization of a specific metric"'), + target: { + '#withAverageUtilization':: d.fn(help='"averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for Resource metric source type"', args=[d.arg(name='averageUtilization', type=d.T.integer)]), + withAverageUtilization(averageUtilization): { target+: { averageUtilization: averageUtilization } }, + '#withAverageValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n``` \u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n\\n\\t(Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n\\n\\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n\\n\\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e ```\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) 
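For orientation, the regenerated `containerResourceMetricSource` package above composes like the rest of k8s-libsonnet: `withName`/`withContainer` pick the resource and container, and the nested `target` object sets the scaling goal. A minimal sketch, assuming the 1.29 vendor import path added by this PR (resolved through the usual vendor/ jsonnet path) and placeholder names:

```jsonnet
// Sketch only: the import path, container name 'distributor' and the 80% target are placeholders.
local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';
local crms = k.autoscaling.v2.containerResourceMetricSource;

// Target 80% average CPU utilization of the 'distributor' container.
crms.withName('cpu')
+ crms.withContainer('distributor')
+ crms.target.withType('Utilization')
+ crms.target.withAverageUtilization(80)
```

This renders the `{ name, container, target: { type, averageUtilization } }` object that sits under the `containerResource` key of an HPA metrics entry.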
This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n\\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\\n\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n\\n- 1.5 will be serialized as \\\"1500m\\\" - 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='averageValue', type=d.T.string)]), + withAverageValue(averageValue): { target+: { averageValue: averageValue } }, + '#withType':: d.fn(help='"type represents whether the metric type is Utilization, Value, or AverageValue"', args=[d.arg(name='type', type=d.T.string)]), + withType(type): { target+: { type: type } }, + '#withValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n``` \u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n\\n\\t(Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n\\n\\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n\\n\\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e ```\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". 
This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n\\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\\n\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n\\n- 1.5 will be serialized as \\\"1500m\\\" - 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='value', type=d.T.string)]), + withValue(value): { target+: { value: value } }, + }, + '#withContainer':: d.fn(help='"container is the name of the container in the pods of the scaling target"', args=[d.arg(name='container', type=d.T.string)]), + withContainer(container): { container: container }, + '#withName':: d.fn(help='"name is the name of the resource in question."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { name: name }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/containerResourceMetricStatus.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/containerResourceMetricStatus.libsonnet new file mode 100644 index 00000000000..70708c85b31 --- /dev/null +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/containerResourceMetricStatus.libsonnet @@ -0,0 +1,19 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='containerResourceMetricStatus', url='', help='"ContainerResourceMetricStatus indicates the current value of a resource metric known to Kubernetes, as specified in requests and limits, describing a single container in each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \\"pods\\" source."'), + '#current':: d.obj(help='"MetricValueStatus holds the current value for a metric"'), + current: { + '#withAverageUtilization':: d.fn(help='"currentAverageUtilization is the current value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods."', args=[d.arg(name='averageUtilization', type=d.T.integer)]), + withAverageUtilization(averageUtilization): { current+: { averageUtilization: averageUtilization } }, + '#withAverageValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n``` \u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n\\n\\t(Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\\n\u003cdigit\u003e ::= 0 | 1 | ... 
| 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n\\n\\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n\\n\\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e ```\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n\\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\\n\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n\\n- 1.5 will be serialized as \\\"1500m\\\" - 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='averageValue', type=d.T.string)]), + withAverageValue(averageValue): { current+: { averageValue: averageValue } }, + '#withValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n``` \u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n\\n\\t(Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. 
| .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n\\n\\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n\\n\\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e ```\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n\\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\\n\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n\\n- 1.5 will be serialized as \\\"1500m\\\" - 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. 
(So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='value', type=d.T.string)]), + withValue(value): { current+: { value: value } }, + }, + '#withContainer':: d.fn(help='"container is the name of the container in the pods of the scaling target"', args=[d.arg(name='container', type=d.T.string)]), + withContainer(container): { container: container }, + '#withName':: d.fn(help='"name is the name of the resource in question."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { name: name }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/crossVersionObjectReference.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/crossVersionObjectReference.libsonnet new file mode 100644 index 00000000000..04c0e544aa8 --- /dev/null +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/crossVersionObjectReference.libsonnet @@ -0,0 +1,10 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='crossVersionObjectReference', url='', help='"CrossVersionObjectReference contains enough information to let you identify the referred resource."'), + '#withKind':: d.fn(help='"kind is the kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds"', args=[d.arg(name='kind', type=d.T.string)]), + withKind(kind): { kind: kind }, + '#withName':: d.fn(help='"name is the name of the referent; More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { name: name }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/externalMetricSource.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/externalMetricSource.libsonnet new file mode 100644 index 00000000000..edd8e7ffd6a --- /dev/null +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/externalMetricSource.libsonnet @@ -0,0 +1,33 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='externalMetricSource', url='', help='"ExternalMetricSource indicates how to scale on a metric not associated with any Kubernetes object (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster)."'), + '#metric':: d.obj(help='"MetricIdentifier defines the name and optionally selector for a metric"'), + metric: { + '#selector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), + selector: { + '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. 
The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressions(matchExpressions): { metric+: { selector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } }, + '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressionsMixin(matchExpressions): { metric+: { selector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } }, + '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabels(matchLabels): { metric+: { selector+: { matchLabels: matchLabels } } }, + '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabelsMixin(matchLabels): { metric+: { selector+: { matchLabels+: matchLabels } } }, + }, + '#withName':: d.fn(help='"name is the name of the given metric"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { metric+: { name: name } }, + }, + '#target':: d.obj(help='"MetricTarget defines the target value, average value, or average utilization of a specific metric"'), + target: { + '#withAverageUtilization':: d.fn(help='"averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for Resource metric source type"', args=[d.arg(name='averageUtilization', type=d.T.integer)]), + withAverageUtilization(averageUtilization): { target+: { averageUtilization: averageUtilization } }, + '#withAverageValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n``` \u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n\\n\\t(Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. 
| .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n\\n\\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n\\n\\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e ```\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n\\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\\n\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n\\n- 1.5 will be serialized as \\\"1500m\\\" - 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='averageValue', type=d.T.string)]), + withAverageValue(averageValue): { target+: { averageValue: averageValue } }, + '#withType':: d.fn(help='"type represents whether the metric type is Utilization, Value, or AverageValue"', args=[d.arg(name='type', type=d.T.string)]), + withType(type): { target+: { type: type } }, + '#withValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n``` \u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n\\n\\t(Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. 
| .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n\\n\\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n\\n\\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e ```\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n\\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\\n\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n\\n- 1.5 will be serialized as \\\"1500m\\\" - 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='value', type=d.T.string)]), + withValue(value): { target+: { value: value } }, + }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/externalMetricStatus.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/externalMetricStatus.libsonnet new file mode 100644 index 00000000000..295eef8ea87 --- /dev/null +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/externalMetricStatus.libsonnet @@ -0,0 +1,31 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='externalMetricStatus', url='', help='"ExternalMetricStatus indicates the current value of a global metric not associated with any Kubernetes object."'), + '#current':: d.obj(help='"MetricValueStatus holds the current value for a metric"'), + current: { + '#withAverageUtilization':: d.fn(help='"currentAverageUtilization is the current value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods."', args=[d.arg(name='averageUtilization', type=d.T.integer)]), + withAverageUtilization(averageUtilization): { current+: { averageUtilization: averageUtilization } }, + '#withAverageValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. 
It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n``` \u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n\\n\\t(Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n\\n\\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n\\n\\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e ```\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n\\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\\n\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n\\n- 1.5 will be serialized as \\\"1500m\\\" - 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='averageValue', type=d.T.string)]), + withAverageValue(averageValue): { current+: { averageValue: averageValue } }, + '#withValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n``` \u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n\\n\\t(Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. 
| .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n\\n\\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n\\n\\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e ```\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n\\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\\n\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n\\n- 1.5 will be serialized as \\\"1500m\\\" - 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='value', type=d.T.string)]), + withValue(value): { current+: { value: value } }, + }, + '#metric':: d.obj(help='"MetricIdentifier defines the name and optionally selector for a metric"'), + metric: { + '#selector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), + selector: { + '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressions(matchExpressions): { metric+: { selector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } }, + '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressionsMixin(matchExpressions): { metric+: { selector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } }, + '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabels(matchLabels): { metric+: { selector+: { matchLabels: matchLabels } } }, + '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabelsMixin(matchLabels): { metric+: { selector+: { matchLabels+: matchLabels } } }, + }, + '#withName':: d.fn(help='"name is the name of the given metric"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { metric+: { name: name } }, + }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/horizontalPodAutoscaler.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/horizontalPodAutoscaler.libsonnet similarity index 86% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/horizontalPodAutoscaler.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/horizontalPodAutoscaler.libsonnet index b9a30db9fa2..7b3c26c5526 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/horizontalPodAutoscaler.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/horizontalPodAutoscaler.libsonnet @@ -3,12 +3,10 @@ '#':: d.pkg(name='horizontalPodAutoscaler', url='', help='"HorizontalPodAutoscaler is the configuration for a horizontal pod autoscaler, which automatically manages the replica count of any resource implementing the scale subresource based on the metrics specified."'), '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. 
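Worth noting in this hunk: the HorizontalPodAutoscaler moves from autoscaling/v2beta2 to autoscaling/v2 and the `withClusterName` builders are dropped, while the remaining metadata builders keep their shape. A minimal usage sketch, again assuming the vendored 1.29 import path and placeholder namespace, label, and annotation values:

```jsonnet
// Sketch only: 'distributor', 'tempo', and the label/annotation values are placeholders.
local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';
local hpa = k.autoscaling.v2.horizontalPodAutoscaler;

hpa.new('distributor')
+ hpa.metadata.withNamespace('tempo')
+ hpa.metadata.withLabels({ 'app.kubernetes.io/name': 'distributor' })
+ hpa.metadata.withAnnotationsMixin({ 'example.com/scaled-by': 'tempo-operations' })
```

`new()` already stamps `apiVersion: 'autoscaling/v2'` and `kind: 'HorizontalPodAutoscaler'` (and sets the name), so only metadata and spec need to be layered on top.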
More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -19,21 +17,21 @@ withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. 
The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { metadata+: { generateName: generateName } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { metadata+: { labels+: labels } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. 
This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { metadata+: { namespace: namespace } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. 
There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, @@ -41,14 +39,14 @@ withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { metadata+: { uid: uid } }, }, '#new':: d.fn(help='new returns an instance of HorizontalPodAutoscaler', args=[d.arg(name='name', type=d.T.string)]), new(name): { - apiVersion: 'autoscaling/v2beta2', + apiVersion: 'autoscaling/v2', kind: 'HorizontalPodAutoscaler', } + self.metadata.withName(name=name), '#spec':: d.obj(help='"HorizontalPodAutoscalerSpec describes the desired functionality of the HorizontalPodAutoscaler."'), @@ -61,9 +59,9 @@ withPolicies(policies): { spec+: { behavior+: { scaleDown+: { policies: if std.isArray(v=policies) then policies else [policies] } } } }, '#withPoliciesMixin':: d.fn(help='"policies is a list of potential scaling polices which can be used during scaling. 
At least one policy must be specified, otherwise the HPAScalingRules will be discarded as invalid"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='policies', type=d.T.array)]), withPoliciesMixin(policies): { spec+: { behavior+: { scaleDown+: { policies+: if std.isArray(v=policies) then policies else [policies] } } } }, - '#withSelectPolicy':: d.fn(help='"selectPolicy is used to specify which policy should be used. If not set, the default value MaxPolicySelect is used."', args=[d.arg(name='selectPolicy', type=d.T.string)]), + '#withSelectPolicy':: d.fn(help='"selectPolicy is used to specify which policy should be used. If not set, the default value Max is used."', args=[d.arg(name='selectPolicy', type=d.T.string)]), withSelectPolicy(selectPolicy): { spec+: { behavior+: { scaleDown+: { selectPolicy: selectPolicy } } } }, - '#withStabilizationWindowSeconds':: d.fn(help='"StabilizationWindowSeconds is the number of seconds for which past recommendations should be considered while scaling up or scaling down. StabilizationWindowSeconds must be greater than or equal to zero and less than or equal to 3600 (one hour). If not set, use the default values: - For scale up: 0 (i.e. no stabilization is done). - For scale down: 300 (i.e. the stabilization window is 300 seconds long)."', args=[d.arg(name='stabilizationWindowSeconds', type=d.T.integer)]), + '#withStabilizationWindowSeconds':: d.fn(help='"stabilizationWindowSeconds is the number of seconds for which past recommendations should be considered while scaling up or scaling down. StabilizationWindowSeconds must be greater than or equal to zero and less than or equal to 3600 (one hour). If not set, use the default values: - For scale up: 0 (i.e. no stabilization is done). - For scale down: 300 (i.e. the stabilization window is 300 seconds long)."', args=[d.arg(name='stabilizationWindowSeconds', type=d.T.integer)]), withStabilizationWindowSeconds(stabilizationWindowSeconds): { spec+: { behavior+: { scaleDown+: { stabilizationWindowSeconds: stabilizationWindowSeconds } } } }, }, '#scaleUp':: d.obj(help='"HPAScalingRules configures the scaling behavior for one direction. These Rules are applied after calculating DesiredReplicas from metrics for the HPA. They can limit the scaling velocity by specifying scaling policies. They can prevent flapping by specifying the stabilization window, so that the number of replicas is not set instantly, instead, the safest value from the stabilization window is chosen."'), @@ -72,19 +70,19 @@ withPolicies(policies): { spec+: { behavior+: { scaleUp+: { policies: if std.isArray(v=policies) then policies else [policies] } } } }, '#withPoliciesMixin':: d.fn(help='"policies is a list of potential scaling polices which can be used during scaling. At least one policy must be specified, otherwise the HPAScalingRules will be discarded as invalid"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='policies', type=d.T.array)]), withPoliciesMixin(policies): { spec+: { behavior+: { scaleUp+: { policies+: if std.isArray(v=policies) then policies else [policies] } } } }, - '#withSelectPolicy':: d.fn(help='"selectPolicy is used to specify which policy should be used. If not set, the default value MaxPolicySelect is used."', args=[d.arg(name='selectPolicy', type=d.T.string)]), + '#withSelectPolicy':: d.fn(help='"selectPolicy is used to specify which policy should be used. 
If not set, the default value Max is used."', args=[d.arg(name='selectPolicy', type=d.T.string)]), withSelectPolicy(selectPolicy): { spec+: { behavior+: { scaleUp+: { selectPolicy: selectPolicy } } } }, - '#withStabilizationWindowSeconds':: d.fn(help='"StabilizationWindowSeconds is the number of seconds for which past recommendations should be considered while scaling up or scaling down. StabilizationWindowSeconds must be greater than or equal to zero and less than or equal to 3600 (one hour). If not set, use the default values: - For scale up: 0 (i.e. no stabilization is done). - For scale down: 300 (i.e. the stabilization window is 300 seconds long)."', args=[d.arg(name='stabilizationWindowSeconds', type=d.T.integer)]), + '#withStabilizationWindowSeconds':: d.fn(help='"stabilizationWindowSeconds is the number of seconds for which past recommendations should be considered while scaling up or scaling down. StabilizationWindowSeconds must be greater than or equal to zero and less than or equal to 3600 (one hour). If not set, use the default values: - For scale up: 0 (i.e. no stabilization is done). - For scale down: 300 (i.e. the stabilization window is 300 seconds long)."', args=[d.arg(name='stabilizationWindowSeconds', type=d.T.integer)]), withStabilizationWindowSeconds(stabilizationWindowSeconds): { spec+: { behavior+: { scaleUp+: { stabilizationWindowSeconds: stabilizationWindowSeconds } } } }, }, }, '#scaleTargetRef':: d.obj(help='"CrossVersionObjectReference contains enough information to let you identify the referred resource."'), scaleTargetRef: { - '#withApiVersion':: d.fn(help='"API version of the referent"', args=[d.arg(name='apiVersion', type=d.T.string)]), + '#withApiVersion':: d.fn(help='"apiVersion is the API version of the referent"', args=[d.arg(name='apiVersion', type=d.T.string)]), withApiVersion(apiVersion): { spec+: { scaleTargetRef+: { apiVersion: apiVersion } } }, - '#withKind':: d.fn(help='"Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds\\', args=[d.arg(name='kind', type=d.T.string)]), + '#withKind':: d.fn(help='"kind is the kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds"', args=[d.arg(name='kind', type=d.T.string)]), withKind(kind): { spec+: { scaleTargetRef+: { kind: kind } } }, - '#withName':: d.fn(help='"Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"name is the name of the referent; More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { spec+: { scaleTargetRef+: { name: name } } }, }, '#withMaxReplicas':: d.fn(help='"maxReplicas is the upper limit for the number of replicas to which the autoscaler can scale up. 
It cannot be less that minReplicas."', args=[d.arg(name='maxReplicas', type=d.T.integer)]), diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/horizontalPodAutoscalerBehavior.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/horizontalPodAutoscalerBehavior.libsonnet similarity index 93% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/horizontalPodAutoscalerBehavior.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/horizontalPodAutoscalerBehavior.libsonnet index 5cb33acd711..858546ce3c6 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/horizontalPodAutoscalerBehavior.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/horizontalPodAutoscalerBehavior.libsonnet @@ -7,9 +7,9 @@ withPolicies(policies): { scaleDown+: { policies: if std.isArray(v=policies) then policies else [policies] } }, '#withPoliciesMixin':: d.fn(help='"policies is a list of potential scaling polices which can be used during scaling. At least one policy must be specified, otherwise the HPAScalingRules will be discarded as invalid"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='policies', type=d.T.array)]), withPoliciesMixin(policies): { scaleDown+: { policies+: if std.isArray(v=policies) then policies else [policies] } }, - '#withSelectPolicy':: d.fn(help='"selectPolicy is used to specify which policy should be used. If not set, the default value MaxPolicySelect is used."', args=[d.arg(name='selectPolicy', type=d.T.string)]), + '#withSelectPolicy':: d.fn(help='"selectPolicy is used to specify which policy should be used. If not set, the default value Max is used."', args=[d.arg(name='selectPolicy', type=d.T.string)]), withSelectPolicy(selectPolicy): { scaleDown+: { selectPolicy: selectPolicy } }, - '#withStabilizationWindowSeconds':: d.fn(help='"StabilizationWindowSeconds is the number of seconds for which past recommendations should be considered while scaling up or scaling down. StabilizationWindowSeconds must be greater than or equal to zero and less than or equal to 3600 (one hour). If not set, use the default values: - For scale up: 0 (i.e. no stabilization is done). - For scale down: 300 (i.e. the stabilization window is 300 seconds long)."', args=[d.arg(name='stabilizationWindowSeconds', type=d.T.integer)]), + '#withStabilizationWindowSeconds':: d.fn(help='"stabilizationWindowSeconds is the number of seconds for which past recommendations should be considered while scaling up or scaling down. StabilizationWindowSeconds must be greater than or equal to zero and less than or equal to 3600 (one hour). If not set, use the default values: - For scale up: 0 (i.e. no stabilization is done). - For scale down: 300 (i.e. the stabilization window is 300 seconds long)."', args=[d.arg(name='stabilizationWindowSeconds', type=d.T.integer)]), withStabilizationWindowSeconds(stabilizationWindowSeconds): { scaleDown+: { stabilizationWindowSeconds: stabilizationWindowSeconds } }, }, '#scaleUp':: d.obj(help='"HPAScalingRules configures the scaling behavior for one direction. These Rules are applied after calculating DesiredReplicas from metrics for the HPA. They can limit the scaling velocity by specifying scaling policies. 
They can prevent flapping by specifying the stabilization window, so that the number of replicas is not set instantly, instead, the safest value from the stabilization window is chosen."'), @@ -18,9 +18,9 @@ withPolicies(policies): { scaleUp+: { policies: if std.isArray(v=policies) then policies else [policies] } }, '#withPoliciesMixin':: d.fn(help='"policies is a list of potential scaling polices which can be used during scaling. At least one policy must be specified, otherwise the HPAScalingRules will be discarded as invalid"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='policies', type=d.T.array)]), withPoliciesMixin(policies): { scaleUp+: { policies+: if std.isArray(v=policies) then policies else [policies] } }, - '#withSelectPolicy':: d.fn(help='"selectPolicy is used to specify which policy should be used. If not set, the default value MaxPolicySelect is used."', args=[d.arg(name='selectPolicy', type=d.T.string)]), + '#withSelectPolicy':: d.fn(help='"selectPolicy is used to specify which policy should be used. If not set, the default value Max is used."', args=[d.arg(name='selectPolicy', type=d.T.string)]), withSelectPolicy(selectPolicy): { scaleUp+: { selectPolicy: selectPolicy } }, - '#withStabilizationWindowSeconds':: d.fn(help='"StabilizationWindowSeconds is the number of seconds for which past recommendations should be considered while scaling up or scaling down. StabilizationWindowSeconds must be greater than or equal to zero and less than or equal to 3600 (one hour). If not set, use the default values: - For scale up: 0 (i.e. no stabilization is done). - For scale down: 300 (i.e. the stabilization window is 300 seconds long)."', args=[d.arg(name='stabilizationWindowSeconds', type=d.T.integer)]), + '#withStabilizationWindowSeconds':: d.fn(help='"stabilizationWindowSeconds is the number of seconds for which past recommendations should be considered while scaling up or scaling down. StabilizationWindowSeconds must be greater than or equal to zero and less than or equal to 3600 (one hour). If not set, use the default values: - For scale up: 0 (i.e. no stabilization is done). - For scale down: 300 (i.e. 
the stabilization window is 300 seconds long)."', args=[d.arg(name='stabilizationWindowSeconds', type=d.T.integer)]), withStabilizationWindowSeconds(stabilizationWindowSeconds): { scaleUp+: { stabilizationWindowSeconds: stabilizationWindowSeconds } }, }, '#mixin': 'ignore', diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta1/horizontalPodAutoscalerCondition.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/horizontalPodAutoscalerCondition.libsonnet similarity index 100% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta1/horizontalPodAutoscalerCondition.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/horizontalPodAutoscalerCondition.libsonnet diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/horizontalPodAutoscalerSpec.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/horizontalPodAutoscalerSpec.libsonnet similarity index 90% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/horizontalPodAutoscalerSpec.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/horizontalPodAutoscalerSpec.libsonnet index efe9096f266..823633e412f 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/horizontalPodAutoscalerSpec.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/horizontalPodAutoscalerSpec.libsonnet @@ -9,9 +9,9 @@ withPolicies(policies): { behavior+: { scaleDown+: { policies: if std.isArray(v=policies) then policies else [policies] } } }, '#withPoliciesMixin':: d.fn(help='"policies is a list of potential scaling polices which can be used during scaling. At least one policy must be specified, otherwise the HPAScalingRules will be discarded as invalid"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='policies', type=d.T.array)]), withPoliciesMixin(policies): { behavior+: { scaleDown+: { policies+: if std.isArray(v=policies) then policies else [policies] } } }, - '#withSelectPolicy':: d.fn(help='"selectPolicy is used to specify which policy should be used. If not set, the default value MaxPolicySelect is used."', args=[d.arg(name='selectPolicy', type=d.T.string)]), + '#withSelectPolicy':: d.fn(help='"selectPolicy is used to specify which policy should be used. If not set, the default value Max is used."', args=[d.arg(name='selectPolicy', type=d.T.string)]), withSelectPolicy(selectPolicy): { behavior+: { scaleDown+: { selectPolicy: selectPolicy } } }, - '#withStabilizationWindowSeconds':: d.fn(help='"StabilizationWindowSeconds is the number of seconds for which past recommendations should be considered while scaling up or scaling down. StabilizationWindowSeconds must be greater than or equal to zero and less than or equal to 3600 (one hour). If not set, use the default values: - For scale up: 0 (i.e. no stabilization is done). - For scale down: 300 (i.e. 
the stabilization window is 300 seconds long)."', args=[d.arg(name='stabilizationWindowSeconds', type=d.T.integer)]), + '#withStabilizationWindowSeconds':: d.fn(help='"stabilizationWindowSeconds is the number of seconds for which past recommendations should be considered while scaling up or scaling down. StabilizationWindowSeconds must be greater than or equal to zero and less than or equal to 3600 (one hour). If not set, use the default values: - For scale up: 0 (i.e. no stabilization is done). - For scale down: 300 (i.e. the stabilization window is 300 seconds long)."', args=[d.arg(name='stabilizationWindowSeconds', type=d.T.integer)]), withStabilizationWindowSeconds(stabilizationWindowSeconds): { behavior+: { scaleDown+: { stabilizationWindowSeconds: stabilizationWindowSeconds } } }, }, '#scaleUp':: d.obj(help='"HPAScalingRules configures the scaling behavior for one direction. These Rules are applied after calculating DesiredReplicas from metrics for the HPA. They can limit the scaling velocity by specifying scaling policies. They can prevent flapping by specifying the stabilization window, so that the number of replicas is not set instantly, instead, the safest value from the stabilization window is chosen."'), @@ -20,19 +20,19 @@ withPolicies(policies): { behavior+: { scaleUp+: { policies: if std.isArray(v=policies) then policies else [policies] } } }, '#withPoliciesMixin':: d.fn(help='"policies is a list of potential scaling polices which can be used during scaling. At least one policy must be specified, otherwise the HPAScalingRules will be discarded as invalid"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='policies', type=d.T.array)]), withPoliciesMixin(policies): { behavior+: { scaleUp+: { policies+: if std.isArray(v=policies) then policies else [policies] } } }, - '#withSelectPolicy':: d.fn(help='"selectPolicy is used to specify which policy should be used. If not set, the default value MaxPolicySelect is used."', args=[d.arg(name='selectPolicy', type=d.T.string)]), + '#withSelectPolicy':: d.fn(help='"selectPolicy is used to specify which policy should be used. If not set, the default value Max is used."', args=[d.arg(name='selectPolicy', type=d.T.string)]), withSelectPolicy(selectPolicy): { behavior+: { scaleUp+: { selectPolicy: selectPolicy } } }, - '#withStabilizationWindowSeconds':: d.fn(help='"StabilizationWindowSeconds is the number of seconds for which past recommendations should be considered while scaling up or scaling down. StabilizationWindowSeconds must be greater than or equal to zero and less than or equal to 3600 (one hour). If not set, use the default values: - For scale up: 0 (i.e. no stabilization is done). - For scale down: 300 (i.e. the stabilization window is 300 seconds long)."', args=[d.arg(name='stabilizationWindowSeconds', type=d.T.integer)]), + '#withStabilizationWindowSeconds':: d.fn(help='"stabilizationWindowSeconds is the number of seconds for which past recommendations should be considered while scaling up or scaling down. StabilizationWindowSeconds must be greater than or equal to zero and less than or equal to 3600 (one hour). If not set, use the default values: - For scale up: 0 (i.e. no stabilization is done). - For scale down: 300 (i.e. 
the stabilization window is 300 seconds long)."', args=[d.arg(name='stabilizationWindowSeconds', type=d.T.integer)]), withStabilizationWindowSeconds(stabilizationWindowSeconds): { behavior+: { scaleUp+: { stabilizationWindowSeconds: stabilizationWindowSeconds } } }, }, }, '#scaleTargetRef':: d.obj(help='"CrossVersionObjectReference contains enough information to let you identify the referred resource."'), scaleTargetRef: { - '#withApiVersion':: d.fn(help='"API version of the referent"', args=[d.arg(name='apiVersion', type=d.T.string)]), + '#withApiVersion':: d.fn(help='"apiVersion is the API version of the referent"', args=[d.arg(name='apiVersion', type=d.T.string)]), withApiVersion(apiVersion): { scaleTargetRef+: { apiVersion: apiVersion } }, - '#withKind':: d.fn(help='"Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds\\', args=[d.arg(name='kind', type=d.T.string)]), + '#withKind':: d.fn(help='"kind is the kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds"', args=[d.arg(name='kind', type=d.T.string)]), withKind(kind): { scaleTargetRef+: { kind: kind } }, - '#withName':: d.fn(help='"Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"name is the name of the referent; More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { scaleTargetRef+: { name: name } }, }, '#withMaxReplicas':: d.fn(help='"maxReplicas is the upper limit for the number of replicas to which the autoscaler can scale up. 
It cannot be less that minReplicas."', args=[d.arg(name='maxReplicas', type=d.T.integer)]), diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta1/horizontalPodAutoscalerStatus.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/horizontalPodAutoscalerStatus.libsonnet similarity index 100% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta1/horizontalPodAutoscalerStatus.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/horizontalPodAutoscalerStatus.libsonnet diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/hpaScalingPolicy.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/hpaScalingPolicy.libsonnet similarity index 79% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/hpaScalingPolicy.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/hpaScalingPolicy.libsonnet index 43c29a9ae0d..42d501bb118 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/hpaScalingPolicy.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/hpaScalingPolicy.libsonnet @@ -1,11 +1,11 @@ { local d = (import 'doc-util/main.libsonnet'), '#':: d.pkg(name='hpaScalingPolicy', url='', help='"HPAScalingPolicy is a single policy which must hold true for a specified past interval."'), - '#withPeriodSeconds':: d.fn(help='"PeriodSeconds specifies the window of time for which the policy should hold true. PeriodSeconds must be greater than zero and less than or equal to 1800 (30 min)."', args=[d.arg(name='periodSeconds', type=d.T.integer)]), + '#withPeriodSeconds':: d.fn(help='"periodSeconds specifies the window of time for which the policy should hold true. PeriodSeconds must be greater than zero and less than or equal to 1800 (30 min)."', args=[d.arg(name='periodSeconds', type=d.T.integer)]), withPeriodSeconds(periodSeconds): { periodSeconds: periodSeconds }, - '#withType':: d.fn(help='"Type is used to specify the scaling policy."', args=[d.arg(name='type', type=d.T.string)]), + '#withType':: d.fn(help='"type is used to specify the scaling policy."', args=[d.arg(name='type', type=d.T.string)]), withType(type): { type: type }, - '#withValue':: d.fn(help='"Value contains the amount of change which is permitted by the policy. It must be greater than zero"', args=[d.arg(name='value', type=d.T.integer)]), + '#withValue':: d.fn(help='"value contains the amount of change which is permitted by the policy. 
It must be greater than zero"', args=[d.arg(name='value', type=d.T.integer)]), withValue(value): { value: value }, '#mixin': 'ignore', mixin: self, diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/hpaScalingRules.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/hpaScalingRules.libsonnet similarity index 93% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/hpaScalingRules.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/hpaScalingRules.libsonnet index 403302875fa..7579f71dc51 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/hpaScalingRules.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/hpaScalingRules.libsonnet @@ -5,9 +5,9 @@ withPolicies(policies): { policies: if std.isArray(v=policies) then policies else [policies] }, '#withPoliciesMixin':: d.fn(help='"policies is a list of potential scaling polices which can be used during scaling. At least one policy must be specified, otherwise the HPAScalingRules will be discarded as invalid"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='policies', type=d.T.array)]), withPoliciesMixin(policies): { policies+: if std.isArray(v=policies) then policies else [policies] }, - '#withSelectPolicy':: d.fn(help='"selectPolicy is used to specify which policy should be used. If not set, the default value MaxPolicySelect is used."', args=[d.arg(name='selectPolicy', type=d.T.string)]), + '#withSelectPolicy':: d.fn(help='"selectPolicy is used to specify which policy should be used. If not set, the default value Max is used."', args=[d.arg(name='selectPolicy', type=d.T.string)]), withSelectPolicy(selectPolicy): { selectPolicy: selectPolicy }, - '#withStabilizationWindowSeconds':: d.fn(help='"StabilizationWindowSeconds is the number of seconds for which past recommendations should be considered while scaling up or scaling down. StabilizationWindowSeconds must be greater than or equal to zero and less than or equal to 3600 (one hour). If not set, use the default values: - For scale up: 0 (i.e. no stabilization is done). - For scale down: 300 (i.e. the stabilization window is 300 seconds long)."', args=[d.arg(name='stabilizationWindowSeconds', type=d.T.integer)]), + '#withStabilizationWindowSeconds':: d.fn(help='"stabilizationWindowSeconds is the number of seconds for which past recommendations should be considered while scaling up or scaling down. StabilizationWindowSeconds must be greater than or equal to zero and less than or equal to 3600 (one hour). If not set, use the default values: - For scale up: 0 (i.e. no stabilization is done). - For scale down: 300 (i.e. 
the stabilization window is 300 seconds long)."', args=[d.arg(name='stabilizationWindowSeconds', type=d.T.integer)]), withStabilizationWindowSeconds(stabilizationWindowSeconds): { stabilizationWindowSeconds: stabilizationWindowSeconds }, '#mixin': 'ignore', mixin: self, diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/main.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/main.libsonnet similarity index 97% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/main.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/main.libsonnet index 537b89b6cb1..4db7c791f37 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/main.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/main.libsonnet @@ -1,6 +1,6 @@ { local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='v2beta2', url='', help=''), + '#':: d.pkg(name='v2', url='', help=''), containerResourceMetricSource: (import 'containerResourceMetricSource.libsonnet'), containerResourceMetricStatus: (import 'containerResourceMetricStatus.libsonnet'), crossVersionObjectReference: (import 'crossVersionObjectReference.libsonnet'), diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/metricIdentifier.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/metricIdentifier.libsonnet similarity index 100% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/metricIdentifier.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/metricIdentifier.libsonnet diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/metricSpec.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/metricSpec.libsonnet new file mode 100644 index 00000000000..6ad19a98920 --- /dev/null +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/metricSpec.libsonnet @@ -0,0 +1,141 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='metricSpec', url='', help='"MetricSpec specifies how to scale based on a single metric (only `type` and one other matching field should be set at once)."'), + '#containerResource':: d.obj(help='"ContainerResourceMetricSource indicates how to scale on a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). The values will be averaged together before being compared to the target. Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \\"pods\\" source. Only one \\"target\\" type should be set."'), + containerResource: { + '#target':: d.obj(help='"MetricTarget defines the target value, average value, or average utilization of a specific metric"'), + target: { + '#withAverageUtilization':: d.fn(help='"averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. 
Currently only valid for Resource metric source type"', args=[d.arg(name='averageUtilization', type=d.T.integer)]), + withAverageUtilization(averageUtilization): { containerResource+: { target+: { averageUtilization: averageUtilization } } }, + '#withAverageValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n``` \u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n\\n\\t(Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n\\n\\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n\\n\\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e ```\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n\\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\\n\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n\\n- 1.5 will be serialized as \\\"1500m\\\" - 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='averageValue', type=d.T.string)]), + withAverageValue(averageValue): { containerResource+: { target+: { averageValue: averageValue } } }, + '#withType':: d.fn(help='"type represents whether the metric type is Utilization, Value, or AverageValue"', args=[d.arg(name='type', type=d.T.string)]), + withType(type): { containerResource+: { target+: { type: type } } }, + '#withValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. 
It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n``` \u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n\\n\\t(Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n\\n\\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n\\n\\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e ```\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n\\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\\n\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n\\n- 1.5 will be serialized as \\\"1500m\\\" - 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. 
(So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='value', type=d.T.string)]), + withValue(value): { containerResource+: { target+: { value: value } } }, + }, + '#withContainer':: d.fn(help='"container is the name of the container in the pods of the scaling target"', args=[d.arg(name='container', type=d.T.string)]), + withContainer(container): { containerResource+: { container: container } }, + '#withName':: d.fn(help='"name is the name of the resource in question."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { containerResource+: { name: name } }, + }, + '#external':: d.obj(help='"ExternalMetricSource indicates how to scale on a metric not associated with any Kubernetes object (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster)."'), + external: { + '#metric':: d.obj(help='"MetricIdentifier defines the name and optionally selector for a metric"'), + metric: { + '#selector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), + selector: { + '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressions(matchExpressions): { external+: { metric+: { selector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } } }, + '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressionsMixin(matchExpressions): { external+: { metric+: { selector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } } }, + '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabels(matchLabels): { external+: { metric+: { selector+: { matchLabels: matchLabels } } } }, + '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". 
The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabelsMixin(matchLabels): { external+: { metric+: { selector+: { matchLabels+: matchLabels } } } }, + }, + '#withName':: d.fn(help='"name is the name of the given metric"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { external+: { metric+: { name: name } } }, + }, + '#target':: d.obj(help='"MetricTarget defines the target value, average value, or average utilization of a specific metric"'), + target: { + '#withAverageUtilization':: d.fn(help='"averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for Resource metric source type"', args=[d.arg(name='averageUtilization', type=d.T.integer)]), + withAverageUtilization(averageUtilization): { external+: { target+: { averageUtilization: averageUtilization } } }, + '#withAverageValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n``` \u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n\\n\\t(Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n\\n\\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n\\n\\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e ```\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n\\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\\n\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n\\n- 1.5 will be serialized as \\\"1500m\\\" - 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. 
That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='averageValue', type=d.T.string)]), + withAverageValue(averageValue): { external+: { target+: { averageValue: averageValue } } }, + '#withType':: d.fn(help='"type represents whether the metric type is Utilization, Value, or AverageValue"', args=[d.arg(name='type', type=d.T.string)]), + withType(type): { external+: { target+: { type: type } } }, + '#withValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n``` \u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n\\n\\t(Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n\\n\\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n\\n\\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e ```\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n\\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\\n\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n\\n- 1.5 will be serialized as \\\"1500m\\\" - 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. 
(So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='value', type=d.T.string)]), + withValue(value): { external+: { target+: { value: value } } }, + }, + }, + '#object':: d.obj(help='"ObjectMetricSource indicates how to scale on a metric describing a kubernetes object (for example, hits-per-second on an Ingress object)."'), + object: { + '#describedObject':: d.obj(help='"CrossVersionObjectReference contains enough information to let you identify the referred resource."'), + describedObject: { + '#withApiVersion':: d.fn(help='"apiVersion is the API version of the referent"', args=[d.arg(name='apiVersion', type=d.T.string)]), + withApiVersion(apiVersion): { object+: { describedObject+: { apiVersion: apiVersion } } }, + '#withKind':: d.fn(help='"kind is the kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds"', args=[d.arg(name='kind', type=d.T.string)]), + withKind(kind): { object+: { describedObject+: { kind: kind } } }, + '#withName':: d.fn(help='"name is the name of the referent; More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { object+: { describedObject+: { name: name } } }, + }, + '#metric':: d.obj(help='"MetricIdentifier defines the name and optionally selector for a metric"'), + metric: { + '#selector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), + selector: { + '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressions(matchExpressions): { object+: { metric+: { selector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } } }, + '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressionsMixin(matchExpressions): { object+: { metric+: { selector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } } }, + '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabels(matchLabels): { object+: { metric+: { selector+: { matchLabels: matchLabels } } } }, + '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". 
The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabelsMixin(matchLabels): { object+: { metric+: { selector+: { matchLabels+: matchLabels } } } }, + }, + '#withName':: d.fn(help='"name is the name of the given metric"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { object+: { metric+: { name: name } } }, + }, + '#target':: d.obj(help='"MetricTarget defines the target value, average value, or average utilization of a specific metric"'), + target: { + '#withAverageUtilization':: d.fn(help='"averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for Resource metric source type"', args=[d.arg(name='averageUtilization', type=d.T.integer)]), + withAverageUtilization(averageUtilization): { object+: { target+: { averageUtilization: averageUtilization } } }, + '#withAverageValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n``` \u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n\\n\\t(Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n\\n\\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n\\n\\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e ```\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n\\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\\n\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n\\n- 1.5 will be serialized as \\\"1500m\\\" - 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. 
That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='averageValue', type=d.T.string)]), + withAverageValue(averageValue): { object+: { target+: { averageValue: averageValue } } }, + '#withType':: d.fn(help='"type represents whether the metric type is Utilization, Value, or AverageValue"', args=[d.arg(name='type', type=d.T.string)]), + withType(type): { object+: { target+: { type: type } } }, + '#withValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n``` \u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n\\n\\t(Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n\\n\\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n\\n\\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e ```\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n\\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\\n\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n\\n- 1.5 will be serialized as \\\"1500m\\\" - 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. 
(So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='value', type=d.T.string)]), + withValue(value): { object+: { target+: { value: value } } }, + }, + }, + '#pods':: d.obj(help='"PodsMetricSource indicates how to scale on a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value."'), + pods: { + '#metric':: d.obj(help='"MetricIdentifier defines the name and optionally selector for a metric"'), + metric: { + '#selector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), + selector: { + '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressions(matchExpressions): { pods+: { metric+: { selector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } } }, + '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressionsMixin(matchExpressions): { pods+: { metric+: { selector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } } }, + '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabels(matchLabels): { pods+: { metric+: { selector+: { matchLabels: matchLabels } } } }, + '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabelsMixin(matchLabels): { pods+: { metric+: { selector+: { matchLabels+: matchLabels } } } }, + }, + '#withName':: d.fn(help='"name is the name of the given metric"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { pods+: { metric+: { name: name } } }, + }, + '#target':: d.obj(help='"MetricTarget defines the target value, average value, or average utilization of a specific metric"'), + target: { + '#withAverageUtilization':: d.fn(help='"averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. 
Currently only valid for Resource metric source type"', args=[d.arg(name='averageUtilization', type=d.T.integer)]), + withAverageUtilization(averageUtilization): { pods+: { target+: { averageUtilization: averageUtilization } } }, + '#withAverageValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n``` \u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n\\n\\t(Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n\\n\\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n\\n\\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e ```\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n\\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\\n\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n\\n- 1.5 will be serialized as \\\"1500m\\\" - 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='averageValue', type=d.T.string)]), + withAverageValue(averageValue): { pods+: { target+: { averageValue: averageValue } } }, + '#withType':: d.fn(help='"type represents whether the metric type is Utilization, Value, or AverageValue"', args=[d.arg(name='type', type=d.T.string)]), + withType(type): { pods+: { target+: { type: type } } }, + '#withValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. 
It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n``` \u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n\\n\\t(Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n\\n\\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n\\n\\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e ```\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n\\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\\n\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n\\n- 1.5 will be serialized as \\\"1500m\\\" - 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='value', type=d.T.string)]), + withValue(value): { pods+: { target+: { value: value } } }, + }, + }, + '#resource':: d.obj(help='"ResourceMetricSource indicates how to scale on a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). The values will be averaged together before being compared to the target. Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \\"pods\\" source. 
Only one \\"target\\" type should be set."'), + resource: { + '#target':: d.obj(help='"MetricTarget defines the target value, average value, or average utilization of a specific metric"'), + target: { + '#withAverageUtilization':: d.fn(help='"averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for Resource metric source type"', args=[d.arg(name='averageUtilization', type=d.T.integer)]), + withAverageUtilization(averageUtilization): { resource+: { target+: { averageUtilization: averageUtilization } } }, + '#withAverageValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n``` \u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n\\n\\t(Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n\\n\\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n\\n\\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e ```\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n\\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\\n\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n\\n- 1.5 will be serialized as \\\"1500m\\\" - 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. 
(So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='averageValue', type=d.T.string)]), + withAverageValue(averageValue): { resource+: { target+: { averageValue: averageValue } } }, + '#withType':: d.fn(help='"type represents whether the metric type is Utilization, Value, or AverageValue"', args=[d.arg(name='type', type=d.T.string)]), + withType(type): { resource+: { target+: { type: type } } }, + '#withValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n``` \u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n\\n\\t(Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n\\n\\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n\\n\\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e ```\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n\\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\\n\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n\\n- 1.5 will be serialized as \\\"1500m\\\" - 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. 
(So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='value', type=d.T.string)]), + withValue(value): { resource+: { target+: { value: value } } }, + }, + '#withName':: d.fn(help='"name is the name of the resource in question."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { resource+: { name: name } }, + }, + '#withType':: d.fn(help='"type is the type of metric source. It should be one of \\"ContainerResource\\", \\"External\\", \\"Object\\", \\"Pods\\" or \\"Resource\\", each mapping to a matching field in the object. Note: \\"ContainerResource\\" type is available on when the feature-gate HPAContainerMetrics is enabled"', args=[d.arg(name='type', type=d.T.string)]), + withType(type): { type: type }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/metricStatus.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/metricStatus.libsonnet new file mode 100644 index 00000000000..9ccb2a9f38b --- /dev/null +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/metricStatus.libsonnet @@ -0,0 +1,131 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='metricStatus', url='', help='"MetricStatus describes the last-read state of a single metric."'), + '#containerResource':: d.obj(help='"ContainerResourceMetricStatus indicates the current value of a resource metric known to Kubernetes, as specified in requests and limits, describing a single container in each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \\"pods\\" source."'), + containerResource: { + '#current':: d.obj(help='"MetricValueStatus holds the current value for a metric"'), + current: { + '#withAverageUtilization':: d.fn(help='"currentAverageUtilization is the current value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods."', args=[d.arg(name='averageUtilization', type=d.T.integer)]), + withAverageUtilization(averageUtilization): { containerResource+: { current+: { averageUtilization: averageUtilization } } }, + '#withAverageValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n``` \u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n\\n\\t(Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. 
| .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n\\n\\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n\\n\\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e ```\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n\\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\\n\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n\\n- 1.5 will be serialized as \\\"1500m\\\" - 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='averageValue', type=d.T.string)]), + withAverageValue(averageValue): { containerResource+: { current+: { averageValue: averageValue } } }, + '#withValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n``` \u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n\\n\\t(Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. 
| .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n\\n\\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n\\n\\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e ```\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n\\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\\n\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n\\n- 1.5 will be serialized as \\\"1500m\\\" - 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='value', type=d.T.string)]), + withValue(value): { containerResource+: { current+: { value: value } } }, + }, + '#withContainer':: d.fn(help='"container is the name of the container in the pods of the scaling target"', args=[d.arg(name='container', type=d.T.string)]), + withContainer(container): { containerResource+: { container: container } }, + '#withName':: d.fn(help='"name is the name of the resource in question."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { containerResource+: { name: name } }, + }, + '#external':: d.obj(help='"ExternalMetricStatus indicates the current value of a global metric not associated with any Kubernetes object."'), + external: { + '#current':: d.obj(help='"MetricValueStatus holds the current value for a metric"'), + current: { + '#withAverageUtilization':: d.fn(help='"currentAverageUtilization is the current value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods."', args=[d.arg(name='averageUtilization', type=d.T.integer)]), + withAverageUtilization(averageUtilization): { external+: { current+: { averageUtilization: averageUtilization } } }, + '#withAverageValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. 
It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n``` \u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n\\n\\t(Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n\\n\\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n\\n\\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e ```\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n\\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\\n\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n\\n- 1.5 will be serialized as \\\"1500m\\\" - 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='averageValue', type=d.T.string)]), + withAverageValue(averageValue): { external+: { current+: { averageValue: averageValue } } }, + '#withValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n``` \u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n\\n\\t(Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. 
| .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n\\n\\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n\\n\\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e ```\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n\\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\\n\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n\\n- 1.5 will be serialized as \\\"1500m\\\" - 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='value', type=d.T.string)]), + withValue(value): { external+: { current+: { value: value } } }, + }, + '#metric':: d.obj(help='"MetricIdentifier defines the name and optionally selector for a metric"'), + metric: { + '#selector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), + selector: { + '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressions(matchExpressions): { external+: { metric+: { selector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } } }, + '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressionsMixin(matchExpressions): { external+: { metric+: { selector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } } }, + '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabels(matchLabels): { external+: { metric+: { selector+: { matchLabels: matchLabels } } } }, + '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabelsMixin(matchLabels): { external+: { metric+: { selector+: { matchLabels+: matchLabels } } } }, + }, + '#withName':: d.fn(help='"name is the name of the given metric"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { external+: { metric+: { name: name } } }, + }, + }, + '#object':: d.obj(help='"ObjectMetricStatus indicates the current value of a metric describing a kubernetes object (for example, hits-per-second on an Ingress object)."'), + object: { + '#current':: d.obj(help='"MetricValueStatus holds the current value for a metric"'), + current: { + '#withAverageUtilization':: d.fn(help='"currentAverageUtilization is the current value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods."', args=[d.arg(name='averageUtilization', type=d.T.integer)]), + withAverageUtilization(averageUtilization): { object+: { current+: { averageUtilization: averageUtilization } } }, + '#withAverageValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n``` \u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n\\n\\t(Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n\\n\\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n\\n\\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e ```\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) 
This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n\\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\\n\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n\\n- 1.5 will be serialized as \\\"1500m\\\" - 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='averageValue', type=d.T.string)]), + withAverageValue(averageValue): { object+: { current+: { averageValue: averageValue } } }, + '#withValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n``` \u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n\\n\\t(Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n\\n\\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n\\n\\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e ```\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". 
This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n\\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\\n\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n\\n- 1.5 will be serialized as \\\"1500m\\\" - 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='value', type=d.T.string)]), + withValue(value): { object+: { current+: { value: value } } }, + }, + '#describedObject':: d.obj(help='"CrossVersionObjectReference contains enough information to let you identify the referred resource."'), + describedObject: { + '#withApiVersion':: d.fn(help='"apiVersion is the API version of the referent"', args=[d.arg(name='apiVersion', type=d.T.string)]), + withApiVersion(apiVersion): { object+: { describedObject+: { apiVersion: apiVersion } } }, + '#withKind':: d.fn(help='"kind is the kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds"', args=[d.arg(name='kind', type=d.T.string)]), + withKind(kind): { object+: { describedObject+: { kind: kind } } }, + '#withName':: d.fn(help='"name is the name of the referent; More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { object+: { describedObject+: { name: name } } }, + }, + '#metric':: d.obj(help='"MetricIdentifier defines the name and optionally selector for a metric"'), + metric: { + '#selector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), + selector: { + '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressions(matchExpressions): { object+: { metric+: { selector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } } }, + '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressionsMixin(matchExpressions): { object+: { metric+: { selector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } } }, + '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". 
The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabels(matchLabels): { object+: { metric+: { selector+: { matchLabels: matchLabels } } } }, + '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabelsMixin(matchLabels): { object+: { metric+: { selector+: { matchLabels+: matchLabels } } } }, + }, + '#withName':: d.fn(help='"name is the name of the given metric"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { object+: { metric+: { name: name } } }, + }, + }, + '#pods':: d.obj(help='"PodsMetricStatus indicates the current value of a metric describing each pod in the current scale target (for example, transactions-processed-per-second)."'), + pods: { + '#current':: d.obj(help='"MetricValueStatus holds the current value for a metric"'), + current: { + '#withAverageUtilization':: d.fn(help='"currentAverageUtilization is the current value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods."', args=[d.arg(name='averageUtilization', type=d.T.integer)]), + withAverageUtilization(averageUtilization): { pods+: { current+: { averageUtilization: averageUtilization } } }, + '#withAverageValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n``` \u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n\\n\\t(Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n\\n\\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n\\n\\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e ```\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". 
This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n\\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\\n\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n\\n- 1.5 will be serialized as \\\"1500m\\\" - 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='averageValue', type=d.T.string)]), + withAverageValue(averageValue): { pods+: { current+: { averageValue: averageValue } } }, + '#withValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n``` \u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n\\n\\t(Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n\\n\\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n\\n\\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e ```\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n\\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\\n\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n\\n- 1.5 will be serialized as \\\"1500m\\\" - 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. 
That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='value', type=d.T.string)]), + withValue(value): { pods+: { current+: { value: value } } }, + }, + '#metric':: d.obj(help='"MetricIdentifier defines the name and optionally selector for a metric"'), + metric: { + '#selector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), + selector: { + '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressions(matchExpressions): { pods+: { metric+: { selector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } } }, + '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressionsMixin(matchExpressions): { pods+: { metric+: { selector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } } }, + '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabels(matchLabels): { pods+: { metric+: { selector+: { matchLabels: matchLabels } } } }, + '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabelsMixin(matchLabels): { pods+: { metric+: { selector+: { matchLabels+: matchLabels } } } }, + }, + '#withName':: d.fn(help='"name is the name of the given metric"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { pods+: { metric+: { name: name } } }, + }, + }, + '#resource':: d.obj(help='"ResourceMetricStatus indicates the current value of a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). 
Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \\"pods\\" source."'), + resource: { + '#current':: d.obj(help='"MetricValueStatus holds the current value for a metric"'), + current: { + '#withAverageUtilization':: d.fn(help='"currentAverageUtilization is the current value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods."', args=[d.arg(name='averageUtilization', type=d.T.integer)]), + withAverageUtilization(averageUtilization): { resource+: { current+: { averageUtilization: averageUtilization } } }, + '#withAverageValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n``` \u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n\\n\\t(Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n\\n\\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n\\n\\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e ```\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n\\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\\n\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n\\n- 1.5 will be serialized as \\\"1500m\\\" - 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. 
(So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='averageValue', type=d.T.string)]), + withAverageValue(averageValue): { resource+: { current+: { averageValue: averageValue } } }, + '#withValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n``` \u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n\\n\\t(Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n\\n\\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n\\n\\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e ```\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n\\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\\n\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n\\n- 1.5 will be serialized as \\\"1500m\\\" - 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. 
(So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='value', type=d.T.string)]), + withValue(value): { resource+: { current+: { value: value } } }, + }, + '#withName':: d.fn(help='"name is the name of the resource in question."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { resource+: { name: name } }, + }, + '#withType':: d.fn(help='"type is the type of metric source. It will be one of \\"ContainerResource\\", \\"External\\", \\"Object\\", \\"Pods\\" or \\"Resource\\", each corresponds to a matching field in the object. Note: \\"ContainerResource\\" type is available on when the feature-gate HPAContainerMetrics is enabled"', args=[d.arg(name='type', type=d.T.string)]), + withType(type): { type: type }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/metricTarget.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/metricTarget.libsonnet new file mode 100644 index 00000000000..a73f1451ad0 --- /dev/null +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/metricTarget.libsonnet @@ -0,0 +1,14 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='metricTarget', url='', help='"MetricTarget defines the target value, average value, or average utilization of a specific metric"'), + '#withAverageUtilization':: d.fn(help='"averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for Resource metric source type"', args=[d.arg(name='averageUtilization', type=d.T.integer)]), + withAverageUtilization(averageUtilization): { averageUtilization: averageUtilization }, + '#withAverageValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n``` \u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n\\n\\t(Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n\\n\\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n\\n\\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e ```\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. 
Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n\\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\\n\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n\\n- 1.5 will be serialized as \\\"1500m\\\" - 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='averageValue', type=d.T.string)]), + withAverageValue(averageValue): { averageValue: averageValue }, + '#withType':: d.fn(help='"type represents whether the metric type is Utilization, Value, or AverageValue"', args=[d.arg(name='type', type=d.T.string)]), + withType(type): { type: type }, + '#withValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n``` \u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n\\n\\t(Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n\\n\\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n\\n\\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e ```\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". 
This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n\\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\\n\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n\\n- 1.5 will be serialized as \\\"1500m\\\" - 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='value', type=d.T.string)]), + withValue(value): { value: value }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/metricValueStatus.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/metricValueStatus.libsonnet new file mode 100644 index 00000000000..51dec374da6 --- /dev/null +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/metricValueStatus.libsonnet @@ -0,0 +1,12 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='metricValueStatus', url='', help='"MetricValueStatus holds the current value for a metric"'), + '#withAverageUtilization':: d.fn(help='"currentAverageUtilization is the current value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods."', args=[d.arg(name='averageUtilization', type=d.T.integer)]), + withAverageUtilization(averageUtilization): { averageUtilization: averageUtilization }, + '#withAverageValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n``` \u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n\\n\\t(Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n\\n\\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n\\n\\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e ```\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. 
Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n\\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\\n\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n\\n- 1.5 will be serialized as \\\"1500m\\\" - 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='averageValue', type=d.T.string)]), + withAverageValue(averageValue): { averageValue: averageValue }, + '#withValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n``` \u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n\\n\\t(Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n\\n\\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n\\n\\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e ```\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". 
This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n\\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\\n\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n\\n- 1.5 will be serialized as \\\"1500m\\\" - 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='value', type=d.T.string)]), + withValue(value): { value: value }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/objectMetricSource.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/objectMetricSource.libsonnet new file mode 100644 index 00000000000..efd9a4ba1b0 --- /dev/null +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/objectMetricSource.libsonnet @@ -0,0 +1,42 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='objectMetricSource', url='', help='"ObjectMetricSource indicates how to scale on a metric describing a kubernetes object (for example, hits-per-second on an Ingress object)."'), + '#describedObject':: d.obj(help='"CrossVersionObjectReference contains enough information to let you identify the referred resource."'), + describedObject: { + '#withApiVersion':: d.fn(help='"apiVersion is the API version of the referent"', args=[d.arg(name='apiVersion', type=d.T.string)]), + withApiVersion(apiVersion): { describedObject+: { apiVersion: apiVersion } }, + '#withKind':: d.fn(help='"kind is the kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds"', args=[d.arg(name='kind', type=d.T.string)]), + withKind(kind): { describedObject+: { kind: kind } }, + '#withName':: d.fn(help='"name is the name of the referent; More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { describedObject+: { name: name } }, + }, + '#metric':: d.obj(help='"MetricIdentifier defines the name and optionally selector for a metric"'), + metric: { + '#selector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), + selector: { + '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressions(matchExpressions): { metric+: { selector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } }, + '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. 
The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressionsMixin(matchExpressions): { metric+: { selector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } }, + '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabels(matchLabels): { metric+: { selector+: { matchLabels: matchLabels } } }, + '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabelsMixin(matchLabels): { metric+: { selector+: { matchLabels+: matchLabels } } }, + }, + '#withName':: d.fn(help='"name is the name of the given metric"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { metric+: { name: name } }, + }, + '#target':: d.obj(help='"MetricTarget defines the target value, average value, or average utilization of a specific metric"'), + target: { + '#withAverageUtilization':: d.fn(help='"averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for Resource metric source type"', args=[d.arg(name='averageUtilization', type=d.T.integer)]), + withAverageUtilization(averageUtilization): { target+: { averageUtilization: averageUtilization } }, + '#withAverageValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n``` \u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n\\n\\t(Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n\\n\\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n\\n\\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e ```\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. 
Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n\\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\\n\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n\\n- 1.5 will be serialized as \\\"1500m\\\" - 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='averageValue', type=d.T.string)]), + withAverageValue(averageValue): { target+: { averageValue: averageValue } }, + '#withType':: d.fn(help='"type represents whether the metric type is Utilization, Value, or AverageValue"', args=[d.arg(name='type', type=d.T.string)]), + withType(type): { target+: { type: type } }, + '#withValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n``` \u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n\\n\\t(Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n\\n\\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n\\n\\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e ```\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". 
This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n\\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\\n\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n\\n- 1.5 will be serialized as \\\"1500m\\\" - 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='value', type=d.T.string)]), + withValue(value): { target+: { value: value } }, + }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/objectMetricStatus.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/objectMetricStatus.libsonnet new file mode 100644 index 00000000000..171bbdd97d6 --- /dev/null +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/objectMetricStatus.libsonnet @@ -0,0 +1,40 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='objectMetricStatus', url='', help='"ObjectMetricStatus indicates the current value of a metric describing a kubernetes object (for example, hits-per-second on an Ingress object)."'), + '#current':: d.obj(help='"MetricValueStatus holds the current value for a metric"'), + current: { + '#withAverageUtilization':: d.fn(help='"currentAverageUtilization is the current value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods."', args=[d.arg(name='averageUtilization', type=d.T.integer)]), + withAverageUtilization(averageUtilization): { current+: { averageUtilization: averageUtilization } }, + '#withAverageValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n``` \u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n\\n\\t(Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. 
| .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n\\n\\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n\\n\\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e ```\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n\\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\\n\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n\\n- 1.5 will be serialized as \\\"1500m\\\" - 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='averageValue', type=d.T.string)]), + withAverageValue(averageValue): { current+: { averageValue: averageValue } }, + '#withValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n``` \u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n\\n\\t(Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. 
| .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n\\n\\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n\\n\\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e ```\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n\\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\\n\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n\\n- 1.5 will be serialized as \\\"1500m\\\" - 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='value', type=d.T.string)]), + withValue(value): { current+: { value: value } }, + }, + '#describedObject':: d.obj(help='"CrossVersionObjectReference contains enough information to let you identify the referred resource."'), + describedObject: { + '#withApiVersion':: d.fn(help='"apiVersion is the API version of the referent"', args=[d.arg(name='apiVersion', type=d.T.string)]), + withApiVersion(apiVersion): { describedObject+: { apiVersion: apiVersion } }, + '#withKind':: d.fn(help='"kind is the kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds"', args=[d.arg(name='kind', type=d.T.string)]), + withKind(kind): { describedObject+: { kind: kind } }, + '#withName':: d.fn(help='"name is the name of the referent; More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { describedObject+: { name: name } }, + }, + '#metric':: d.obj(help='"MetricIdentifier defines the name and optionally selector for a metric"'), + metric: { + '#selector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. 
A null label selector matches no objects."'),
+ selector: {
+ '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]),
+ withMatchExpressions(matchExpressions): { metric+: { selector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } },
+ '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]),
+ withMatchExpressionsMixin(matchExpressions): { metric+: { selector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } },
+ '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]),
+ withMatchLabels(matchLabels): { metric+: { selector+: { matchLabels: matchLabels } } },
+ '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]),
+ withMatchLabelsMixin(matchLabels): { metric+: { selector+: { matchLabels+: matchLabels } } },
+ },
+ '#withName':: d.fn(help='"name is the name of the given metric"', args=[d.arg(name='name', type=d.T.string)]),
+ withName(name): { metric+: { name: name } },
+ },
+ '#mixin': 'ignore',
+ mixin: self,
+}
diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/podsMetricSource.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/podsMetricSource.libsonnet
new file mode 100644
index 00000000000..ecb6d1b6de8
--- /dev/null
+++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/podsMetricSource.libsonnet
@@ -0,0 +1,33 @@
+{
+ local d = (import 'doc-util/main.libsonnet'),
+ '#':: d.pkg(name='podsMetricSource', url='', help='"PodsMetricSource indicates how to scale on a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value."'),
+ '#metric':: d.obj(help='"MetricIdentifier defines the name and optionally selector for a metric"'),
+ metric: {
+ '#selector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'),
+ selector: {
+ '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements.
The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressions(matchExpressions): { metric+: { selector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } }, + '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressionsMixin(matchExpressions): { metric+: { selector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } }, + '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabels(matchLabels): { metric+: { selector+: { matchLabels: matchLabels } } }, + '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabelsMixin(matchLabels): { metric+: { selector+: { matchLabels+: matchLabels } } }, + }, + '#withName':: d.fn(help='"name is the name of the given metric"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { metric+: { name: name } }, + }, + '#target':: d.obj(help='"MetricTarget defines the target value, average value, or average utilization of a specific metric"'), + target: { + '#withAverageUtilization':: d.fn(help='"averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for Resource metric source type"', args=[d.arg(name='averageUtilization', type=d.T.integer)]), + withAverageUtilization(averageUtilization): { target+: { averageUtilization: averageUtilization } }, + '#withAverageValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n``` \u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n\\n\\t(Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. 
| .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n\\n\\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n\\n\\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e ```\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n\\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\\n\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n\\n- 1.5 will be serialized as \\\"1500m\\\" - 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='averageValue', type=d.T.string)]), + withAverageValue(averageValue): { target+: { averageValue: averageValue } }, + '#withType':: d.fn(help='"type represents whether the metric type is Utilization, Value, or AverageValue"', args=[d.arg(name='type', type=d.T.string)]), + withType(type): { target+: { type: type } }, + '#withValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n``` \u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n\\n\\t(Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. 
| .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n\\n\\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n\\n\\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e ```\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n\\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\\n\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n\\n- 1.5 will be serialized as \\\"1500m\\\" - 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. 
(So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='value', type=d.T.string)]), + withValue(value): { target+: { value: value } }, + }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/podsMetricStatus.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/podsMetricStatus.libsonnet new file mode 100644 index 00000000000..3e1642d991d --- /dev/null +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/podsMetricStatus.libsonnet @@ -0,0 +1,31 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='podsMetricStatus', url='', help='"PodsMetricStatus indicates the current value of a metric describing each pod in the current scale target (for example, transactions-processed-per-second)."'), + '#current':: d.obj(help='"MetricValueStatus holds the current value for a metric"'), + current: { + '#withAverageUtilization':: d.fn(help='"currentAverageUtilization is the current value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods."', args=[d.arg(name='averageUtilization', type=d.T.integer)]), + withAverageUtilization(averageUtilization): { current+: { averageUtilization: averageUtilization } }, + '#withAverageValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n``` \u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n\\n\\t(Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n\\n\\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n\\n\\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e ```\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". 
This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n\\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\\n\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n\\n- 1.5 will be serialized as \\\"1500m\\\" - 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='averageValue', type=d.T.string)]), + withAverageValue(averageValue): { current+: { averageValue: averageValue } }, + '#withValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n``` \u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n\\n\\t(Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n\\n\\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n\\n\\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e ```\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n\\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\\n\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n\\n- 1.5 will be serialized as \\\"1500m\\\" - 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. 
That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='value', type=d.T.string)]), + withValue(value): { current+: { value: value } }, + }, + '#metric':: d.obj(help='"MetricIdentifier defines the name and optionally selector for a metric"'), + metric: { + '#selector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), + selector: { + '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressions(matchExpressions): { metric+: { selector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } }, + '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressionsMixin(matchExpressions): { metric+: { selector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } }, + '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabels(matchLabels): { metric+: { selector+: { matchLabels: matchLabels } } }, + '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". 
The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabelsMixin(matchLabels): { metric+: { selector+: { matchLabels+: matchLabels } } }, + }, + '#withName':: d.fn(help='"name is the name of the given metric"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { metric+: { name: name } }, + }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/resourceMetricSource.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/resourceMetricSource.libsonnet new file mode 100644 index 00000000000..6417c9ad6cd --- /dev/null +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/resourceMetricSource.libsonnet @@ -0,0 +1,19 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='resourceMetricSource', url='', help='"ResourceMetricSource indicates how to scale on a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). The values will be averaged together before being compared to the target. Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \\"pods\\" source. Only one \\"target\\" type should be set."'), + '#target':: d.obj(help='"MetricTarget defines the target value, average value, or average utilization of a specific metric"'), + target: { + '#withAverageUtilization':: d.fn(help='"averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for Resource metric source type"', args=[d.arg(name='averageUtilization', type=d.T.integer)]), + withAverageUtilization(averageUtilization): { target+: { averageUtilization: averageUtilization } }, + '#withAverageValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n``` \u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n\\n\\t(Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n\\n\\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n\\n\\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e ```\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. 
Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n\\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\\n\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n\\n- 1.5 will be serialized as \\\"1500m\\\" - 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='averageValue', type=d.T.string)]), + withAverageValue(averageValue): { target+: { averageValue: averageValue } }, + '#withType':: d.fn(help='"type represents whether the metric type is Utilization, Value, or AverageValue"', args=[d.arg(name='type', type=d.T.string)]), + withType(type): { target+: { type: type } }, + '#withValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n``` \u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n\\n\\t(Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n\\n\\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n\\n\\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e ```\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". 
This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n\\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\\n\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n\\n- 1.5 will be serialized as \\\"1500m\\\" - 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='value', type=d.T.string)]), + withValue(value): { target+: { value: value } }, + }, + '#withName':: d.fn(help='"name is the name of the resource in question."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { name: name }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/resourceMetricStatus.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/resourceMetricStatus.libsonnet new file mode 100644 index 00000000000..7b6da96e998 --- /dev/null +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/resourceMetricStatus.libsonnet @@ -0,0 +1,17 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='resourceMetricStatus', url='', help='"ResourceMetricStatus indicates the current value of a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \\"pods\\" source."'), + '#current':: d.obj(help='"MetricValueStatus holds the current value for a metric"'), + current: { + '#withAverageUtilization':: d.fn(help='"currentAverageUtilization is the current value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods."', args=[d.arg(name='averageUtilization', type=d.T.integer)]), + withAverageUtilization(averageUtilization): { current+: { averageUtilization: averageUtilization } }, + '#withAverageValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n``` \u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n\\n\\t(Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. 
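For orientation only, not part of the generated patch: a minimal sketch of how the regenerated autoscaling/v2 resourceMetricSource builders compose. It assumes a hypothetical lib/k.libsonnet shim that imports the vendored 1.29 main.libsonnet, and that the subtype is reachable at k.autoscaling.v2.resourceMetricSource as the _gen layout suggests.

local k = import 'k.libsonnet';  // assumed shim pointing at the vendored 1.29 k8s-libsonnet
local src = k.autoscaling.v2.resourceMetricSource;

{
  // Scale on CPU, targeting 75% average utilization across the pods.
  cpu_metric::
    src.withName('cpu')
    + src.target.withType('Utilization')
    + src.target.withAverageUtilization(75),
}

The resulting object has the ResourceMetricSource shape ({ name, target: { type, averageUtilization } }) and would typically be embedded in an HPA metrics entry of type 'Resource'.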
| .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n\\n\\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n\\n\\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e ```\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n\\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\\n\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n\\n- 1.5 will be serialized as \\\"1500m\\\" - 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='averageValue', type=d.T.string)]), + withAverageValue(averageValue): { current+: { averageValue: averageValue } }, + '#withValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n``` \u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n\\n\\t(Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. 
| .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n\\n\\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n\\n\\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e ```\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n\\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\\n\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n\\n- 1.5 will be serialized as \\\"1500m\\\" - 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. 
(So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='value', type=d.T.string)]), + withValue(value): { current+: { value: value } }, + }, + '#withName':: d.fn(help='"name is the name of the resource in question."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { name: name }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/batch/main.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/batch/main.libsonnet similarity index 74% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/batch/main.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/batch/main.libsonnet index f2a9271adbe..7d9c1089d85 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/batch/main.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/batch/main.libsonnet @@ -2,5 +2,4 @@ local d = (import 'doc-util/main.libsonnet'), '#':: d.pkg(name='batch', url='', help=''), v1: (import 'v1/main.libsonnet'), - v1beta1: (import 'v1beta1/main.libsonnet'), } diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/batch/v1/cronJob.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/batch/v1/cronJob.libsonnet similarity index 82% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/batch/v1/cronJob.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/batch/v1/cronJob.libsonnet index 3a7d672898a..8966fb9e8f0 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/batch/v1/cronJob.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/batch/v1/cronJob.libsonnet @@ -3,12 +3,10 @@ '#':: d.pkg(name='cronJob', url='', help='"CronJob represents the configuration of a single cron job."'), '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. 
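resourceMetricStatus mirrors the source object but is populated by the HPA controller, so it is normally read rather than built. More relevant to consumers of this vendor bump is the removal of batch/v1beta1 from the 1.29 tree: CronJob manifests must now come from batch/v1. A hedged sketch, again assuming the hypothetical k.libsonnet shim; the CronJob name, schedule, and image are illustrative, and spec.withSchedule plus core.v1.container.new are builders from elsewhere in the vendored library, not shown in this hunk.

local k = import 'k.libsonnet';  // assumed shim pointing at the vendored 1.29 k8s-libsonnet
local cronJob = k.batch.v1.cronJob;
local container = k.core.v1.container;

cronJob.new('nightly-cleanup')
+ cronJob.spec.withSchedule('0 3 * * *')
+ cronJob.spec.jobTemplate.spec.template.spec.withContainers([
  container.new('cleanup', 'alpine:3.19'),
])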
More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -19,21 +17,21 @@ withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. 
The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { metadata+: { generateName: generateName } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { metadata+: { labels+: labels } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. 
This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { metadata+: { namespace: namespace } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. 
There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, @@ -41,9 +39,9 @@ withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { metadata+: { uid: uid } }, }, '#new':: d.fn(help='new returns an instance of CronJob', args=[d.arg(name='name', type=d.T.string)]), @@ -57,12 +55,10 @@ jobTemplate: { '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { spec+: { jobTemplate+: { metadata+: { annotations: annotations } } } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { spec+: { jobTemplate+: { metadata+: { annotations+: annotations } } } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { spec+: { jobTemplate+: { metadata+: { clusterName: clusterName } } } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { spec+: { jobTemplate+: { metadata+: { creationTimestamp: creationTimestamp } } } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -73,21 +69,21 @@ withFinalizers(finalizers): { spec+: { jobTemplate+: { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } } } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. 
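The metadata changes above are doc-string updates plus the removal of clusterName (dropped from ObjectMeta in newer Kubernetes); the builder shapes themselves are unchanged. A small sketch of the label mixin semantics under the same assumed shim, with illustrative names:

local k = import 'k.libsonnet';  // assumed shim pointing at the vendored 1.29 k8s-libsonnet
local cronJob = k.batch.v1.cronJob;

cronJob.new('report')
+ cronJob.metadata.withLabels({ app: 'report' })
// the *Mixin variants merge into existing values (labels+:) instead of replacing them
+ cronJob.metadata.withLabelsMixin({ team: 'tempo' })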
Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { spec+: { jobTemplate+: { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } } } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { spec+: { jobTemplate+: { metadata+: { generateName: generateName } } } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { spec+: { jobTemplate+: { metadata+: { generation: generation } } } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { spec+: { jobTemplate+: { metadata+: { labels: labels } } } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. 
More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { spec+: { jobTemplate+: { metadata+: { labels+: labels } } } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { spec+: { jobTemplate+: { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } } } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { spec+: { jobTemplate+: { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } } } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { spec+: { jobTemplate+: { metadata+: { name: name } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. 
More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { spec+: { jobTemplate+: { metadata+: { namespace: namespace } } } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { spec+: { jobTemplate+: { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } } } }, @@ -95,13 +91,20 @@ withOwnerReferencesMixin(ownerReferences): { spec+: { jobTemplate+: { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } } } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { spec+: { jobTemplate+: { metadata+: { resourceVersion: resourceVersion } } } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { spec+: { jobTemplate+: { metadata+: { selfLink: selfLink } } } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { spec+: { jobTemplate+: { metadata+: { uid: uid } } } }, }, '#spec':: d.obj(help='"JobSpec describes how the job execution will look like."'), spec: { + '#podFailurePolicy':: d.obj(help='"PodFailurePolicy describes how failed pods influence the backoffLimit."'), + podFailurePolicy: { + '#withRules':: d.fn(help='"A list of pod failure policy rules. The rules are evaluated in order. Once a rule matches a Pod failure, the remaining of the rules are ignored. When no rule matches the Pod failure, the default handling applies - the counter of pod failures is incremented and it is checked against the backoffLimit. At most 20 elements are allowed."', args=[d.arg(name='rules', type=d.T.array)]), + withRules(rules): { spec+: { jobTemplate+: { spec+: { podFailurePolicy+: { rules: if std.isArray(v=rules) then rules else [rules] } } } } }, + '#withRulesMixin':: d.fn(help='"A list of pod failure policy rules. The rules are evaluated in order. Once a rule matches a Pod failure, the remaining of the rules are ignored. When no rule matches the Pod failure, the default handling applies - the counter of pod failures is incremented and it is checked against the backoffLimit. At most 20 elements are allowed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='rules', type=d.T.array)]), + withRulesMixin(rules): { spec+: { jobTemplate+: { spec+: { podFailurePolicy+: { rules+: if std.isArray(v=rules) then rules else [rules] } } } } }, + }, '#selector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), selector: { '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), @@ -117,12 +120,10 @@ template: { '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { spec+: { jobTemplate+: { spec+: { template+: { metadata+: { annotations: annotations } } } } } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. 
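podFailurePolicy is new relative to the 1.21 tree. This file only adds withRules/withRulesMixin, so a sketch would pass the rules as plain objects; the rule values below are illustrative only, and the shim is the same assumption as above.

local k = import 'k.libsonnet';  // assumed shim pointing at the vendored 1.29 k8s-libsonnet
local cronJob = k.batch.v1.cronJob;

cronJob.new('retrying-job')
+ cronJob.spec.jobTemplate.spec.podFailurePolicy.withRules([
  // Plain PodFailurePolicyRule object; ignore failures caused by node disruption.
  {
    action: 'Ignore',
    onPodConditions: [{ type: 'DisruptionTarget', status: 'True' }],
  },
])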
More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { spec+: { jobTemplate+: { spec+: { template+: { metadata+: { annotations+: annotations } } } } } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { spec+: { jobTemplate+: { spec+: { template+: { metadata+: { clusterName: clusterName } } } } } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { spec+: { jobTemplate+: { spec+: { template+: { metadata+: { creationTimestamp: creationTimestamp } } } } } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -133,21 +134,21 @@ withFinalizers(finalizers): { spec+: { jobTemplate+: { spec+: { template+: { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } } } } } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. 
Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { spec+: { jobTemplate+: { spec+: { template+: { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } } } } } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { spec+: { jobTemplate+: { spec+: { template+: { metadata+: { generateName: generateName } } } } } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { spec+: { jobTemplate+: { spec+: { template+: { metadata+: { generation: generation } } } } } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { spec+: { jobTemplate+: { spec+: { template+: { metadata+: { labels: labels } } } } } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { spec+: { jobTemplate+: { spec+: { template+: { metadata+: { labels+: labels } } } } } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { spec+: { jobTemplate+: { spec+: { template+: { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } } } } } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { spec+: { jobTemplate+: { spec+: { template+: { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } } } } } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { spec+: { jobTemplate+: { spec+: { template+: { metadata+: { name: name } } } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { spec+: { jobTemplate+: { spec+: { template+: { metadata+: { namespace: namespace } } } } } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { spec+: { jobTemplate+: { spec+: { template+: { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } } } } } }, @@ -155,9 +156,9 @@ withOwnerReferencesMixin(ownerReferences): { spec+: { jobTemplate+: { spec+: { template+: { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } } } } } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { spec+: { jobTemplate+: { spec+: { template+: { metadata+: { resourceVersion: resourceVersion } } } } } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. 
Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { spec+: { jobTemplate+: { spec+: { template+: { metadata+: { selfLink: selfLink } } } } } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { spec+: { jobTemplate+: { spec+: { template+: { metadata+: { uid: uid } } } } } }, }, '#spec':: d.obj(help='"PodSpec is a description of a pod."'), @@ -216,6 +217,11 @@ '#withSearchesMixin':: d.fn(help='"A list of DNS search domains for host-name lookup. This will be appended to the base search paths generated from DNSPolicy. Duplicated search paths will be removed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='searches', type=d.T.array)]), withSearchesMixin(searches): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { dnsConfig+: { searches+: if std.isArray(v=searches) then searches else [searches] } } } } } } }, }, + '#os':: d.obj(help='"PodOS defines the OS parameters of a pod."'), + os: { + '#withName':: d.fn(help='"Name is the name of the operating system. The currently supported values are linux and windows. Additional value may be defined in future and can be one of: https://github.com/opencontainers/runtime-spec/blob/master/config.md#platform-specific-configuration Clients should expect to handle additional values and treat unrecognized values in this field as os: null"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { os+: { name: name } } } } } } }, + }, '#securityContext':: d.obj(help='"PodSecurityContext holds pod-level security attributes and common container settings. Some fields are also present in container.securityContext. Field values of container.securityContext take precedence over field values of PodSecurityContext."'), securityContext: { '#seLinuxOptions':: d.obj(help='"SELinuxOptions are the labels to be applied to the container"'), @@ -231,7 +237,7 @@ }, '#seccompProfile':: d.obj(help="\"SeccompProfile defines a pod/container's seccomp profile settings. Only one profile source may be set.\""), seccompProfile: { - '#withLocalhostProfile':: d.fn(help="\"localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. 
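The os (PodOS) block is also new in this tree, and the seccompProfile builders sit alongside it in the pod security context. A sketch combining the two under the same assumed shim, with an illustrative resource name:

local k = import 'k.libsonnet';  // assumed shim pointing at the vendored 1.29 k8s-libsonnet
local cronJob = k.batch.v1.cronJob;
local podSpec = cronJob.spec.jobTemplate.spec.template.spec;

cronJob.new('os-pinned')
+ podSpec.os.withName('linux')
+ podSpec.securityContext.seccompProfile.withType('RuntimeDefault')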
Must only be set if type is \\\"Localhost\\\".\"", args=[d.arg(name='localhostProfile', type=d.T.string)]), + '#withLocalhostProfile':: d.fn(help="\"localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must be set if type is \\\"Localhost\\\". Must NOT be set for any other type.\"", args=[d.arg(name='localhostProfile', type=d.T.string)]), withLocalhostProfile(localhostProfile): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { securityContext+: { seccompProfile+: { localhostProfile: localhostProfile } } } } } } } }, '#withType':: d.fn(help='"type indicates which kind of seccomp profile will be applied. Valid options are:\\n\\nLocalhost - a profile defined in a file on the node should be used. RuntimeDefault - the container runtime default profile should be used. Unconfined - no profile should be applied."', args=[d.arg(name='type', type=d.T.string)]), withType(type): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { securityContext+: { seccompProfile+: { type: type } } } } } } } }, @@ -242,26 +248,28 @@ withGmsaCredentialSpec(gmsaCredentialSpec): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { securityContext+: { windowsOptions+: { gmsaCredentialSpec: gmsaCredentialSpec } } } } } } } }, '#withGmsaCredentialSpecName':: d.fn(help='"GMSACredentialSpecName is the name of the GMSA credential spec to use."', args=[d.arg(name='gmsaCredentialSpecName', type=d.T.string)]), withGmsaCredentialSpecName(gmsaCredentialSpecName): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { securityContext+: { windowsOptions+: { gmsaCredentialSpecName: gmsaCredentialSpecName } } } } } } } }, + '#withHostProcess':: d.fn(help="\"HostProcess determines if a container should be run as a 'Host Process' container. All of a Pod's containers must have the same effective HostProcess value (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). In addition, if HostProcess is true then HostNetwork must also be set to true.\"", args=[d.arg(name='hostProcess', type=d.T.boolean)]), + withHostProcess(hostProcess): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { securityContext+: { windowsOptions+: { hostProcess: hostProcess } } } } } } } }, '#withRunAsUserName':: d.fn(help='"The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence."', args=[d.arg(name='runAsUserName', type=d.T.string)]), withRunAsUserName(runAsUserName): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { securityContext+: { windowsOptions+: { runAsUserName: runAsUserName } } } } } } } }, }, - '#withFsGroup':: d.fn(help="\"A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod:\\n\\n1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. 
The permission bits are OR'd with rw-rw----\\n\\nIf unset, the Kubelet will not modify the ownership and permissions of any volume.\"", args=[d.arg(name='fsGroup', type=d.T.integer)]), + '#withFsGroup':: d.fn(help="\"A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod:\\n\\n1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw----\\n\\nIf unset, the Kubelet will not modify the ownership and permissions of any volume. Note that this field cannot be set when spec.os.name is windows.\"", args=[d.arg(name='fsGroup', type=d.T.integer)]), withFsGroup(fsGroup): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { securityContext+: { fsGroup: fsGroup } } } } } } }, - '#withFsGroupChangePolicy':: d.fn(help='"fsGroupChangePolicy defines behavior of changing ownership and permission of the volume before being exposed inside Pod. This field will only apply to volume types which support fsGroup based ownership(and permissions). It will have no effect on ephemeral volume types such as: secret, configmaps and emptydir. Valid values are \\"OnRootMismatch\\" and \\"Always\\". If not specified, \\"Always\\" is used."', args=[d.arg(name='fsGroupChangePolicy', type=d.T.string)]), + '#withFsGroupChangePolicy':: d.fn(help='"fsGroupChangePolicy defines behavior of changing ownership and permission of the volume before being exposed inside Pod. This field will only apply to volume types which support fsGroup based ownership(and permissions). It will have no effect on ephemeral volume types such as: secret, configmaps and emptydir. Valid values are \\"OnRootMismatch\\" and \\"Always\\". If not specified, \\"Always\\" is used. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='fsGroupChangePolicy', type=d.T.string)]), withFsGroupChangePolicy(fsGroupChangePolicy): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { securityContext+: { fsGroupChangePolicy: fsGroupChangePolicy } } } } } } }, - '#withRunAsGroup':: d.fn(help='"The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container."', args=[d.arg(name='runAsGroup', type=d.T.integer)]), + '#withRunAsGroup':: d.fn(help='"The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='runAsGroup', type=d.T.integer)]), withRunAsGroup(runAsGroup): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { securityContext+: { runAsGroup: runAsGroup } } } } } } }, '#withRunAsNonRoot':: d.fn(help='"Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in SecurityContext. 
If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence."', args=[d.arg(name='runAsNonRoot', type=d.T.boolean)]), withRunAsNonRoot(runAsNonRoot): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { securityContext+: { runAsNonRoot: runAsNonRoot } } } } } } }, - '#withRunAsUser':: d.fn(help='"The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container."', args=[d.arg(name='runAsUser', type=d.T.integer)]), + '#withRunAsUser':: d.fn(help='"The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='runAsUser', type=d.T.integer)]), withRunAsUser(runAsUser): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { securityContext+: { runAsUser: runAsUser } } } } } } }, - '#withSupplementalGroups':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID. If unspecified, no groups will be added to any container.\"", args=[d.arg(name='supplementalGroups', type=d.T.array)]), + '#withSupplementalGroups':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID, the fsGroup (if specified), and group memberships defined in the container image for the uid of the container process. If unspecified, no additional groups are added to any container. Note that group memberships defined in the container image for the uid of the container process are still effective, even if they are not included in this list. Note that this field cannot be set when spec.os.name is windows.\"", args=[d.arg(name='supplementalGroups', type=d.T.array)]), withSupplementalGroups(supplementalGroups): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { securityContext+: { supplementalGroups: if std.isArray(v=supplementalGroups) then supplementalGroups else [supplementalGroups] } } } } } } }, - '#withSupplementalGroupsMixin':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID. If unspecified, no groups will be added to any container.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='supplementalGroups', type=d.T.array)]), + '#withSupplementalGroupsMixin':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID, the fsGroup (if specified), and group memberships defined in the container image for the uid of the container process. If unspecified, no additional groups are added to any container. Note that group memberships defined in the container image for the uid of the container process are still effective, even if they are not included in this list. 
Note that this field cannot be set when spec.os.name is windows.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='supplementalGroups', type=d.T.array)]), withSupplementalGroupsMixin(supplementalGroups): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { securityContext+: { supplementalGroups+: if std.isArray(v=supplementalGroups) then supplementalGroups else [supplementalGroups] } } } } } } }, - '#withSysctls':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch."', args=[d.arg(name='sysctls', type=d.T.array)]), + '#withSysctls':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='sysctls', type=d.T.array)]), withSysctls(sysctls): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { securityContext+: { sysctls: if std.isArray(v=sysctls) then sysctls else [sysctls] } } } } } } }, - '#withSysctlsMixin':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='sysctls', type=d.T.array)]), + '#withSysctlsMixin':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch. Note that this field cannot be set when spec.os.name is windows."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='sysctls', type=d.T.array)]), withSysctlsMixin(sysctls): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { securityContext+: { sysctls+: if std.isArray(v=sysctls) then sysctls else [sysctls] } } } } } } }, }, '#withActiveDeadlineSeconds':: d.fn(help='"Optional duration in seconds the pod may be active on the node relative to StartTime before the system will actively try to mark it failed and kill associated containers. Value must be a positive integer."', args=[d.arg(name='activeDeadlineSeconds', type=d.T.integer)]), @@ -276,9 +284,9 @@ withDnsPolicy(dnsPolicy): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { dnsPolicy: dnsPolicy } } } } } }, '#withEnableServiceLinks':: d.fn(help="\"EnableServiceLinks indicates whether information about services should be injected into pod's environment variables, matching the syntax of Docker links. Optional: Defaults to true.\"", args=[d.arg(name='enableServiceLinks', type=d.T.boolean)]), withEnableServiceLinks(enableServiceLinks): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { enableServiceLinks: enableServiceLinks } } } } } }, - '#withEphemeralContainers':: d.fn(help="\"List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. This field is alpha-level and is only honored by servers that enable the EphemeralContainers feature.\"", args=[d.arg(name='ephemeralContainers', type=d.T.array)]), + '#withEphemeralContainers':: d.fn(help="\"List of ephemeral containers run in this pod. 
Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource.\"", args=[d.arg(name='ephemeralContainers', type=d.T.array)]), withEphemeralContainers(ephemeralContainers): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { ephemeralContainers: if std.isArray(v=ephemeralContainers) then ephemeralContainers else [ephemeralContainers] } } } } } }, - '#withEphemeralContainersMixin':: d.fn(help="\"List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. This field is alpha-level and is only honored by servers that enable the EphemeralContainers feature.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='ephemeralContainers', type=d.T.array)]), + '#withEphemeralContainersMixin':: d.fn(help="\"List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='ephemeralContainers', type=d.T.array)]), withEphemeralContainersMixin(ephemeralContainers): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { ephemeralContainers+: if std.isArray(v=ephemeralContainers) then ephemeralContainers else [ephemeralContainers] } } } } } }, '#withHostAliases':: d.fn(help="\"HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts file if specified. This is only valid for non-hostNetwork pods.\"", args=[d.arg(name='hostAliases', type=d.T.array)]), withHostAliases(hostAliases): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { hostAliases: if std.isArray(v=hostAliases) then hostAliases else [hostAliases] } } } } } }, @@ -290,11 +298,13 @@ withHostNetwork(hostNetwork): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { hostNetwork: hostNetwork } } } } } }, '#withHostPID':: d.fn(help="\"Use the host's pid namespace. Optional: Default to false.\"", args=[d.arg(name='hostPID', type=d.T.boolean)]), withHostPID(hostPID): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { hostPID: hostPID } } } } } }, + '#withHostUsers':: d.fn(help="\"Use the host's user namespace. Optional: Default to true. If set to true or not present, the pod will be run in the host user namespace, useful for when the pod needs a feature only available to the host user namespace, such as loading a kernel module with CAP_SYS_MODULE. When set to false, a new userns is created for the pod. Setting false is useful for mitigating container breakout vulnerabilities even allowing users to run their containers as root without actually having root privileges on the host. 
This field is alpha-level and is only honored by servers that enable the UserNamespacesSupport feature.\"", args=[d.arg(name='hostUsers', type=d.T.boolean)]), + withHostUsers(hostUsers): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { hostUsers: hostUsers } } } } } }, '#withHostname':: d.fn(help="\"Specifies the hostname of the Pod If not specified, the pod's hostname will be set to a system-defined value.\"", args=[d.arg(name='hostname', type=d.T.string)]), withHostname(hostname): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { hostname: hostname } } } } } }, - '#withImagePullSecrets':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), + '#withImagePullSecrets':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), withImagePullSecrets(imagePullSecrets): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { imagePullSecrets: if std.isArray(v=imagePullSecrets) then imagePullSecrets else [imagePullSecrets] } } } } } }, - '#withImagePullSecretsMixin':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), + '#withImagePullSecretsMixin':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), withImagePullSecretsMixin(imagePullSecrets): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { imagePullSecrets+: if std.isArray(v=imagePullSecrets) then imagePullSecrets else [imagePullSecrets] } } } } } }, '#withInitContainers':: d.fn(help='"List of initialization containers belonging to the pod. Init containers are executed in order prior to containers being started. If any init container fails, the pod is considered to have failed and is handled according to its restartPolicy. The name for an init container or normal container must be unique among all containers. Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes. 
The resourceRequirements of an init container are taken into account during scheduling by finding the highest request/limit for each resource type, and then using the max of of that value or the sum of the normal containers. Limits are applied to init containers in a similar fashion. Init containers cannot currently be added or removed. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/"', args=[d.arg(name='initContainers', type=d.T.array)]), withInitContainers(initContainers): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { initContainers: if std.isArray(v=initContainers) then initContainers else [initContainers] } } } } } }, @@ -306,26 +316,34 @@ withNodeSelector(nodeSelector): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { nodeSelector: nodeSelector } } } } } }, '#withNodeSelectorMixin':: d.fn(help="\"NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='nodeSelector', type=d.T.object)]), withNodeSelectorMixin(nodeSelector): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { nodeSelector+: nodeSelector } } } } } }, - '#withOverhead':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md This field is alpha-level as of Kubernetes v1.16, and is only honored by servers that enable the PodOverhead feature."', args=[d.arg(name='overhead', type=d.T.object)]), + '#withOverhead':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md"', args=[d.arg(name='overhead', type=d.T.object)]), withOverhead(overhead): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { overhead: overhead } } } } } }, - '#withOverheadMixin':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. 
The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md This field is alpha-level as of Kubernetes v1.16, and is only honored by servers that enable the PodOverhead feature."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='overhead', type=d.T.object)]), + '#withOverheadMixin':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='overhead', type=d.T.object)]), withOverheadMixin(overhead): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { overhead+: overhead } } } } } }, - '#withPreemptionPolicy':: d.fn(help='"PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset. This field is beta-level, gated by the NonPreemptingPriority feature-gate."', args=[d.arg(name='preemptionPolicy', type=d.T.string)]), + '#withPreemptionPolicy':: d.fn(help='"PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset."', args=[d.arg(name='preemptionPolicy', type=d.T.string)]), withPreemptionPolicy(preemptionPolicy): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { preemptionPolicy: preemptionPolicy } } } } } }, '#withPriority':: d.fn(help='"The priority value. Various system components use this field to find the priority of the pod. When Priority Admission Controller is enabled, it prevents users from setting this field. The admission controller populates this field from PriorityClassName. The higher the value, the higher the priority."', args=[d.arg(name='priority', type=d.T.integer)]), withPriority(priority): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { priority: priority } } } } } }, '#withPriorityClassName':: d.fn(help="\"If specified, indicates the pod's priority. \\\"system-node-critical\\\" and \\\"system-cluster-critical\\\" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default.\"", args=[d.arg(name='priorityClassName', type=d.T.string)]), withPriorityClassName(priorityClassName): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { priorityClassName: priorityClassName } } } } } }, - '#withReadinessGates':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. 
A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/0007-pod-ready%2B%2B.md"', args=[d.arg(name='readinessGates', type=d.T.array)]), + '#withReadinessGates':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates"', args=[d.arg(name='readinessGates', type=d.T.array)]), withReadinessGates(readinessGates): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { readinessGates: if std.isArray(v=readinessGates) then readinessGates else [readinessGates] } } } } } }, - '#withReadinessGatesMixin':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/0007-pod-ready%2B%2B.md"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='readinessGates', type=d.T.array)]), + '#withReadinessGatesMixin':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='readinessGates', type=d.T.array)]), withReadinessGatesMixin(readinessGates): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { readinessGates+: if std.isArray(v=readinessGates) then readinessGates else [readinessGates] } } } } } }, - '#withRestartPolicy':: d.fn(help='"Restart policy for all containers within the pod. One of Always, OnFailure, Never. Default to Always. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy"', args=[d.arg(name='restartPolicy', type=d.T.string)]), + '#withResourceClaims':: d.fn(help='"ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. The resources will be made available to those containers which consume them by name.\\n\\nThis is an alpha field and requires enabling the DynamicResourceAllocation feature gate.\\n\\nThis field is immutable."', args=[d.arg(name='resourceClaims', type=d.T.array)]), + withResourceClaims(resourceClaims): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { resourceClaims: if std.isArray(v=resourceClaims) then resourceClaims else [resourceClaims] } } } } } }, + '#withResourceClaimsMixin':: d.fn(help='"ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. 
The resources will be made available to those containers which consume them by name.\\n\\nThis is an alpha field and requires enabling the DynamicResourceAllocation feature gate.\\n\\nThis field is immutable."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='resourceClaims', type=d.T.array)]), + withResourceClaimsMixin(resourceClaims): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { resourceClaims+: if std.isArray(v=resourceClaims) then resourceClaims else [resourceClaims] } } } } } }, + '#withRestartPolicy':: d.fn(help='"Restart policy for all containers within the pod. One of Always, OnFailure, Never. In some contexts, only a subset of those values may be permitted. Default to Always. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy"', args=[d.arg(name='restartPolicy', type=d.T.string)]), withRestartPolicy(restartPolicy): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { restartPolicy: restartPolicy } } } } } }, - '#withRuntimeClassName':: d.fn(help='"RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \\"legacy\\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md This is a beta feature as of Kubernetes v1.14."', args=[d.arg(name='runtimeClassName', type=d.T.string)]), + '#withRuntimeClassName':: d.fn(help='"RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \\"legacy\\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class"', args=[d.arg(name='runtimeClassName', type=d.T.string)]), withRuntimeClassName(runtimeClassName): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { runtimeClassName: runtimeClassName } } } } } }, '#withSchedulerName':: d.fn(help='"If specified, the pod will be dispatched by specified scheduler. If not specified, the pod will be dispatched by default scheduler."', args=[d.arg(name='schedulerName', type=d.T.string)]), withSchedulerName(schedulerName): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { schedulerName: schedulerName } } } } } }, + '#withSchedulingGates':: d.fn(help='"SchedulingGates is an opaque list of values that if specified will block scheduling the pod. If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the scheduler will not attempt to schedule the pod.\\n\\nSchedulingGates can only be set at pod creation time, and be removed only afterwards.\\n\\nThis is a beta feature enabled by the PodSchedulingReadiness feature gate."', args=[d.arg(name='schedulingGates', type=d.T.array)]), + withSchedulingGates(schedulingGates): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { schedulingGates: if std.isArray(v=schedulingGates) then schedulingGates else [schedulingGates] } } } } } }, + '#withSchedulingGatesMixin':: d.fn(help='"SchedulingGates is an opaque list of values that if specified will block scheduling the pod. 
If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the scheduler will not attempt to schedule the pod.\\n\\nSchedulingGates can only be set at pod creation time, and be removed only afterwards.\\n\\nThis is a beta feature enabled by the PodSchedulingReadiness feature gate."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='schedulingGates', type=d.T.array)]), + withSchedulingGatesMixin(schedulingGates): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { schedulingGates+: if std.isArray(v=schedulingGates) then schedulingGates else [schedulingGates] } } } } } }, '#withServiceAccount':: d.fn(help='"DeprecatedServiceAccount is a depreciated alias for ServiceAccountName. Deprecated: Use serviceAccountName instead."', args=[d.arg(name='serviceAccount', type=d.T.string)]), withServiceAccount(serviceAccount): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { serviceAccount: serviceAccount } } } } } }, '#withServiceAccountName':: d.fn(help='"ServiceAccountName is the name of the ServiceAccount to use to run this pod. More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/"', args=[d.arg(name='serviceAccountName', type=d.T.string)]), @@ -356,21 +374,27 @@ withActiveDeadlineSeconds(activeDeadlineSeconds): { spec+: { jobTemplate+: { spec+: { activeDeadlineSeconds: activeDeadlineSeconds } } } }, '#withBackoffLimit':: d.fn(help='"Specifies the number of retries before marking this job failed. Defaults to 6"', args=[d.arg(name='backoffLimit', type=d.T.integer)]), withBackoffLimit(backoffLimit): { spec+: { jobTemplate+: { spec+: { backoffLimit: backoffLimit } } } }, - '#withCompletionMode':: d.fn(help="\"CompletionMode specifies how Pod completions are tracked. It can be `NonIndexed` (default) or `Indexed`.\\n\\n`NonIndexed` means that the Job is considered complete when there have been .spec.completions successfully completed Pods. Each Pod completion is homologous to each other.\\n\\n`Indexed` means that the Pods of a Job get an associated completion index from 0 to (.spec.completions - 1), available in the annotation batch.kubernetes.io/job-completion-index. The Job is considered complete when there is one successfully completed Pod for each index. When value is `Indexed`, .spec.completions must be specified and `.spec.parallelism` must be less than or equal to 10^5.\\n\\nThis field is alpha-level and is only honored by servers that enable the IndexedJob feature gate. More completion modes can be added in the future. If the Job controller observes a mode that it doesn't recognize, the controller skips updates for the Job.\"", args=[d.arg(name='completionMode', type=d.T.string)]), + '#withBackoffLimitPerIndex':: d.fn(help="\"Specifies the limit for the number of retries within an index before marking this index as failed. When enabled the number of failures per index is kept in the pod's batch.kubernetes.io/job-index-failure-count annotation. It can only be set when Job's completionMode=Indexed, and the Pod's restart policy is Never. The field is immutable. This field is beta-level. It can be used when the `JobBackoffLimitPerIndex` feature gate is enabled (enabled by default).\"", args=[d.arg(name='backoffLimitPerIndex', type=d.T.integer)]), + withBackoffLimitPerIndex(backoffLimitPerIndex): { spec+: { jobTemplate+: { spec+: { backoffLimitPerIndex: backoffLimitPerIndex } } } }, + '#withCompletionMode':: d.fn(help="\"completionMode specifies how Pod completions are tracked. 
It can be `NonIndexed` (default) or `Indexed`.\\n\\n`NonIndexed` means that the Job is considered complete when there have been .spec.completions successfully completed Pods. Each Pod completion is homologous to each other.\\n\\n`Indexed` means that the Pods of a Job get an associated completion index from 0 to (.spec.completions - 1), available in the annotation batch.kubernetes.io/job-completion-index. The Job is considered complete when there is one successfully completed Pod for each index. When value is `Indexed`, .spec.completions must be specified and `.spec.parallelism` must be less than or equal to 10^5. In addition, The Pod name takes the form `$(job-name)-$(index)-$(random-string)`, the Pod hostname takes the form `$(job-name)-$(index)`.\\n\\nMore completion modes can be added in the future. If the Job controller observes a mode that it doesn't recognize, which is possible during upgrades due to version skew, the controller skips updates for the Job.\"", args=[d.arg(name='completionMode', type=d.T.string)]), withCompletionMode(completionMode): { spec+: { jobTemplate+: { spec+: { completionMode: completionMode } } } }, - '#withCompletions':: d.fn(help='"Specifies the desired number of successfully finished pods the job should be run with. Setting to nil means that the success of any pod signals the success of all pods, and allows parallelism to have any positive value. Setting to 1 means that parallelism is limited to 1 and the success of that pod signals the success of the job. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/"', args=[d.arg(name='completions', type=d.T.integer)]), + '#withCompletions':: d.fn(help='"Specifies the desired number of successfully finished pods the job should be run with. Setting to null means that the success of any pod signals the success of all pods, and allows parallelism to have any positive value. Setting to 1 means that parallelism is limited to 1 and the success of that pod signals the success of the job. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/"', args=[d.arg(name='completions', type=d.T.integer)]), withCompletions(completions): { spec+: { jobTemplate+: { spec+: { completions: completions } } } }, '#withManualSelector':: d.fn(help='"manualSelector controls generation of pod labels and pod selectors. Leave `manualSelector` unset unless you are certain what you are doing. When false or unset, the system pick labels unique to this job and appends those labels to the pod template. When true, the user is responsible for picking unique labels and specifying the selector. Failure to pick a unique label may cause this and other jobs to not function correctly. However, You may see `manualSelector=true` in jobs that were created with the old `extensions/v1beta1` API. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/#specifying-your-own-pod-selector"', args=[d.arg(name='manualSelector', type=d.T.boolean)]), withManualSelector(manualSelector): { spec+: { jobTemplate+: { spec+: { manualSelector: manualSelector } } } }, + '#withMaxFailedIndexes':: d.fn(help='"Specifies the maximal number of failed indexes before marking the Job as failed, when backoffLimitPerIndex is set. Once the number of failed indexes exceeds this number the entire Job is marked as Failed and its execution is terminated. When left as null the job continues execution of all of its indexes and is marked with the `Complete` Job condition. 
It can only be specified when backoffLimitPerIndex is set. It can be null or up to completions. It is required and must be less than or equal to 10^4 when is completions greater than 10^5. This field is beta-level. It can be used when the `JobBackoffLimitPerIndex` feature gate is enabled (enabled by default)."', args=[d.arg(name='maxFailedIndexes', type=d.T.integer)]), + withMaxFailedIndexes(maxFailedIndexes): { spec+: { jobTemplate+: { spec+: { maxFailedIndexes: maxFailedIndexes } } } }, '#withParallelism':: d.fn(help='"Specifies the maximum desired number of pods the job should run at any given time. The actual number of pods running in steady state will be less than this number when ((.spec.completions - .status.successful) < .spec.parallelism), i.e. when the work left to do is less than max parallelism. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/"', args=[d.arg(name='parallelism', type=d.T.integer)]), withParallelism(parallelism): { spec+: { jobTemplate+: { spec+: { parallelism: parallelism } } } }, - '#withSuspend':: d.fn(help='"Suspend specifies whether the Job controller should create Pods or not. If a Job is created with suspend set to true, no Pods are created by the Job controller. If a Job is suspended after creation (i.e. the flag goes from false to true), the Job controller will delete all active Pods associated with this Job. Users must design their workload to gracefully handle this. Suspending a Job will reset the StartTime field of the Job, effectively resetting the ActiveDeadlineSeconds timer too. This is an alpha field and requires the SuspendJob feature gate to be enabled; otherwise this field may not be set to true. Defaults to false."', args=[d.arg(name='suspend', type=d.T.boolean)]), + '#withPodReplacementPolicy':: d.fn(help='"podReplacementPolicy specifies when to create replacement Pods. Possible values are: - TerminatingOrFailed means that we recreate pods\\n when they are terminating (has a metadata.deletionTimestamp) or failed.\\n- Failed means to wait until a previously created Pod is fully terminated (has phase\\n Failed or Succeeded) before creating a replacement Pod.\\n\\nWhen using podFailurePolicy, Failed is the the only allowed value. TerminatingOrFailed and Failed are allowed values when podFailurePolicy is not in use. This is an beta field. To use this, enable the JobPodReplacementPolicy feature toggle. This is on by default."', args=[d.arg(name='podReplacementPolicy', type=d.T.string)]), + withPodReplacementPolicy(podReplacementPolicy): { spec+: { jobTemplate+: { spec+: { podReplacementPolicy: podReplacementPolicy } } } }, + '#withSuspend':: d.fn(help='"suspend specifies whether the Job controller should create Pods or not. If a Job is created with suspend set to true, no Pods are created by the Job controller. If a Job is suspended after creation (i.e. the flag goes from false to true), the Job controller will delete all active Pods associated with this Job. Users must design their workload to gracefully handle this. Suspending a Job will reset the StartTime field of the Job, effectively resetting the ActiveDeadlineSeconds timer too. Defaults to false."', args=[d.arg(name='suspend', type=d.T.boolean)]), withSuspend(suspend): { spec+: { jobTemplate+: { spec+: { suspend: suspend } } } }, - '#withTtlSecondsAfterFinished':: d.fn(help="\"ttlSecondsAfterFinished limits the lifetime of a Job that has finished execution (either Complete or Failed). 
If this field is set, ttlSecondsAfterFinished after the Job finishes, it is eligible to be automatically deleted. When the Job is being deleted, its lifecycle guarantees (e.g. finalizers) will be honored. If this field is unset, the Job won't be automatically deleted. If this field is set to zero, the Job becomes eligible to be deleted immediately after it finishes. This field is alpha-level and is only honored by servers that enable the TTLAfterFinished feature.\"", args=[d.arg(name='ttlSecondsAfterFinished', type=d.T.integer)]), + '#withTtlSecondsAfterFinished':: d.fn(help="\"ttlSecondsAfterFinished limits the lifetime of a Job that has finished execution (either Complete or Failed). If this field is set, ttlSecondsAfterFinished after the Job finishes, it is eligible to be automatically deleted. When the Job is being deleted, its lifecycle guarantees (e.g. finalizers) will be honored. If this field is unset, the Job won't be automatically deleted. If this field is set to zero, the Job becomes eligible to be deleted immediately after it finishes.\"", args=[d.arg(name='ttlSecondsAfterFinished', type=d.T.integer)]), withTtlSecondsAfterFinished(ttlSecondsAfterFinished): { spec+: { jobTemplate+: { spec+: { ttlSecondsAfterFinished: ttlSecondsAfterFinished } } } }, }, }, - '#withConcurrencyPolicy':: d.fn(help="\"Specifies how to treat concurrent executions of a Job. Valid values are: - \\\"Allow\\\" (default): allows CronJobs to run concurrently; - \\\"Forbid\\\": forbids concurrent runs, skipping next run if previous run hasn't finished yet; - \\\"Replace\\\": cancels currently running job and replaces it with a new one\"", args=[d.arg(name='concurrencyPolicy', type=d.T.string)]), + '#withConcurrencyPolicy':: d.fn(help="\"Specifies how to treat concurrent executions of a Job. Valid values are:\\n\\n- \\\"Allow\\\" (default): allows CronJobs to run concurrently; - \\\"Forbid\\\": forbids concurrent runs, skipping next run if previous run hasn't finished yet; - \\\"Replace\\\": cancels currently running job and replaces it with a new one\"", args=[d.arg(name='concurrencyPolicy', type=d.T.string)]), withConcurrencyPolicy(concurrencyPolicy): { spec+: { concurrencyPolicy: concurrencyPolicy } }, '#withFailedJobsHistoryLimit':: d.fn(help='"The number of failed finished jobs to retain. Value must be non-negative integer. Defaults to 1."', args=[d.arg(name='failedJobsHistoryLimit', type=d.T.integer)]), withFailedJobsHistoryLimit(failedJobsHistoryLimit): { spec+: { failedJobsHistoryLimit: failedJobsHistoryLimit } }, @@ -382,6 +406,8 @@ withSuccessfulJobsHistoryLimit(successfulJobsHistoryLimit): { spec+: { successfulJobsHistoryLimit: successfulJobsHistoryLimit } }, '#withSuspend':: d.fn(help='"This flag tells the controller to suspend subsequent executions, it does not apply to already started executions. Defaults to false."', args=[d.arg(name='suspend', type=d.T.boolean)]), withSuspend(suspend): { spec+: { suspend: suspend } }, + '#withTimeZone':: d.fn(help='"The time zone name for the given schedule, see https://en.wikipedia.org/wiki/List_of_tz_database_time_zones. If not specified, this will default to the time zone of the kube-controller-manager process. The set of valid time zone names and the time zone offset is loaded from the system-wide time zone database by the API server during CronJob validation and the controller manager during execution. If no system-wide time zone database can be found a bundled version of the database is used instead. 
If the time zone name becomes invalid during the lifetime of a CronJob or due to a change in host configuration, the controller will stop creating new new Jobs and will create a system event with the reason UnknownTimeZone. More information can be found in https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/#time-zones"', args=[d.arg(name='timeZone', type=d.T.string)]), + withTimeZone(timeZone): { spec+: { timeZone: timeZone } }, }, '#mixin': 'ignore', mixin: self, diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/batch/v1/cronJobSpec.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/batch/v1/cronJobSpec.libsonnet similarity index 81% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/batch/v1/cronJobSpec.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/batch/v1/cronJobSpec.libsonnet index 8af5f1b1234..3cae089c594 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/batch/v1/cronJobSpec.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/batch/v1/cronJobSpec.libsonnet @@ -5,12 +5,10 @@ jobTemplate: { '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { jobTemplate+: { metadata+: { annotations: annotations } } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { jobTemplate+: { metadata+: { annotations+: annotations } } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. 
This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { jobTemplate+: { metadata+: { clusterName: clusterName } } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { jobTemplate+: { metadata+: { creationTimestamp: creationTimestamp } } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -21,21 +19,21 @@ withFinalizers(finalizers): { jobTemplate+: { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { jobTemplate+: { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. 
If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { jobTemplate+: { metadata+: { generateName: generateName } } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { jobTemplate+: { metadata+: { generation: generation } } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { jobTemplate+: { metadata+: { labels: labels } } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { jobTemplate+: { metadata+: { labels+: labels } } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { jobTemplate+: { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. 
A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { jobTemplate+: { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { jobTemplate+: { metadata+: { name: name } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { jobTemplate+: { metadata+: { namespace: namespace } } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { jobTemplate+: { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } } }, @@ -43,13 +41,20 @@ withOwnerReferencesMixin(ownerReferences): { jobTemplate+: { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. 
May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { jobTemplate+: { metadata+: { resourceVersion: resourceVersion } } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { jobTemplate+: { metadata+: { selfLink: selfLink } } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { jobTemplate+: { metadata+: { uid: uid } } }, }, '#spec':: d.obj(help='"JobSpec describes how the job execution will look like."'), spec: { + '#podFailurePolicy':: d.obj(help='"PodFailurePolicy describes how failed pods influence the backoffLimit."'), + podFailurePolicy: { + '#withRules':: d.fn(help='"A list of pod failure policy rules. The rules are evaluated in order. Once a rule matches a Pod failure, the remaining of the rules are ignored. When no rule matches the Pod failure, the default handling applies - the counter of pod failures is incremented and it is checked against the backoffLimit. At most 20 elements are allowed."', args=[d.arg(name='rules', type=d.T.array)]), + withRules(rules): { jobTemplate+: { spec+: { podFailurePolicy+: { rules: if std.isArray(v=rules) then rules else [rules] } } } }, + '#withRulesMixin':: d.fn(help='"A list of pod failure policy rules. The rules are evaluated in order. Once a rule matches a Pod failure, the remaining of the rules are ignored. When no rule matches the Pod failure, the default handling applies - the counter of pod failures is incremented and it is checked against the backoffLimit. At most 20 elements are allowed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='rules', type=d.T.array)]), + withRulesMixin(rules): { jobTemplate+: { spec+: { podFailurePolicy+: { rules+: if std.isArray(v=rules) then rules else [rules] } } } }, + }, '#selector':: d.obj(help='"A label selector is a label query over a set of resources. 
The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), selector: { '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), @@ -65,12 +70,10 @@ template: { '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { jobTemplate+: { spec+: { template+: { metadata+: { annotations: annotations } } } } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { jobTemplate+: { spec+: { template+: { metadata+: { annotations+: annotations } } } } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { jobTemplate+: { spec+: { template+: { metadata+: { clusterName: clusterName } } } } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { jobTemplate+: { spec+: { template+: { metadata+: { creationTimestamp: creationTimestamp } } } } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. 
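
The newly generated `podFailurePolicy` object above only wraps the `rules` list; the rule objects themselves are passed through as plain JSON. A sketch of a rule set that fails the Job on a specific exit code and ignores disruption-driven pod deletions, with the rule shape taken from the upstream PodFailurePolicyRule API rather than from this hunk (same assumed `k` import and access path as in the earlier sketch):

    local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';  // assumed vendor path
    local jobSpec = k.batch.v1.cronJob.spec.jobTemplate.spec;

    jobSpec.withBackoffLimit(6)
    + jobSpec.podFailurePolicy.withRules([
      {
        // Fail the whole Job as soon as the 'main' container exits with 42.
        action: 'FailJob',
        onExitCodes: { containerName: 'main', operator: 'In', values: [42] },
      },
      {
        // Do not count node-drain style disruptions against backoffLimit.
        action: 'Ignore',
        onPodConditions: [{ type: 'DisruptionTarget', status: 'True' }],
      },
    ])
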
Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -81,21 +84,21 @@ withFinalizers(finalizers): { jobTemplate+: { spec+: { template+: { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } } } } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { jobTemplate+: { spec+: { template+: { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } } } } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { jobTemplate+: { spec+: { template+: { metadata+: { generateName: generateName } } } } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. 
Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { jobTemplate+: { spec+: { template+: { metadata+: { generation: generation } } } } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { jobTemplate+: { spec+: { template+: { metadata+: { labels: labels } } } } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { jobTemplate+: { spec+: { template+: { metadata+: { labels+: labels } } } } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { jobTemplate+: { spec+: { template+: { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } } } } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { jobTemplate+: { spec+: { template+: { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } } } } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. 
Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { jobTemplate+: { spec+: { template+: { metadata+: { name: name } } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { jobTemplate+: { spec+: { template+: { metadata+: { namespace: namespace } } } } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { jobTemplate+: { spec+: { template+: { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } } } } }, @@ -103,9 +106,9 @@ withOwnerReferencesMixin(ownerReferences): { jobTemplate+: { spec+: { template+: { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } } } } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { jobTemplate+: { spec+: { template+: { metadata+: { resourceVersion: resourceVersion } } } } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { jobTemplate+: { spec+: { template+: { metadata+: { selfLink: selfLink } } } } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { jobTemplate+: { spec+: { template+: { metadata+: { uid: uid } } } } }, }, '#spec':: d.obj(help='"PodSpec is a description of a pod."'), @@ -164,6 +167,11 @@ '#withSearchesMixin':: d.fn(help='"A list of DNS search domains for host-name lookup. This will be appended to the base search paths generated from DNSPolicy. Duplicated search paths will be removed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='searches', type=d.T.array)]), withSearchesMixin(searches): { jobTemplate+: { spec+: { template+: { spec+: { dnsConfig+: { searches+: if std.isArray(v=searches) then searches else [searches] } } } } } }, }, + '#os':: d.obj(help='"PodOS defines the OS parameters of a pod."'), + os: { + '#withName':: d.fn(help='"Name is the name of the operating system. The currently supported values are linux and windows. Additional value may be defined in future and can be one of: https://github.com/opencontainers/runtime-spec/blob/master/config.md#platform-specific-configuration Clients should expect to handle additional values and treat unrecognized values in this field as os: null"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { jobTemplate+: { spec+: { template+: { spec+: { os+: { name: name } } } } } }, + }, '#securityContext':: d.obj(help='"PodSecurityContext holds pod-level security attributes and common container settings. Some fields are also present in container.securityContext. Field values of container.securityContext take precedence over field values of PodSecurityContext."'), securityContext: { '#seLinuxOptions':: d.obj(help='"SELinuxOptions are the labels to be applied to the container"'), @@ -179,7 +187,7 @@ }, '#seccompProfile':: d.obj(help="\"SeccompProfile defines a pod/container's seccomp profile settings. 
Only one profile source may be set.\""), seccompProfile: { - '#withLocalhostProfile':: d.fn(help="\"localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must only be set if type is \\\"Localhost\\\".\"", args=[d.arg(name='localhostProfile', type=d.T.string)]), + '#withLocalhostProfile':: d.fn(help="\"localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must be set if type is \\\"Localhost\\\". Must NOT be set for any other type.\"", args=[d.arg(name='localhostProfile', type=d.T.string)]), withLocalhostProfile(localhostProfile): { jobTemplate+: { spec+: { template+: { spec+: { securityContext+: { seccompProfile+: { localhostProfile: localhostProfile } } } } } } }, '#withType':: d.fn(help='"type indicates which kind of seccomp profile will be applied. Valid options are:\\n\\nLocalhost - a profile defined in a file on the node should be used. RuntimeDefault - the container runtime default profile should be used. Unconfined - no profile should be applied."', args=[d.arg(name='type', type=d.T.string)]), withType(type): { jobTemplate+: { spec+: { template+: { spec+: { securityContext+: { seccompProfile+: { type: type } } } } } } }, @@ -190,26 +198,28 @@ withGmsaCredentialSpec(gmsaCredentialSpec): { jobTemplate+: { spec+: { template+: { spec+: { securityContext+: { windowsOptions+: { gmsaCredentialSpec: gmsaCredentialSpec } } } } } } }, '#withGmsaCredentialSpecName':: d.fn(help='"GMSACredentialSpecName is the name of the GMSA credential spec to use."', args=[d.arg(name='gmsaCredentialSpecName', type=d.T.string)]), withGmsaCredentialSpecName(gmsaCredentialSpecName): { jobTemplate+: { spec+: { template+: { spec+: { securityContext+: { windowsOptions+: { gmsaCredentialSpecName: gmsaCredentialSpecName } } } } } } }, + '#withHostProcess':: d.fn(help="\"HostProcess determines if a container should be run as a 'Host Process' container. All of a Pod's containers must have the same effective HostProcess value (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). In addition, if HostProcess is true then HostNetwork must also be set to true.\"", args=[d.arg(name='hostProcess', type=d.T.boolean)]), + withHostProcess(hostProcess): { jobTemplate+: { spec+: { template+: { spec+: { securityContext+: { windowsOptions+: { hostProcess: hostProcess } } } } } } }, '#withRunAsUserName':: d.fn(help='"The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence."', args=[d.arg(name='runAsUserName', type=d.T.string)]), withRunAsUserName(runAsUserName): { jobTemplate+: { spec+: { template+: { spec+: { securityContext+: { windowsOptions+: { runAsUserName: runAsUserName } } } } } } }, }, - '#withFsGroup':: d.fn(help="\"A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod:\\n\\n1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. 
The permission bits are OR'd with rw-rw----\\n\\nIf unset, the Kubelet will not modify the ownership and permissions of any volume.\"", args=[d.arg(name='fsGroup', type=d.T.integer)]), + '#withFsGroup':: d.fn(help="\"A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod:\\n\\n1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw----\\n\\nIf unset, the Kubelet will not modify the ownership and permissions of any volume. Note that this field cannot be set when spec.os.name is windows.\"", args=[d.arg(name='fsGroup', type=d.T.integer)]), withFsGroup(fsGroup): { jobTemplate+: { spec+: { template+: { spec+: { securityContext+: { fsGroup: fsGroup } } } } } }, - '#withFsGroupChangePolicy':: d.fn(help='"fsGroupChangePolicy defines behavior of changing ownership and permission of the volume before being exposed inside Pod. This field will only apply to volume types which support fsGroup based ownership(and permissions). It will have no effect on ephemeral volume types such as: secret, configmaps and emptydir. Valid values are \\"OnRootMismatch\\" and \\"Always\\". If not specified, \\"Always\\" is used."', args=[d.arg(name='fsGroupChangePolicy', type=d.T.string)]), + '#withFsGroupChangePolicy':: d.fn(help='"fsGroupChangePolicy defines behavior of changing ownership and permission of the volume before being exposed inside Pod. This field will only apply to volume types which support fsGroup based ownership(and permissions). It will have no effect on ephemeral volume types such as: secret, configmaps and emptydir. Valid values are \\"OnRootMismatch\\" and \\"Always\\". If not specified, \\"Always\\" is used. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='fsGroupChangePolicy', type=d.T.string)]), withFsGroupChangePolicy(fsGroupChangePolicy): { jobTemplate+: { spec+: { template+: { spec+: { securityContext+: { fsGroupChangePolicy: fsGroupChangePolicy } } } } } }, - '#withRunAsGroup':: d.fn(help='"The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container."', args=[d.arg(name='runAsGroup', type=d.T.integer)]), + '#withRunAsGroup':: d.fn(help='"The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='runAsGroup', type=d.T.integer)]), withRunAsGroup(runAsGroup): { jobTemplate+: { spec+: { template+: { spec+: { securityContext+: { runAsGroup: runAsGroup } } } } } }, '#withRunAsNonRoot':: d.fn(help='"Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in SecurityContext. 
If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence."', args=[d.arg(name='runAsNonRoot', type=d.T.boolean)]), withRunAsNonRoot(runAsNonRoot): { jobTemplate+: { spec+: { template+: { spec+: { securityContext+: { runAsNonRoot: runAsNonRoot } } } } } }, - '#withRunAsUser':: d.fn(help='"The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container."', args=[d.arg(name='runAsUser', type=d.T.integer)]), + '#withRunAsUser':: d.fn(help='"The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='runAsUser', type=d.T.integer)]), withRunAsUser(runAsUser): { jobTemplate+: { spec+: { template+: { spec+: { securityContext+: { runAsUser: runAsUser } } } } } }, - '#withSupplementalGroups':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID. If unspecified, no groups will be added to any container.\"", args=[d.arg(name='supplementalGroups', type=d.T.array)]), + '#withSupplementalGroups':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID, the fsGroup (if specified), and group memberships defined in the container image for the uid of the container process. If unspecified, no additional groups are added to any container. Note that group memberships defined in the container image for the uid of the container process are still effective, even if they are not included in this list. Note that this field cannot be set when spec.os.name is windows.\"", args=[d.arg(name='supplementalGroups', type=d.T.array)]), withSupplementalGroups(supplementalGroups): { jobTemplate+: { spec+: { template+: { spec+: { securityContext+: { supplementalGroups: if std.isArray(v=supplementalGroups) then supplementalGroups else [supplementalGroups] } } } } } }, - '#withSupplementalGroupsMixin':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID. If unspecified, no groups will be added to any container.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='supplementalGroups', type=d.T.array)]), + '#withSupplementalGroupsMixin':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID, the fsGroup (if specified), and group memberships defined in the container image for the uid of the container process. If unspecified, no additional groups are added to any container. Note that group memberships defined in the container image for the uid of the container process are still effective, even if they are not included in this list. 
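
Several of the updated help strings above hinge on the new `spec.os` object: most pod-level security fields are rejected when `spec.os.name` is `windows`. A sketch of the common Linux case, declaring the OS explicitly and using the generated pod-level securityContext helpers (same assumed `k` import and access path):

    local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';  // assumed vendor path
    local podSpec = k.batch.v1.cronJob.spec.jobTemplate.spec.template.spec;

    podSpec.os.withName('linux')
    + podSpec.securityContext.withRunAsNonRoot(true)
    + podSpec.securityContext.withRunAsUser(10001)
    + podSpec.securityContext.withFsGroup(10001)
    + podSpec.securityContext.seccompProfile.withType('RuntimeDefault')
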
Note that this field cannot be set when spec.os.name is windows.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='supplementalGroups', type=d.T.array)]), withSupplementalGroupsMixin(supplementalGroups): { jobTemplate+: { spec+: { template+: { spec+: { securityContext+: { supplementalGroups+: if std.isArray(v=supplementalGroups) then supplementalGroups else [supplementalGroups] } } } } } }, - '#withSysctls':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch."', args=[d.arg(name='sysctls', type=d.T.array)]), + '#withSysctls':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='sysctls', type=d.T.array)]), withSysctls(sysctls): { jobTemplate+: { spec+: { template+: { spec+: { securityContext+: { sysctls: if std.isArray(v=sysctls) then sysctls else [sysctls] } } } } } }, - '#withSysctlsMixin':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='sysctls', type=d.T.array)]), + '#withSysctlsMixin':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch. Note that this field cannot be set when spec.os.name is windows."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='sysctls', type=d.T.array)]), withSysctlsMixin(sysctls): { jobTemplate+: { spec+: { template+: { spec+: { securityContext+: { sysctls+: if std.isArray(v=sysctls) then sysctls else [sysctls] } } } } } }, }, '#withActiveDeadlineSeconds':: d.fn(help='"Optional duration in seconds the pod may be active on the node relative to StartTime before the system will actively try to mark it failed and kill associated containers. Value must be a positive integer."', args=[d.arg(name='activeDeadlineSeconds', type=d.T.integer)]), @@ -224,9 +234,9 @@ withDnsPolicy(dnsPolicy): { jobTemplate+: { spec+: { template+: { spec+: { dnsPolicy: dnsPolicy } } } } }, '#withEnableServiceLinks':: d.fn(help="\"EnableServiceLinks indicates whether information about services should be injected into pod's environment variables, matching the syntax of Docker links. Optional: Defaults to true.\"", args=[d.arg(name='enableServiceLinks', type=d.T.boolean)]), withEnableServiceLinks(enableServiceLinks): { jobTemplate+: { spec+: { template+: { spec+: { enableServiceLinks: enableServiceLinks } } } } }, - '#withEphemeralContainers':: d.fn(help="\"List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. This field is alpha-level and is only honored by servers that enable the EphemeralContainers feature.\"", args=[d.arg(name='ephemeralContainers', type=d.T.array)]), + '#withEphemeralContainers':: d.fn(help="\"List of ephemeral containers run in this pod. 
Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource.\"", args=[d.arg(name='ephemeralContainers', type=d.T.array)]), withEphemeralContainers(ephemeralContainers): { jobTemplate+: { spec+: { template+: { spec+: { ephemeralContainers: if std.isArray(v=ephemeralContainers) then ephemeralContainers else [ephemeralContainers] } } } } }, - '#withEphemeralContainersMixin':: d.fn(help="\"List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. This field is alpha-level and is only honored by servers that enable the EphemeralContainers feature.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='ephemeralContainers', type=d.T.array)]), + '#withEphemeralContainersMixin':: d.fn(help="\"List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='ephemeralContainers', type=d.T.array)]), withEphemeralContainersMixin(ephemeralContainers): { jobTemplate+: { spec+: { template+: { spec+: { ephemeralContainers+: if std.isArray(v=ephemeralContainers) then ephemeralContainers else [ephemeralContainers] } } } } }, '#withHostAliases':: d.fn(help="\"HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts file if specified. This is only valid for non-hostNetwork pods.\"", args=[d.arg(name='hostAliases', type=d.T.array)]), withHostAliases(hostAliases): { jobTemplate+: { spec+: { template+: { spec+: { hostAliases: if std.isArray(v=hostAliases) then hostAliases else [hostAliases] } } } } }, @@ -238,11 +248,13 @@ withHostNetwork(hostNetwork): { jobTemplate+: { spec+: { template+: { spec+: { hostNetwork: hostNetwork } } } } }, '#withHostPID':: d.fn(help="\"Use the host's pid namespace. Optional: Default to false.\"", args=[d.arg(name='hostPID', type=d.T.boolean)]), withHostPID(hostPID): { jobTemplate+: { spec+: { template+: { spec+: { hostPID: hostPID } } } } }, + '#withHostUsers':: d.fn(help="\"Use the host's user namespace. Optional: Default to true. If set to true or not present, the pod will be run in the host user namespace, useful for when the pod needs a feature only available to the host user namespace, such as loading a kernel module with CAP_SYS_MODULE. When set to false, a new userns is created for the pod. Setting false is useful for mitigating container breakout vulnerabilities even allowing users to run their containers as root without actually having root privileges on the host. 
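
The new `withHostUsers` setter exposes the user-namespace knob on PodSpec; per the help text it is alpha-level and gated by UserNamespacesSupport. A sketch of opting the CronJob's pods out of the host user namespace (same assumed `k` import and access path):

    local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';  // assumed vendor path
    local podSpec = k.batch.v1.cronJob.spec.jobTemplate.spec.template.spec;

    // Run the pod in its own user namespace; requires the UserNamespacesSupport
    // feature gate and a container runtime with userns support.
    podSpec.withHostUsers(false)
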
This field is alpha-level and is only honored by servers that enable the UserNamespacesSupport feature.\"", args=[d.arg(name='hostUsers', type=d.T.boolean)]), + withHostUsers(hostUsers): { jobTemplate+: { spec+: { template+: { spec+: { hostUsers: hostUsers } } } } }, '#withHostname':: d.fn(help="\"Specifies the hostname of the Pod If not specified, the pod's hostname will be set to a system-defined value.\"", args=[d.arg(name='hostname', type=d.T.string)]), withHostname(hostname): { jobTemplate+: { spec+: { template+: { spec+: { hostname: hostname } } } } }, - '#withImagePullSecrets':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), + '#withImagePullSecrets':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), withImagePullSecrets(imagePullSecrets): { jobTemplate+: { spec+: { template+: { spec+: { imagePullSecrets: if std.isArray(v=imagePullSecrets) then imagePullSecrets else [imagePullSecrets] } } } } }, - '#withImagePullSecretsMixin':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), + '#withImagePullSecretsMixin':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), withImagePullSecretsMixin(imagePullSecrets): { jobTemplate+: { spec+: { template+: { spec+: { imagePullSecrets+: if std.isArray(v=imagePullSecrets) then imagePullSecrets else [imagePullSecrets] } } } } }, '#withInitContainers':: d.fn(help='"List of initialization containers belonging to the pod. Init containers are executed in order prior to containers being started. If any init container fails, the pod is considered to have failed and is handled according to its restartPolicy. The name for an init container or normal container must be unique among all containers. Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes. 
The resourceRequirements of an init container are taken into account during scheduling by finding the highest request/limit for each resource type, and then using the max of of that value or the sum of the normal containers. Limits are applied to init containers in a similar fashion. Init containers cannot currently be added or removed. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/"', args=[d.arg(name='initContainers', type=d.T.array)]), withInitContainers(initContainers): { jobTemplate+: { spec+: { template+: { spec+: { initContainers: if std.isArray(v=initContainers) then initContainers else [initContainers] } } } } }, @@ -254,26 +266,34 @@ withNodeSelector(nodeSelector): { jobTemplate+: { spec+: { template+: { spec+: { nodeSelector: nodeSelector } } } } }, '#withNodeSelectorMixin':: d.fn(help="\"NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='nodeSelector', type=d.T.object)]), withNodeSelectorMixin(nodeSelector): { jobTemplate+: { spec+: { template+: { spec+: { nodeSelector+: nodeSelector } } } } }, - '#withOverhead':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md This field is alpha-level as of Kubernetes v1.16, and is only honored by servers that enable the PodOverhead feature."', args=[d.arg(name='overhead', type=d.T.object)]), + '#withOverhead':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md"', args=[d.arg(name='overhead', type=d.T.object)]), withOverhead(overhead): { jobTemplate+: { spec+: { template+: { spec+: { overhead: overhead } } } } }, - '#withOverheadMixin':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. 
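
`withImagePullSecrets`, `withInitContainers` and the other list-valued setters above accept either a single object or an array: the generated `std.isArray(v=...)` guard wraps a scalar into a list. A sketch (same assumed `k` import and access path):

    local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';  // assumed vendor path
    local podSpec = k.batch.v1.cronJob.spec.jobTemplate.spec.template.spec;

    // A single secret reference is wrapped into a one-element list by the guard.
    podSpec.withImagePullSecrets({ name: 'registry-creds' })
    + podSpec.withNodeSelector({ 'kubernetes.io/os': 'linux' })
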
If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md This field is alpha-level as of Kubernetes v1.16, and is only honored by servers that enable the PodOverhead feature."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='overhead', type=d.T.object)]), + '#withOverheadMixin':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='overhead', type=d.T.object)]), withOverheadMixin(overhead): { jobTemplate+: { spec+: { template+: { spec+: { overhead+: overhead } } } } }, - '#withPreemptionPolicy':: d.fn(help='"PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset. This field is beta-level, gated by the NonPreemptingPriority feature-gate."', args=[d.arg(name='preemptionPolicy', type=d.T.string)]), + '#withPreemptionPolicy':: d.fn(help='"PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset."', args=[d.arg(name='preemptionPolicy', type=d.T.string)]), withPreemptionPolicy(preemptionPolicy): { jobTemplate+: { spec+: { template+: { spec+: { preemptionPolicy: preemptionPolicy } } } } }, '#withPriority':: d.fn(help='"The priority value. Various system components use this field to find the priority of the pod. When Priority Admission Controller is enabled, it prevents users from setting this field. The admission controller populates this field from PriorityClassName. The higher the value, the higher the priority."', args=[d.arg(name='priority', type=d.T.integer)]), withPriority(priority): { jobTemplate+: { spec+: { template+: { spec+: { priority: priority } } } } }, '#withPriorityClassName':: d.fn(help="\"If specified, indicates the pod's priority. \\\"system-node-critical\\\" and \\\"system-cluster-critical\\\" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default.\"", args=[d.arg(name='priorityClassName', type=d.T.string)]), withPriorityClassName(priorityClassName): { jobTemplate+: { spec+: { template+: { spec+: { priorityClassName: priorityClassName } } } } }, - '#withReadinessGates':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. 
A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/0007-pod-ready%2B%2B.md"', args=[d.arg(name='readinessGates', type=d.T.array)]), + '#withReadinessGates':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates"', args=[d.arg(name='readinessGates', type=d.T.array)]), withReadinessGates(readinessGates): { jobTemplate+: { spec+: { template+: { spec+: { readinessGates: if std.isArray(v=readinessGates) then readinessGates else [readinessGates] } } } } }, - '#withReadinessGatesMixin':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/0007-pod-ready%2B%2B.md"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='readinessGates', type=d.T.array)]), + '#withReadinessGatesMixin':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='readinessGates', type=d.T.array)]), withReadinessGatesMixin(readinessGates): { jobTemplate+: { spec+: { template+: { spec+: { readinessGates+: if std.isArray(v=readinessGates) then readinessGates else [readinessGates] } } } } }, - '#withRestartPolicy':: d.fn(help='"Restart policy for all containers within the pod. One of Always, OnFailure, Never. Default to Always. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy"', args=[d.arg(name='restartPolicy', type=d.T.string)]), + '#withResourceClaims':: d.fn(help='"ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. The resources will be made available to those containers which consume them by name.\\n\\nThis is an alpha field and requires enabling the DynamicResourceAllocation feature gate.\\n\\nThis field is immutable."', args=[d.arg(name='resourceClaims', type=d.T.array)]), + withResourceClaims(resourceClaims): { jobTemplate+: { spec+: { template+: { spec+: { resourceClaims: if std.isArray(v=resourceClaims) then resourceClaims else [resourceClaims] } } } } }, + '#withResourceClaimsMixin':: d.fn(help='"ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. 
The resources will be made available to those containers which consume them by name.\\n\\nThis is an alpha field and requires enabling the DynamicResourceAllocation feature gate.\\n\\nThis field is immutable."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='resourceClaims', type=d.T.array)]), + withResourceClaimsMixin(resourceClaims): { jobTemplate+: { spec+: { template+: { spec+: { resourceClaims+: if std.isArray(v=resourceClaims) then resourceClaims else [resourceClaims] } } } } }, + '#withRestartPolicy':: d.fn(help='"Restart policy for all containers within the pod. One of Always, OnFailure, Never. In some contexts, only a subset of those values may be permitted. Default to Always. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy"', args=[d.arg(name='restartPolicy', type=d.T.string)]), withRestartPolicy(restartPolicy): { jobTemplate+: { spec+: { template+: { spec+: { restartPolicy: restartPolicy } } } } }, - '#withRuntimeClassName':: d.fn(help='"RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \\"legacy\\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md This is a beta feature as of Kubernetes v1.14."', args=[d.arg(name='runtimeClassName', type=d.T.string)]), + '#withRuntimeClassName':: d.fn(help='"RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \\"legacy\\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class"', args=[d.arg(name='runtimeClassName', type=d.T.string)]), withRuntimeClassName(runtimeClassName): { jobTemplate+: { spec+: { template+: { spec+: { runtimeClassName: runtimeClassName } } } } }, '#withSchedulerName':: d.fn(help='"If specified, the pod will be dispatched by specified scheduler. If not specified, the pod will be dispatched by default scheduler."', args=[d.arg(name='schedulerName', type=d.T.string)]), withSchedulerName(schedulerName): { jobTemplate+: { spec+: { template+: { spec+: { schedulerName: schedulerName } } } } }, + '#withSchedulingGates':: d.fn(help='"SchedulingGates is an opaque list of values that if specified will block scheduling the pod. If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the scheduler will not attempt to schedule the pod.\\n\\nSchedulingGates can only be set at pod creation time, and be removed only afterwards.\\n\\nThis is a beta feature enabled by the PodSchedulingReadiness feature gate."', args=[d.arg(name='schedulingGates', type=d.T.array)]), + withSchedulingGates(schedulingGates): { jobTemplate+: { spec+: { template+: { spec+: { schedulingGates: if std.isArray(v=schedulingGates) then schedulingGates else [schedulingGates] } } } } }, + '#withSchedulingGatesMixin':: d.fn(help='"SchedulingGates is an opaque list of values that if specified will block scheduling the pod. 
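
`withSchedulingGates` is new in this bump and, like the other list setters, takes plain gate objects; pods stay in SchedulingGated until every named gate is removed by a controller. A sketch with an illustrative gate name (same assumed `k` import and access path):

    local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';  // assumed vendor path
    local podSpec = k.batch.v1.cronJob.spec.jobTemplate.spec.template.spec;

    podSpec.withSchedulingGates([{ name: 'example.com/quota-check' }])
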
If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the scheduler will not attempt to schedule the pod.\\n\\nSchedulingGates can only be set at pod creation time, and be removed only afterwards.\\n\\nThis is a beta feature enabled by the PodSchedulingReadiness feature gate."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='schedulingGates', type=d.T.array)]), + withSchedulingGatesMixin(schedulingGates): { jobTemplate+: { spec+: { template+: { spec+: { schedulingGates+: if std.isArray(v=schedulingGates) then schedulingGates else [schedulingGates] } } } } }, '#withServiceAccount':: d.fn(help='"DeprecatedServiceAccount is a depreciated alias for ServiceAccountName. Deprecated: Use serviceAccountName instead."', args=[d.arg(name='serviceAccount', type=d.T.string)]), withServiceAccount(serviceAccount): { jobTemplate+: { spec+: { template+: { spec+: { serviceAccount: serviceAccount } } } } }, '#withServiceAccountName':: d.fn(help='"ServiceAccountName is the name of the ServiceAccount to use to run this pod. More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/"', args=[d.arg(name='serviceAccountName', type=d.T.string)]), @@ -304,21 +324,27 @@ withActiveDeadlineSeconds(activeDeadlineSeconds): { jobTemplate+: { spec+: { activeDeadlineSeconds: activeDeadlineSeconds } } }, '#withBackoffLimit':: d.fn(help='"Specifies the number of retries before marking this job failed. Defaults to 6"', args=[d.arg(name='backoffLimit', type=d.T.integer)]), withBackoffLimit(backoffLimit): { jobTemplate+: { spec+: { backoffLimit: backoffLimit } } }, - '#withCompletionMode':: d.fn(help="\"CompletionMode specifies how Pod completions are tracked. It can be `NonIndexed` (default) or `Indexed`.\\n\\n`NonIndexed` means that the Job is considered complete when there have been .spec.completions successfully completed Pods. Each Pod completion is homologous to each other.\\n\\n`Indexed` means that the Pods of a Job get an associated completion index from 0 to (.spec.completions - 1), available in the annotation batch.kubernetes.io/job-completion-index. The Job is considered complete when there is one successfully completed Pod for each index. When value is `Indexed`, .spec.completions must be specified and `.spec.parallelism` must be less than or equal to 10^5.\\n\\nThis field is alpha-level and is only honored by servers that enable the IndexedJob feature gate. More completion modes can be added in the future. If the Job controller observes a mode that it doesn't recognize, the controller skips updates for the Job.\"", args=[d.arg(name='completionMode', type=d.T.string)]), + '#withBackoffLimitPerIndex':: d.fn(help="\"Specifies the limit for the number of retries within an index before marking this index as failed. When enabled the number of failures per index is kept in the pod's batch.kubernetes.io/job-index-failure-count annotation. It can only be set when Job's completionMode=Indexed, and the Pod's restart policy is Never. The field is immutable. This field is beta-level. It can be used when the `JobBackoffLimitPerIndex` feature gate is enabled (enabled by default).\"", args=[d.arg(name='backoffLimitPerIndex', type=d.T.integer)]), + withBackoffLimitPerIndex(backoffLimitPerIndex): { jobTemplate+: { spec+: { backoffLimitPerIndex: backoffLimitPerIndex } } }, + '#withCompletionMode':: d.fn(help="\"completionMode specifies how Pod completions are tracked. 
It can be `NonIndexed` (default) or `Indexed`.\\n\\n`NonIndexed` means that the Job is considered complete when there have been .spec.completions successfully completed Pods. Each Pod completion is homologous to each other.\\n\\n`Indexed` means that the Pods of a Job get an associated completion index from 0 to (.spec.completions - 1), available in the annotation batch.kubernetes.io/job-completion-index. The Job is considered complete when there is one successfully completed Pod for each index. When value is `Indexed`, .spec.completions must be specified and `.spec.parallelism` must be less than or equal to 10^5. In addition, The Pod name takes the form `$(job-name)-$(index)-$(random-string)`, the Pod hostname takes the form `$(job-name)-$(index)`.\\n\\nMore completion modes can be added in the future. If the Job controller observes a mode that it doesn't recognize, which is possible during upgrades due to version skew, the controller skips updates for the Job.\"", args=[d.arg(name='completionMode', type=d.T.string)]), withCompletionMode(completionMode): { jobTemplate+: { spec+: { completionMode: completionMode } } }, - '#withCompletions':: d.fn(help='"Specifies the desired number of successfully finished pods the job should be run with. Setting to nil means that the success of any pod signals the success of all pods, and allows parallelism to have any positive value. Setting to 1 means that parallelism is limited to 1 and the success of that pod signals the success of the job. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/"', args=[d.arg(name='completions', type=d.T.integer)]), + '#withCompletions':: d.fn(help='"Specifies the desired number of successfully finished pods the job should be run with. Setting to null means that the success of any pod signals the success of all pods, and allows parallelism to have any positive value. Setting to 1 means that parallelism is limited to 1 and the success of that pod signals the success of the job. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/"', args=[d.arg(name='completions', type=d.T.integer)]), withCompletions(completions): { jobTemplate+: { spec+: { completions: completions } } }, '#withManualSelector':: d.fn(help='"manualSelector controls generation of pod labels and pod selectors. Leave `manualSelector` unset unless you are certain what you are doing. When false or unset, the system pick labels unique to this job and appends those labels to the pod template. When true, the user is responsible for picking unique labels and specifying the selector. Failure to pick a unique label may cause this and other jobs to not function correctly. However, You may see `manualSelector=true` in jobs that were created with the old `extensions/v1beta1` API. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/#specifying-your-own-pod-selector"', args=[d.arg(name='manualSelector', type=d.T.boolean)]), withManualSelector(manualSelector): { jobTemplate+: { spec+: { manualSelector: manualSelector } } }, + '#withMaxFailedIndexes':: d.fn(help='"Specifies the maximal number of failed indexes before marking the Job as failed, when backoffLimitPerIndex is set. Once the number of failed indexes exceeds this number the entire Job is marked as Failed and its execution is terminated. When left as null the job continues execution of all of its indexes and is marked with the `Complete` Job condition. 
It can only be specified when backoffLimitPerIndex is set. It can be null or up to completions. It is required and must be less than or equal to 10^4 when completions is greater than 10^5. This field is beta-level. It can be used when the `JobBackoffLimitPerIndex` feature gate is enabled (enabled by default)."', args=[d.arg(name='maxFailedIndexes', type=d.T.integer)]), + withMaxFailedIndexes(maxFailedIndexes): { jobTemplate+: { spec+: { maxFailedIndexes: maxFailedIndexes } } }, '#withParallelism':: d.fn(help='"Specifies the maximum desired number of pods the job should run at any given time. The actual number of pods running in steady state will be less than this number when ((.spec.completions - .status.successful) < .spec.parallelism), i.e. when the work left to do is less than max parallelism. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/"', args=[d.arg(name='parallelism', type=d.T.integer)]), withParallelism(parallelism): { jobTemplate+: { spec+: { parallelism: parallelism } } }, - '#withSuspend':: d.fn(help='"Suspend specifies whether the Job controller should create Pods or not. If a Job is created with suspend set to true, no Pods are created by the Job controller. If a Job is suspended after creation (i.e. the flag goes from false to true), the Job controller will delete all active Pods associated with this Job. Users must design their workload to gracefully handle this. Suspending a Job will reset the StartTime field of the Job, effectively resetting the ActiveDeadlineSeconds timer too. This is an alpha field and requires the SuspendJob feature gate to be enabled; otherwise this field may not be set to true. Defaults to false."', args=[d.arg(name='suspend', type=d.T.boolean)]), + '#withPodReplacementPolicy':: d.fn(help='"podReplacementPolicy specifies when to create replacement Pods. Possible values are: - TerminatingOrFailed means that we recreate pods\\n when they are terminating (has a metadata.deletionTimestamp) or failed.\\n- Failed means to wait until a previously created Pod is fully terminated (has phase\\n Failed or Succeeded) before creating a replacement Pod.\\n\\nWhen using podFailurePolicy, Failed is the only allowed value. TerminatingOrFailed and Failed are allowed values when podFailurePolicy is not in use. This is a beta field. To use this, enable the JobPodReplacementPolicy feature toggle. This is on by default."', args=[d.arg(name='podReplacementPolicy', type=d.T.string)]), + withPodReplacementPolicy(podReplacementPolicy): { jobTemplate+: { spec+: { podReplacementPolicy: podReplacementPolicy } } }, + '#withSuspend':: d.fn(help='"suspend specifies whether the Job controller should create Pods or not. If a Job is created with suspend set to true, no Pods are created by the Job controller. If a Job is suspended after creation (i.e. the flag goes from false to true), the Job controller will delete all active Pods associated with this Job. Users must design their workload to gracefully handle this. Suspending a Job will reset the StartTime field of the Job, effectively resetting the ActiveDeadlineSeconds timer too. Defaults to false."', args=[d.arg(name='suspend', type=d.T.boolean)]), withSuspend(suspend): { jobTemplate+: { spec+: { suspend: suspend } } }, - '#withTtlSecondsAfterFinished':: d.fn(help="\"ttlSecondsAfterFinished limits the lifetime of a Job that has finished execution (either Complete or Failed).
If this field is set, ttlSecondsAfterFinished after the Job finishes, it is eligible to be automatically deleted. When the Job is being deleted, its lifecycle guarantees (e.g. finalizers) will be honored. If this field is unset, the Job won't be automatically deleted. If this field is set to zero, the Job becomes eligible to be deleted immediately after it finishes. This field is alpha-level and is only honored by servers that enable the TTLAfterFinished feature.\"", args=[d.arg(name='ttlSecondsAfterFinished', type=d.T.integer)]), + '#withTtlSecondsAfterFinished':: d.fn(help="\"ttlSecondsAfterFinished limits the lifetime of a Job that has finished execution (either Complete or Failed). If this field is set, ttlSecondsAfterFinished after the Job finishes, it is eligible to be automatically deleted. When the Job is being deleted, its lifecycle guarantees (e.g. finalizers) will be honored. If this field is unset, the Job won't be automatically deleted. If this field is set to zero, the Job becomes eligible to be deleted immediately after it finishes.\"", args=[d.arg(name='ttlSecondsAfterFinished', type=d.T.integer)]), withTtlSecondsAfterFinished(ttlSecondsAfterFinished): { jobTemplate+: { spec+: { ttlSecondsAfterFinished: ttlSecondsAfterFinished } } }, }, }, - '#withConcurrencyPolicy':: d.fn(help="\"Specifies how to treat concurrent executions of a Job. Valid values are: - \\\"Allow\\\" (default): allows CronJobs to run concurrently; - \\\"Forbid\\\": forbids concurrent runs, skipping next run if previous run hasn't finished yet; - \\\"Replace\\\": cancels currently running job and replaces it with a new one\"", args=[d.arg(name='concurrencyPolicy', type=d.T.string)]), + '#withConcurrencyPolicy':: d.fn(help="\"Specifies how to treat concurrent executions of a Job. Valid values are:\\n\\n- \\\"Allow\\\" (default): allows CronJobs to run concurrently; - \\\"Forbid\\\": forbids concurrent runs, skipping next run if previous run hasn't finished yet; - \\\"Replace\\\": cancels currently running job and replaces it with a new one\"", args=[d.arg(name='concurrencyPolicy', type=d.T.string)]), withConcurrencyPolicy(concurrencyPolicy): { concurrencyPolicy: concurrencyPolicy }, '#withFailedJobsHistoryLimit':: d.fn(help='"The number of failed finished jobs to retain. Value must be non-negative integer. Defaults to 1."', args=[d.arg(name='failedJobsHistoryLimit', type=d.T.integer)]), withFailedJobsHistoryLimit(failedJobsHistoryLimit): { failedJobsHistoryLimit: failedJobsHistoryLimit }, @@ -330,6 +356,8 @@ withSuccessfulJobsHistoryLimit(successfulJobsHistoryLimit): { successfulJobsHistoryLimit: successfulJobsHistoryLimit }, '#withSuspend':: d.fn(help='"This flag tells the controller to suspend subsequent executions, it does not apply to already started executions. Defaults to false."', args=[d.arg(name='suspend', type=d.T.boolean)]), withSuspend(suspend): { suspend: suspend }, + '#withTimeZone':: d.fn(help='"The time zone name for the given schedule, see https://en.wikipedia.org/wiki/List_of_tz_database_time_zones. If not specified, this will default to the time zone of the kube-controller-manager process. The set of valid time zone names and the time zone offset is loaded from the system-wide time zone database by the API server during CronJob validation and the controller manager during execution. If no system-wide time zone database can be found a bundled version of the database is used instead. 
If the time zone name becomes invalid during the lifetime of a CronJob or due to a change in host configuration, the controller will stop creating new Jobs and will create a system event with the reason UnknownTimeZone. More information can be found in https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/#time-zones"', args=[d.arg(name='timeZone', type=d.T.string)]), + withTimeZone(timeZone): { timeZone: timeZone }, '#mixin': 'ignore', mixin: self, } diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/batch/v1/cronJobStatus.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/batch/v1/cronJobStatus.libsonnet similarity index 100% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/batch/v1/cronJobStatus.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/batch/v1/cronJobStatus.libsonnet diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/batch/v1/job.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/batch/v1/job.libsonnet similarity index 81% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/batch/v1/job.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/batch/v1/job.libsonnet index bd07c0ceebc..d4fea6172ab 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/batch/v1/job.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/batch/v1/job.libsonnet @@ -3,12 +3,10 @@ '#':: d.pkg(name='job', url='', help='"Job represents the configuration of a single job."'), '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects.
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -19,21 +17,21 @@ withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. 
The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { metadata+: { generateName: generateName } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { metadata+: { labels+: labels } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. 
This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { metadata+: { namespace: namespace } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. 
There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, @@ -41,9 +39,9 @@ withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { metadata+: { uid: uid } }, }, '#new':: d.fn(help='new returns an instance of Job', args=[d.arg(name='name', type=d.T.string)]), @@ -53,6 +51,13 @@ } + self.metadata.withName(name=name), '#spec':: d.obj(help='"JobSpec describes how the job execution will look like."'), spec: { + '#podFailurePolicy':: d.obj(help='"PodFailurePolicy describes how failed pods influence the backoffLimit."'), + podFailurePolicy: { + '#withRules':: d.fn(help='"A list of pod failure policy rules. The rules are evaluated in order. Once a rule matches a Pod failure, the remaining of the rules are ignored. When no rule matches the Pod failure, the default handling applies - the counter of pod failures is incremented and it is checked against the backoffLimit. 
At most 20 elements are allowed."', args=[d.arg(name='rules', type=d.T.array)]), + withRules(rules): { spec+: { podFailurePolicy+: { rules: if std.isArray(v=rules) then rules else [rules] } } }, + '#withRulesMixin':: d.fn(help='"A list of pod failure policy rules. The rules are evaluated in order. Once a rule matches a Pod failure, the remaining of the rules are ignored. When no rule matches the Pod failure, the default handling applies - the counter of pod failures is incremented and it is checked against the backoffLimit. At most 20 elements are allowed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='rules', type=d.T.array)]), + withRulesMixin(rules): { spec+: { podFailurePolicy+: { rules+: if std.isArray(v=rules) then rules else [rules] } } }, + }, '#selector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), selector: { '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), @@ -68,12 +73,10 @@ template: { '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { spec+: { template+: { metadata+: { annotations: annotations } } } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { spec+: { template+: { metadata+: { annotations+: annotations } } } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. 
This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { spec+: { template+: { metadata+: { clusterName: clusterName } } } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { spec+: { template+: { metadata+: { creationTimestamp: creationTimestamp } } } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -84,21 +87,21 @@ withFinalizers(finalizers): { spec+: { template+: { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } } } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { spec+: { template+: { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } } } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. 
If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { spec+: { template+: { metadata+: { generateName: generateName } } } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { spec+: { template+: { metadata+: { generation: generation } } } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { spec+: { template+: { metadata+: { labels: labels } } } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { spec+: { template+: { metadata+: { labels+: labels } } } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { spec+: { template+: { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } } } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. 
A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { spec+: { template+: { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } } } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { spec+: { template+: { metadata+: { name: name } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { spec+: { template+: { metadata+: { namespace: namespace } } } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { spec+: { template+: { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } } } }, @@ -106,9 +109,9 @@ withOwnerReferencesMixin(ownerReferences): { spec+: { template+: { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } } } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. 
May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { spec+: { template+: { metadata+: { resourceVersion: resourceVersion } } } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { spec+: { template+: { metadata+: { selfLink: selfLink } } } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { spec+: { template+: { metadata+: { uid: uid } } } }, }, '#spec':: d.obj(help='"PodSpec is a description of a pod."'), @@ -167,6 +170,11 @@ '#withSearchesMixin':: d.fn(help='"A list of DNS search domains for host-name lookup. This will be appended to the base search paths generated from DNSPolicy. Duplicated search paths will be removed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='searches', type=d.T.array)]), withSearchesMixin(searches): { spec+: { template+: { spec+: { dnsConfig+: { searches+: if std.isArray(v=searches) then searches else [searches] } } } } }, }, + '#os':: d.obj(help='"PodOS defines the OS parameters of a pod."'), + os: { + '#withName':: d.fn(help='"Name is the name of the operating system. The currently supported values are linux and windows. Additional value may be defined in future and can be one of: https://github.com/opencontainers/runtime-spec/blob/master/config.md#platform-specific-configuration Clients should expect to handle additional values and treat unrecognized values in this field as os: null"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { spec+: { template+: { spec+: { os+: { name: name } } } } }, + }, '#securityContext':: d.obj(help='"PodSecurityContext holds pod-level security attributes and common container settings. Some fields are also present in container.securityContext. 
Field values of container.securityContext take precedence over field values of PodSecurityContext."'), securityContext: { '#seLinuxOptions':: d.obj(help='"SELinuxOptions are the labels to be applied to the container"'), @@ -182,7 +190,7 @@ }, '#seccompProfile':: d.obj(help="\"SeccompProfile defines a pod/container's seccomp profile settings. Only one profile source may be set.\""), seccompProfile: { - '#withLocalhostProfile':: d.fn(help="\"localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must only be set if type is \\\"Localhost\\\".\"", args=[d.arg(name='localhostProfile', type=d.T.string)]), + '#withLocalhostProfile':: d.fn(help="\"localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must be set if type is \\\"Localhost\\\". Must NOT be set for any other type.\"", args=[d.arg(name='localhostProfile', type=d.T.string)]), withLocalhostProfile(localhostProfile): { spec+: { template+: { spec+: { securityContext+: { seccompProfile+: { localhostProfile: localhostProfile } } } } } }, '#withType':: d.fn(help='"type indicates which kind of seccomp profile will be applied. Valid options are:\\n\\nLocalhost - a profile defined in a file on the node should be used. RuntimeDefault - the container runtime default profile should be used. Unconfined - no profile should be applied."', args=[d.arg(name='type', type=d.T.string)]), withType(type): { spec+: { template+: { spec+: { securityContext+: { seccompProfile+: { type: type } } } } } }, @@ -193,26 +201,28 @@ withGmsaCredentialSpec(gmsaCredentialSpec): { spec+: { template+: { spec+: { securityContext+: { windowsOptions+: { gmsaCredentialSpec: gmsaCredentialSpec } } } } } }, '#withGmsaCredentialSpecName':: d.fn(help='"GMSACredentialSpecName is the name of the GMSA credential spec to use."', args=[d.arg(name='gmsaCredentialSpecName', type=d.T.string)]), withGmsaCredentialSpecName(gmsaCredentialSpecName): { spec+: { template+: { spec+: { securityContext+: { windowsOptions+: { gmsaCredentialSpecName: gmsaCredentialSpecName } } } } } }, + '#withHostProcess':: d.fn(help="\"HostProcess determines if a container should be run as a 'Host Process' container. All of a Pod's containers must have the same effective HostProcess value (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). In addition, if HostProcess is true then HostNetwork must also be set to true.\"", args=[d.arg(name='hostProcess', type=d.T.boolean)]), + withHostProcess(hostProcess): { spec+: { template+: { spec+: { securityContext+: { windowsOptions+: { hostProcess: hostProcess } } } } } }, '#withRunAsUserName':: d.fn(help='"The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence."', args=[d.arg(name='runAsUserName', type=d.T.string)]), withRunAsUserName(runAsUserName): { spec+: { template+: { spec+: { securityContext+: { windowsOptions+: { runAsUserName: runAsUserName } } } } } }, }, - '#withFsGroup':: d.fn(help="\"A special supplemental group that applies to all containers in a pod. 
Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod:\\n\\n1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw----\\n\\nIf unset, the Kubelet will not modify the ownership and permissions of any volume.\"", args=[d.arg(name='fsGroup', type=d.T.integer)]), + '#withFsGroup':: d.fn(help="\"A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod:\\n\\n1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw----\\n\\nIf unset, the Kubelet will not modify the ownership and permissions of any volume. Note that this field cannot be set when spec.os.name is windows.\"", args=[d.arg(name='fsGroup', type=d.T.integer)]), withFsGroup(fsGroup): { spec+: { template+: { spec+: { securityContext+: { fsGroup: fsGroup } } } } }, - '#withFsGroupChangePolicy':: d.fn(help='"fsGroupChangePolicy defines behavior of changing ownership and permission of the volume before being exposed inside Pod. This field will only apply to volume types which support fsGroup based ownership(and permissions). It will have no effect on ephemeral volume types such as: secret, configmaps and emptydir. Valid values are \\"OnRootMismatch\\" and \\"Always\\". If not specified, \\"Always\\" is used."', args=[d.arg(name='fsGroupChangePolicy', type=d.T.string)]), + '#withFsGroupChangePolicy':: d.fn(help='"fsGroupChangePolicy defines behavior of changing ownership and permission of the volume before being exposed inside Pod. This field will only apply to volume types which support fsGroup based ownership(and permissions). It will have no effect on ephemeral volume types such as: secret, configmaps and emptydir. Valid values are \\"OnRootMismatch\\" and \\"Always\\". If not specified, \\"Always\\" is used. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='fsGroupChangePolicy', type=d.T.string)]), withFsGroupChangePolicy(fsGroupChangePolicy): { spec+: { template+: { spec+: { securityContext+: { fsGroupChangePolicy: fsGroupChangePolicy } } } } }, - '#withRunAsGroup':: d.fn(help='"The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container."', args=[d.arg(name='runAsGroup', type=d.T.integer)]), + '#withRunAsGroup':: d.fn(help='"The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='runAsGroup', type=d.T.integer)]), withRunAsGroup(runAsGroup): { spec+: { template+: { spec+: { securityContext+: { runAsGroup: runAsGroup } } } } }, '#withRunAsNonRoot':: d.fn(help='"Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in SecurityContext. 
If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence."', args=[d.arg(name='runAsNonRoot', type=d.T.boolean)]), withRunAsNonRoot(runAsNonRoot): { spec+: { template+: { spec+: { securityContext+: { runAsNonRoot: runAsNonRoot } } } } }, - '#withRunAsUser':: d.fn(help='"The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container."', args=[d.arg(name='runAsUser', type=d.T.integer)]), + '#withRunAsUser':: d.fn(help='"The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='runAsUser', type=d.T.integer)]), withRunAsUser(runAsUser): { spec+: { template+: { spec+: { securityContext+: { runAsUser: runAsUser } } } } }, - '#withSupplementalGroups':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID. If unspecified, no groups will be added to any container.\"", args=[d.arg(name='supplementalGroups', type=d.T.array)]), + '#withSupplementalGroups':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID, the fsGroup (if specified), and group memberships defined in the container image for the uid of the container process. If unspecified, no additional groups are added to any container. Note that group memberships defined in the container image for the uid of the container process are still effective, even if they are not included in this list. Note that this field cannot be set when spec.os.name is windows.\"", args=[d.arg(name='supplementalGroups', type=d.T.array)]), withSupplementalGroups(supplementalGroups): { spec+: { template+: { spec+: { securityContext+: { supplementalGroups: if std.isArray(v=supplementalGroups) then supplementalGroups else [supplementalGroups] } } } } }, - '#withSupplementalGroupsMixin':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID. If unspecified, no groups will be added to any container.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='supplementalGroups', type=d.T.array)]), + '#withSupplementalGroupsMixin':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID, the fsGroup (if specified), and group memberships defined in the container image for the uid of the container process. If unspecified, no additional groups are added to any container. Note that group memberships defined in the container image for the uid of the container process are still effective, even if they are not included in this list. 
Note that this field cannot be set when spec.os.name is windows.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='supplementalGroups', type=d.T.array)]), withSupplementalGroupsMixin(supplementalGroups): { spec+: { template+: { spec+: { securityContext+: { supplementalGroups+: if std.isArray(v=supplementalGroups) then supplementalGroups else [supplementalGroups] } } } } }, - '#withSysctls':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch."', args=[d.arg(name='sysctls', type=d.T.array)]), + '#withSysctls':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='sysctls', type=d.T.array)]), withSysctls(sysctls): { spec+: { template+: { spec+: { securityContext+: { sysctls: if std.isArray(v=sysctls) then sysctls else [sysctls] } } } } }, - '#withSysctlsMixin':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='sysctls', type=d.T.array)]), + '#withSysctlsMixin':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch. Note that this field cannot be set when spec.os.name is windows."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='sysctls', type=d.T.array)]), withSysctlsMixin(sysctls): { spec+: { template+: { spec+: { securityContext+: { sysctls+: if std.isArray(v=sysctls) then sysctls else [sysctls] } } } } }, }, '#withActiveDeadlineSeconds':: d.fn(help='"Optional duration in seconds the pod may be active on the node relative to StartTime before the system will actively try to mark it failed and kill associated containers. Value must be a positive integer."', args=[d.arg(name='activeDeadlineSeconds', type=d.T.integer)]), @@ -227,9 +237,9 @@ withDnsPolicy(dnsPolicy): { spec+: { template+: { spec+: { dnsPolicy: dnsPolicy } } } }, '#withEnableServiceLinks':: d.fn(help="\"EnableServiceLinks indicates whether information about services should be injected into pod's environment variables, matching the syntax of Docker links. Optional: Defaults to true.\"", args=[d.arg(name='enableServiceLinks', type=d.T.boolean)]), withEnableServiceLinks(enableServiceLinks): { spec+: { template+: { spec+: { enableServiceLinks: enableServiceLinks } } } }, - '#withEphemeralContainers':: d.fn(help="\"List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. This field is alpha-level and is only honored by servers that enable the EphemeralContainers feature.\"", args=[d.arg(name='ephemeralContainers', type=d.T.array)]), + '#withEphemeralContainers':: d.fn(help="\"List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. 
This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource.\"", args=[d.arg(name='ephemeralContainers', type=d.T.array)]), withEphemeralContainers(ephemeralContainers): { spec+: { template+: { spec+: { ephemeralContainers: if std.isArray(v=ephemeralContainers) then ephemeralContainers else [ephemeralContainers] } } } }, - '#withEphemeralContainersMixin':: d.fn(help="\"List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. This field is alpha-level and is only honored by servers that enable the EphemeralContainers feature.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='ephemeralContainers', type=d.T.array)]), + '#withEphemeralContainersMixin':: d.fn(help="\"List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='ephemeralContainers', type=d.T.array)]), withEphemeralContainersMixin(ephemeralContainers): { spec+: { template+: { spec+: { ephemeralContainers+: if std.isArray(v=ephemeralContainers) then ephemeralContainers else [ephemeralContainers] } } } }, '#withHostAliases':: d.fn(help="\"HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts file if specified. This is only valid for non-hostNetwork pods.\"", args=[d.arg(name='hostAliases', type=d.T.array)]), withHostAliases(hostAliases): { spec+: { template+: { spec+: { hostAliases: if std.isArray(v=hostAliases) then hostAliases else [hostAliases] } } } }, @@ -241,11 +251,13 @@ withHostNetwork(hostNetwork): { spec+: { template+: { spec+: { hostNetwork: hostNetwork } } } }, '#withHostPID':: d.fn(help="\"Use the host's pid namespace. Optional: Default to false.\"", args=[d.arg(name='hostPID', type=d.T.boolean)]), withHostPID(hostPID): { spec+: { template+: { spec+: { hostPID: hostPID } } } }, + '#withHostUsers':: d.fn(help="\"Use the host's user namespace. Optional: Default to true. If set to true or not present, the pod will be run in the host user namespace, useful for when the pod needs a feature only available to the host user namespace, such as loading a kernel module with CAP_SYS_MODULE. When set to false, a new userns is created for the pod. Setting false is useful for mitigating container breakout vulnerabilities even allowing users to run their containers as root without actually having root privileges on the host. 
This field is alpha-level and is only honored by servers that enable the UserNamespacesSupport feature.\"", args=[d.arg(name='hostUsers', type=d.T.boolean)]), + withHostUsers(hostUsers): { spec+: { template+: { spec+: { hostUsers: hostUsers } } } }, '#withHostname':: d.fn(help="\"Specifies the hostname of the Pod If not specified, the pod's hostname will be set to a system-defined value.\"", args=[d.arg(name='hostname', type=d.T.string)]), withHostname(hostname): { spec+: { template+: { spec+: { hostname: hostname } } } }, - '#withImagePullSecrets':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), + '#withImagePullSecrets':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), withImagePullSecrets(imagePullSecrets): { spec+: { template+: { spec+: { imagePullSecrets: if std.isArray(v=imagePullSecrets) then imagePullSecrets else [imagePullSecrets] } } } }, - '#withImagePullSecretsMixin':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), + '#withImagePullSecretsMixin':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), withImagePullSecretsMixin(imagePullSecrets): { spec+: { template+: { spec+: { imagePullSecrets+: if std.isArray(v=imagePullSecrets) then imagePullSecrets else [imagePullSecrets] } } } }, '#withInitContainers':: d.fn(help='"List of initialization containers belonging to the pod. Init containers are executed in order prior to containers being started. If any init container fails, the pod is considered to have failed and is handled according to its restartPolicy. The name for an init container or normal container must be unique among all containers. Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes. 
The resourceRequirements of an init container are taken into account during scheduling by finding the highest request/limit for each resource type, and then using the max of of that value or the sum of the normal containers. Limits are applied to init containers in a similar fashion. Init containers cannot currently be added or removed. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/"', args=[d.arg(name='initContainers', type=d.T.array)]), withInitContainers(initContainers): { spec+: { template+: { spec+: { initContainers: if std.isArray(v=initContainers) then initContainers else [initContainers] } } } }, @@ -257,26 +269,34 @@ withNodeSelector(nodeSelector): { spec+: { template+: { spec+: { nodeSelector: nodeSelector } } } }, '#withNodeSelectorMixin':: d.fn(help="\"NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='nodeSelector', type=d.T.object)]), withNodeSelectorMixin(nodeSelector): { spec+: { template+: { spec+: { nodeSelector+: nodeSelector } } } }, - '#withOverhead':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md This field is alpha-level as of Kubernetes v1.16, and is only honored by servers that enable the PodOverhead feature."', args=[d.arg(name='overhead', type=d.T.object)]), + '#withOverhead':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md"', args=[d.arg(name='overhead', type=d.T.object)]), withOverhead(overhead): { spec+: { template+: { spec+: { overhead: overhead } } } }, - '#withOverheadMixin':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. 
If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md This field is alpha-level as of Kubernetes v1.16, and is only honored by servers that enable the PodOverhead feature."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='overhead', type=d.T.object)]), + '#withOverheadMixin':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='overhead', type=d.T.object)]), withOverheadMixin(overhead): { spec+: { template+: { spec+: { overhead+: overhead } } } }, - '#withPreemptionPolicy':: d.fn(help='"PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset. This field is beta-level, gated by the NonPreemptingPriority feature-gate."', args=[d.arg(name='preemptionPolicy', type=d.T.string)]), + '#withPreemptionPolicy':: d.fn(help='"PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset."', args=[d.arg(name='preemptionPolicy', type=d.T.string)]), withPreemptionPolicy(preemptionPolicy): { spec+: { template+: { spec+: { preemptionPolicy: preemptionPolicy } } } }, '#withPriority':: d.fn(help='"The priority value. Various system components use this field to find the priority of the pod. When Priority Admission Controller is enabled, it prevents users from setting this field. The admission controller populates this field from PriorityClassName. The higher the value, the higher the priority."', args=[d.arg(name='priority', type=d.T.integer)]), withPriority(priority): { spec+: { template+: { spec+: { priority: priority } } } }, '#withPriorityClassName':: d.fn(help="\"If specified, indicates the pod's priority. \\\"system-node-critical\\\" and \\\"system-cluster-critical\\\" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default.\"", args=[d.arg(name='priorityClassName', type=d.T.string)]), withPriorityClassName(priorityClassName): { spec+: { template+: { spec+: { priorityClassName: priorityClassName } } } }, - '#withReadinessGates':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. 
A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/0007-pod-ready%2B%2B.md"', args=[d.arg(name='readinessGates', type=d.T.array)]), + '#withReadinessGates':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates"', args=[d.arg(name='readinessGates', type=d.T.array)]), withReadinessGates(readinessGates): { spec+: { template+: { spec+: { readinessGates: if std.isArray(v=readinessGates) then readinessGates else [readinessGates] } } } }, - '#withReadinessGatesMixin':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/0007-pod-ready%2B%2B.md"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='readinessGates', type=d.T.array)]), + '#withReadinessGatesMixin':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='readinessGates', type=d.T.array)]), withReadinessGatesMixin(readinessGates): { spec+: { template+: { spec+: { readinessGates+: if std.isArray(v=readinessGates) then readinessGates else [readinessGates] } } } }, - '#withRestartPolicy':: d.fn(help='"Restart policy for all containers within the pod. One of Always, OnFailure, Never. Default to Always. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy"', args=[d.arg(name='restartPolicy', type=d.T.string)]), + '#withResourceClaims':: d.fn(help='"ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. The resources will be made available to those containers which consume them by name.\\n\\nThis is an alpha field and requires enabling the DynamicResourceAllocation feature gate.\\n\\nThis field is immutable."', args=[d.arg(name='resourceClaims', type=d.T.array)]), + withResourceClaims(resourceClaims): { spec+: { template+: { spec+: { resourceClaims: if std.isArray(v=resourceClaims) then resourceClaims else [resourceClaims] } } } }, + '#withResourceClaimsMixin':: d.fn(help='"ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. The resources will be made available to those containers which consume them by name.\\n\\nThis is an alpha field and requires enabling the DynamicResourceAllocation feature gate.\\n\\nThis field is immutable."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='resourceClaims', type=d.T.array)]), + withResourceClaimsMixin(resourceClaims): { spec+: { template+: { spec+: { resourceClaims+: if std.isArray(v=resourceClaims) then resourceClaims else [resourceClaims] } } } }, + '#withRestartPolicy':: d.fn(help='"Restart policy for all containers within the pod. 
One of Always, OnFailure, Never. In some contexts, only a subset of those values may be permitted. Default to Always. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy"', args=[d.arg(name='restartPolicy', type=d.T.string)]), withRestartPolicy(restartPolicy): { spec+: { template+: { spec+: { restartPolicy: restartPolicy } } } }, - '#withRuntimeClassName':: d.fn(help='"RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \\"legacy\\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md This is a beta feature as of Kubernetes v1.14."', args=[d.arg(name='runtimeClassName', type=d.T.string)]), + '#withRuntimeClassName':: d.fn(help='"RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \\"legacy\\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class"', args=[d.arg(name='runtimeClassName', type=d.T.string)]), withRuntimeClassName(runtimeClassName): { spec+: { template+: { spec+: { runtimeClassName: runtimeClassName } } } }, '#withSchedulerName':: d.fn(help='"If specified, the pod will be dispatched by specified scheduler. If not specified, the pod will be dispatched by default scheduler."', args=[d.arg(name='schedulerName', type=d.T.string)]), withSchedulerName(schedulerName): { spec+: { template+: { spec+: { schedulerName: schedulerName } } } }, + '#withSchedulingGates':: d.fn(help='"SchedulingGates is an opaque list of values that if specified will block scheduling the pod. If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the scheduler will not attempt to schedule the pod.\\n\\nSchedulingGates can only be set at pod creation time, and be removed only afterwards.\\n\\nThis is a beta feature enabled by the PodSchedulingReadiness feature gate."', args=[d.arg(name='schedulingGates', type=d.T.array)]), + withSchedulingGates(schedulingGates): { spec+: { template+: { spec+: { schedulingGates: if std.isArray(v=schedulingGates) then schedulingGates else [schedulingGates] } } } }, + '#withSchedulingGatesMixin':: d.fn(help='"SchedulingGates is an opaque list of values that if specified will block scheduling the pod. If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the scheduler will not attempt to schedule the pod.\\n\\nSchedulingGates can only be set at pod creation time, and be removed only afterwards.\\n\\nThis is a beta feature enabled by the PodSchedulingReadiness feature gate."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='schedulingGates', type=d.T.array)]), + withSchedulingGatesMixin(schedulingGates): { spec+: { template+: { spec+: { schedulingGates+: if std.isArray(v=schedulingGates) then schedulingGates else [schedulingGates] } } } }, '#withServiceAccount':: d.fn(help='"DeprecatedServiceAccount is a depreciated alias for ServiceAccountName. 
Deprecated: Use serviceAccountName instead."', args=[d.arg(name='serviceAccount', type=d.T.string)]), withServiceAccount(serviceAccount): { spec+: { template+: { spec+: { serviceAccount: serviceAccount } } } }, '#withServiceAccountName':: d.fn(help='"ServiceAccountName is the name of the ServiceAccount to use to run this pod. More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/"', args=[d.arg(name='serviceAccountName', type=d.T.string)]), @@ -307,17 +327,23 @@ withActiveDeadlineSeconds(activeDeadlineSeconds): { spec+: { activeDeadlineSeconds: activeDeadlineSeconds } }, '#withBackoffLimit':: d.fn(help='"Specifies the number of retries before marking this job failed. Defaults to 6"', args=[d.arg(name='backoffLimit', type=d.T.integer)]), withBackoffLimit(backoffLimit): { spec+: { backoffLimit: backoffLimit } }, - '#withCompletionMode':: d.fn(help="\"CompletionMode specifies how Pod completions are tracked. It can be `NonIndexed` (default) or `Indexed`.\\n\\n`NonIndexed` means that the Job is considered complete when there have been .spec.completions successfully completed Pods. Each Pod completion is homologous to each other.\\n\\n`Indexed` means that the Pods of a Job get an associated completion index from 0 to (.spec.completions - 1), available in the annotation batch.kubernetes.io/job-completion-index. The Job is considered complete when there is one successfully completed Pod for each index. When value is `Indexed`, .spec.completions must be specified and `.spec.parallelism` must be less than or equal to 10^5.\\n\\nThis field is alpha-level and is only honored by servers that enable the IndexedJob feature gate. More completion modes can be added in the future. If the Job controller observes a mode that it doesn't recognize, the controller skips updates for the Job.\"", args=[d.arg(name='completionMode', type=d.T.string)]), + '#withBackoffLimitPerIndex':: d.fn(help="\"Specifies the limit for the number of retries within an index before marking this index as failed. When enabled the number of failures per index is kept in the pod's batch.kubernetes.io/job-index-failure-count annotation. It can only be set when Job's completionMode=Indexed, and the Pod's restart policy is Never. The field is immutable. This field is beta-level. It can be used when the `JobBackoffLimitPerIndex` feature gate is enabled (enabled by default).\"", args=[d.arg(name='backoffLimitPerIndex', type=d.T.integer)]), + withBackoffLimitPerIndex(backoffLimitPerIndex): { spec+: { backoffLimitPerIndex: backoffLimitPerIndex } }, + '#withCompletionMode':: d.fn(help="\"completionMode specifies how Pod completions are tracked. It can be `NonIndexed` (default) or `Indexed`.\\n\\n`NonIndexed` means that the Job is considered complete when there have been .spec.completions successfully completed Pods. Each Pod completion is homologous to each other.\\n\\n`Indexed` means that the Pods of a Job get an associated completion index from 0 to (.spec.completions - 1), available in the annotation batch.kubernetes.io/job-completion-index. The Job is considered complete when there is one successfully completed Pod for each index. When value is `Indexed`, .spec.completions must be specified and `.spec.parallelism` must be less than or equal to 10^5. In addition, The Pod name takes the form `$(job-name)-$(index)-$(random-string)`, the Pod hostname takes the form `$(job-name)-$(index)`.\\n\\nMore completion modes can be added in the future. 
If the Job controller observes a mode that it doesn't recognize, which is possible during upgrades due to version skew, the controller skips updates for the Job.\"", args=[d.arg(name='completionMode', type=d.T.string)]), withCompletionMode(completionMode): { spec+: { completionMode: completionMode } }, - '#withCompletions':: d.fn(help='"Specifies the desired number of successfully finished pods the job should be run with. Setting to nil means that the success of any pod signals the success of all pods, and allows parallelism to have any positive value. Setting to 1 means that parallelism is limited to 1 and the success of that pod signals the success of the job. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/"', args=[d.arg(name='completions', type=d.T.integer)]), + '#withCompletions':: d.fn(help='"Specifies the desired number of successfully finished pods the job should be run with. Setting to null means that the success of any pod signals the success of all pods, and allows parallelism to have any positive value. Setting to 1 means that parallelism is limited to 1 and the success of that pod signals the success of the job. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/"', args=[d.arg(name='completions', type=d.T.integer)]), withCompletions(completions): { spec+: { completions: completions } }, '#withManualSelector':: d.fn(help='"manualSelector controls generation of pod labels and pod selectors. Leave `manualSelector` unset unless you are certain what you are doing. When false or unset, the system pick labels unique to this job and appends those labels to the pod template. When true, the user is responsible for picking unique labels and specifying the selector. Failure to pick a unique label may cause this and other jobs to not function correctly. However, You may see `manualSelector=true` in jobs that were created with the old `extensions/v1beta1` API. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/#specifying-your-own-pod-selector"', args=[d.arg(name='manualSelector', type=d.T.boolean)]), withManualSelector(manualSelector): { spec+: { manualSelector: manualSelector } }, + '#withMaxFailedIndexes':: d.fn(help='"Specifies the maximal number of failed indexes before marking the Job as failed, when backoffLimitPerIndex is set. Once the number of failed indexes exceeds this number the entire Job is marked as Failed and its execution is terminated. When left as null the job continues execution of all of its indexes and is marked with the `Complete` Job condition. It can only be specified when backoffLimitPerIndex is set. It can be null or up to completions. It is required and must be less than or equal to 10^4 when completions is greater than 10^5. This field is beta-level. It can be used when the `JobBackoffLimitPerIndex` feature gate is enabled (enabled by default)."', args=[d.arg(name='maxFailedIndexes', type=d.T.integer)]), + withMaxFailedIndexes(maxFailedIndexes): { spec+: { maxFailedIndexes: maxFailedIndexes } }, '#withParallelism':: d.fn(help='"Specifies the maximum desired number of pods the job should run at any given time. The actual number of pods running in steady state will be less than this number when ((.spec.completions - .status.successful) < .spec.parallelism), i.e. when the work left to do is less than max parallelism.
More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/"', args=[d.arg(name='parallelism', type=d.T.integer)]), withParallelism(parallelism): { spec+: { parallelism: parallelism } }, + '#withPodReplacementPolicy':: d.fn(help='"podReplacementPolicy specifies when to create replacement Pods. Possible values are: - TerminatingOrFailed means that we recreate pods\\n when they are terminating (has a metadata.deletionTimestamp) or failed.\\n- Failed means to wait until a previously created Pod is fully terminated (has phase\\n Failed or Succeeded) before creating a replacement Pod.\\n\\nWhen using podFailurePolicy, Failed is the only allowed value. TerminatingOrFailed and Failed are allowed values when podFailurePolicy is not in use. This is a beta field. To use this, enable the JobPodReplacementPolicy feature toggle. This is on by default."', args=[d.arg(name='podReplacementPolicy', type=d.T.string)]), + withPodReplacementPolicy(podReplacementPolicy): { spec+: { podReplacementPolicy: podReplacementPolicy } }, - '#withSuspend':: d.fn(help='"Suspend specifies whether the Job controller should create Pods or not. If a Job is created with suspend set to true, no Pods are created by the Job controller. If a Job is suspended after creation (i.e. the flag goes from false to true), the Job controller will delete all active Pods associated with this Job. Users must design their workload to gracefully handle this. Suspending a Job will reset the StartTime field of the Job, effectively resetting the ActiveDeadlineSeconds timer too. This is an alpha field and requires the SuspendJob feature gate to be enabled; otherwise this field may not be set to true. Defaults to false."', args=[d.arg(name='suspend', type=d.T.boolean)]), + '#withSuspend':: d.fn(help='"suspend specifies whether the Job controller should create Pods or not. If a Job is created with suspend set to true, no Pods are created by the Job controller. If a Job is suspended after creation (i.e. the flag goes from false to true), the Job controller will delete all active Pods associated with this Job. Users must design their workload to gracefully handle this. Suspending a Job will reset the StartTime field of the Job, effectively resetting the ActiveDeadlineSeconds timer too. Defaults to false."', args=[d.arg(name='suspend', type=d.T.boolean)]), withSuspend(suspend): { spec+: { suspend: suspend } }, - '#withTtlSecondsAfterFinished':: d.fn(help="\"ttlSecondsAfterFinished limits the lifetime of a Job that has finished execution (either Complete or Failed). If this field is set, ttlSecondsAfterFinished after the Job finishes, it is eligible to be automatically deleted. When the Job is being deleted, its lifecycle guarantees (e.g. finalizers) will be honored. If this field is unset, the Job won't be automatically deleted. If this field is set to zero, the Job becomes eligible to be deleted immediately after it finishes. This field is alpha-level and is only honored by servers that enable the TTLAfterFinished feature.\"", args=[d.arg(name='ttlSecondsAfterFinished', type=d.T.integer)]), + '#withTtlSecondsAfterFinished':: d.fn(help="\"ttlSecondsAfterFinished limits the lifetime of a Job that has finished execution (either Complete or Failed). If this field is set, ttlSecondsAfterFinished after the Job finishes, it is eligible to be automatically deleted. When the Job is being deleted, its lifecycle guarantees (e.g. finalizers) will be honored. If this field is unset, the Job won't be automatically deleted.
If this field is set to zero, the Job becomes eligible to be deleted immediately after it finishes.\"", args=[d.arg(name='ttlSecondsAfterFinished', type=d.T.integer)]), withTtlSecondsAfterFinished(ttlSecondsAfterFinished): { spec+: { ttlSecondsAfterFinished: ttlSecondsAfterFinished } }, }, '#mixin': 'ignore', diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/batch/v1/jobCondition.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/batch/v1/jobCondition.libsonnet similarity index 100% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/batch/v1/jobCondition.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/batch/v1/jobCondition.libsonnet diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/batch/v1/jobSpec.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/batch/v1/jobSpec.libsonnet similarity index 80% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/batch/v1/jobSpec.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/batch/v1/jobSpec.libsonnet index dee8948c13c..3f02a1c564a 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/batch/v1/jobSpec.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/batch/v1/jobSpec.libsonnet @@ -1,6 +1,13 @@ { local d = (import 'doc-util/main.libsonnet'), '#':: d.pkg(name='jobSpec', url='', help='"JobSpec describes how the job execution will look like."'), + '#podFailurePolicy':: d.obj(help='"PodFailurePolicy describes how failed pods influence the backoffLimit."'), + podFailurePolicy: { + '#withRules':: d.fn(help='"A list of pod failure policy rules. The rules are evaluated in order. Once a rule matches a Pod failure, the remaining of the rules are ignored. When no rule matches the Pod failure, the default handling applies - the counter of pod failures is incremented and it is checked against the backoffLimit. At most 20 elements are allowed."', args=[d.arg(name='rules', type=d.T.array)]), + withRules(rules): { podFailurePolicy+: { rules: if std.isArray(v=rules) then rules else [rules] } }, + '#withRulesMixin':: d.fn(help='"A list of pod failure policy rules. The rules are evaluated in order. Once a rule matches a Pod failure, the remaining of the rules are ignored. When no rule matches the Pod failure, the default handling applies - the counter of pod failures is incremented and it is checked against the backoffLimit. At most 20 elements are allowed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='rules', type=d.T.array)]), + withRulesMixin(rules): { podFailurePolicy+: { rules+: if std.isArray(v=rules) then rules else [rules] } }, + }, '#selector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), selector: { '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. 
The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), @@ -16,12 +23,10 @@ template: { '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { template+: { metadata+: { annotations: annotations } } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { template+: { metadata+: { annotations+: annotations } } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { template+: { metadata+: { clusterName: clusterName } } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { template+: { metadata+: { creationTimestamp: creationTimestamp } } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -32,21 +37,21 @@ withFinalizers(finalizers): { template+: { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. 
Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { template+: { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { template+: { metadata+: { generateName: generateName } } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { template+: { metadata+: { generation: generation } } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. 
More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { template+: { metadata+: { labels: labels } } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { template+: { metadata+: { labels+: labels } } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { template+: { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { template+: { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { template+: { metadata+: { name: name } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { template+: { metadata+: { namespace: namespace } } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { template+: { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } } }, @@ -54,9 +59,9 @@ withOwnerReferencesMixin(ownerReferences): { template+: { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { template+: { metadata+: { resourceVersion: resourceVersion } } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { template+: { metadata+: { selfLink: selfLink } } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. 
It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { template+: { metadata+: { uid: uid } } }, }, '#spec':: d.obj(help='"PodSpec is a description of a pod."'), @@ -115,6 +120,11 @@ '#withSearchesMixin':: d.fn(help='"A list of DNS search domains for host-name lookup. This will be appended to the base search paths generated from DNSPolicy. Duplicated search paths will be removed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='searches', type=d.T.array)]), withSearchesMixin(searches): { template+: { spec+: { dnsConfig+: { searches+: if std.isArray(v=searches) then searches else [searches] } } } }, }, + '#os':: d.obj(help='"PodOS defines the OS parameters of a pod."'), + os: { + '#withName':: d.fn(help='"Name is the name of the operating system. The currently supported values are linux and windows. Additional value may be defined in future and can be one of: https://github.com/opencontainers/runtime-spec/blob/master/config.md#platform-specific-configuration Clients should expect to handle additional values and treat unrecognized values in this field as os: null"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { template+: { spec+: { os+: { name: name } } } }, + }, '#securityContext':: d.obj(help='"PodSecurityContext holds pod-level security attributes and common container settings. Some fields are also present in container.securityContext. Field values of container.securityContext take precedence over field values of PodSecurityContext."'), securityContext: { '#seLinuxOptions':: d.obj(help='"SELinuxOptions are the labels to be applied to the container"'), @@ -130,7 +140,7 @@ }, '#seccompProfile':: d.obj(help="\"SeccompProfile defines a pod/container's seccomp profile settings. Only one profile source may be set.\""), seccompProfile: { - '#withLocalhostProfile':: d.fn(help="\"localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must only be set if type is \\\"Localhost\\\".\"", args=[d.arg(name='localhostProfile', type=d.T.string)]), + '#withLocalhostProfile':: d.fn(help="\"localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must be set if type is \\\"Localhost\\\". Must NOT be set for any other type.\"", args=[d.arg(name='localhostProfile', type=d.T.string)]), withLocalhostProfile(localhostProfile): { template+: { spec+: { securityContext+: { seccompProfile+: { localhostProfile: localhostProfile } } } } }, '#withType':: d.fn(help='"type indicates which kind of seccomp profile will be applied. 
Valid options are:\\n\\nLocalhost - a profile defined in a file on the node should be used. RuntimeDefault - the container runtime default profile should be used. Unconfined - no profile should be applied."', args=[d.arg(name='type', type=d.T.string)]), withType(type): { template+: { spec+: { securityContext+: { seccompProfile+: { type: type } } } } }, @@ -141,26 +151,28 @@ withGmsaCredentialSpec(gmsaCredentialSpec): { template+: { spec+: { securityContext+: { windowsOptions+: { gmsaCredentialSpec: gmsaCredentialSpec } } } } }, '#withGmsaCredentialSpecName':: d.fn(help='"GMSACredentialSpecName is the name of the GMSA credential spec to use."', args=[d.arg(name='gmsaCredentialSpecName', type=d.T.string)]), withGmsaCredentialSpecName(gmsaCredentialSpecName): { template+: { spec+: { securityContext+: { windowsOptions+: { gmsaCredentialSpecName: gmsaCredentialSpecName } } } } }, + '#withHostProcess':: d.fn(help="\"HostProcess determines if a container should be run as a 'Host Process' container. All of a Pod's containers must have the same effective HostProcess value (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). In addition, if HostProcess is true then HostNetwork must also be set to true.\"", args=[d.arg(name='hostProcess', type=d.T.boolean)]), + withHostProcess(hostProcess): { template+: { spec+: { securityContext+: { windowsOptions+: { hostProcess: hostProcess } } } } }, '#withRunAsUserName':: d.fn(help='"The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence."', args=[d.arg(name='runAsUserName', type=d.T.string)]), withRunAsUserName(runAsUserName): { template+: { spec+: { securityContext+: { windowsOptions+: { runAsUserName: runAsUserName } } } } }, }, - '#withFsGroup':: d.fn(help="\"A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod:\\n\\n1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw----\\n\\nIf unset, the Kubelet will not modify the ownership and permissions of any volume.\"", args=[d.arg(name='fsGroup', type=d.T.integer)]), + '#withFsGroup':: d.fn(help="\"A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod:\\n\\n1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw----\\n\\nIf unset, the Kubelet will not modify the ownership and permissions of any volume. Note that this field cannot be set when spec.os.name is windows.\"", args=[d.arg(name='fsGroup', type=d.T.integer)]), withFsGroup(fsGroup): { template+: { spec+: { securityContext+: { fsGroup: fsGroup } } } }, - '#withFsGroupChangePolicy':: d.fn(help='"fsGroupChangePolicy defines behavior of changing ownership and permission of the volume before being exposed inside Pod. This field will only apply to volume types which support fsGroup based ownership(and permissions). It will have no effect on ephemeral volume types such as: secret, configmaps and emptydir. 
Valid values are \\"OnRootMismatch\\" and \\"Always\\". If not specified, \\"Always\\" is used."', args=[d.arg(name='fsGroupChangePolicy', type=d.T.string)]), + '#withFsGroupChangePolicy':: d.fn(help='"fsGroupChangePolicy defines behavior of changing ownership and permission of the volume before being exposed inside Pod. This field will only apply to volume types which support fsGroup based ownership(and permissions). It will have no effect on ephemeral volume types such as: secret, configmaps and emptydir. Valid values are \\"OnRootMismatch\\" and \\"Always\\". If not specified, \\"Always\\" is used. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='fsGroupChangePolicy', type=d.T.string)]), withFsGroupChangePolicy(fsGroupChangePolicy): { template+: { spec+: { securityContext+: { fsGroupChangePolicy: fsGroupChangePolicy } } } }, - '#withRunAsGroup':: d.fn(help='"The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container."', args=[d.arg(name='runAsGroup', type=d.T.integer)]), + '#withRunAsGroup':: d.fn(help='"The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='runAsGroup', type=d.T.integer)]), withRunAsGroup(runAsGroup): { template+: { spec+: { securityContext+: { runAsGroup: runAsGroup } } } }, '#withRunAsNonRoot':: d.fn(help='"Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence."', args=[d.arg(name='runAsNonRoot', type=d.T.boolean)]), withRunAsNonRoot(runAsNonRoot): { template+: { spec+: { securityContext+: { runAsNonRoot: runAsNonRoot } } } }, - '#withRunAsUser':: d.fn(help='"The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container."', args=[d.arg(name='runAsUser', type=d.T.integer)]), + '#withRunAsUser':: d.fn(help='"The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='runAsUser', type=d.T.integer)]), withRunAsUser(runAsUser): { template+: { spec+: { securityContext+: { runAsUser: runAsUser } } } }, - '#withSupplementalGroups':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID. 
If unspecified, no groups will be added to any container.\"", args=[d.arg(name='supplementalGroups', type=d.T.array)]), + '#withSupplementalGroups':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID, the fsGroup (if specified), and group memberships defined in the container image for the uid of the container process. If unspecified, no additional groups are added to any container. Note that group memberships defined in the container image for the uid of the container process are still effective, even if they are not included in this list. Note that this field cannot be set when spec.os.name is windows.\"", args=[d.arg(name='supplementalGroups', type=d.T.array)]), withSupplementalGroups(supplementalGroups): { template+: { spec+: { securityContext+: { supplementalGroups: if std.isArray(v=supplementalGroups) then supplementalGroups else [supplementalGroups] } } } }, - '#withSupplementalGroupsMixin':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID. If unspecified, no groups will be added to any container.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='supplementalGroups', type=d.T.array)]), + '#withSupplementalGroupsMixin':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID, the fsGroup (if specified), and group memberships defined in the container image for the uid of the container process. If unspecified, no additional groups are added to any container. Note that group memberships defined in the container image for the uid of the container process are still effective, even if they are not included in this list. Note that this field cannot be set when spec.os.name is windows.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='supplementalGroups', type=d.T.array)]), withSupplementalGroupsMixin(supplementalGroups): { template+: { spec+: { securityContext+: { supplementalGroups+: if std.isArray(v=supplementalGroups) then supplementalGroups else [supplementalGroups] } } } }, - '#withSysctls':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch."', args=[d.arg(name='sysctls', type=d.T.array)]), + '#withSysctls':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='sysctls', type=d.T.array)]), withSysctls(sysctls): { template+: { spec+: { securityContext+: { sysctls: if std.isArray(v=sysctls) then sysctls else [sysctls] } } } }, - '#withSysctlsMixin':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='sysctls', type=d.T.array)]), + '#withSysctlsMixin':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch. 
Note that this field cannot be set when spec.os.name is windows."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='sysctls', type=d.T.array)]), withSysctlsMixin(sysctls): { template+: { spec+: { securityContext+: { sysctls+: if std.isArray(v=sysctls) then sysctls else [sysctls] } } } }, }, '#withActiveDeadlineSeconds':: d.fn(help='"Optional duration in seconds the pod may be active on the node relative to StartTime before the system will actively try to mark it failed and kill associated containers. Value must be a positive integer."', args=[d.arg(name='activeDeadlineSeconds', type=d.T.integer)]), @@ -175,9 +187,9 @@ withDnsPolicy(dnsPolicy): { template+: { spec+: { dnsPolicy: dnsPolicy } } }, '#withEnableServiceLinks':: d.fn(help="\"EnableServiceLinks indicates whether information about services should be injected into pod's environment variables, matching the syntax of Docker links. Optional: Defaults to true.\"", args=[d.arg(name='enableServiceLinks', type=d.T.boolean)]), withEnableServiceLinks(enableServiceLinks): { template+: { spec+: { enableServiceLinks: enableServiceLinks } } }, - '#withEphemeralContainers':: d.fn(help="\"List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. This field is alpha-level and is only honored by servers that enable the EphemeralContainers feature.\"", args=[d.arg(name='ephemeralContainers', type=d.T.array)]), + '#withEphemeralContainers':: d.fn(help="\"List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource.\"", args=[d.arg(name='ephemeralContainers', type=d.T.array)]), withEphemeralContainers(ephemeralContainers): { template+: { spec+: { ephemeralContainers: if std.isArray(v=ephemeralContainers) then ephemeralContainers else [ephemeralContainers] } } }, - '#withEphemeralContainersMixin':: d.fn(help="\"List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. This field is alpha-level and is only honored by servers that enable the EphemeralContainers feature.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='ephemeralContainers', type=d.T.array)]), + '#withEphemeralContainersMixin':: d.fn(help="\"List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. 
In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='ephemeralContainers', type=d.T.array)]), withEphemeralContainersMixin(ephemeralContainers): { template+: { spec+: { ephemeralContainers+: if std.isArray(v=ephemeralContainers) then ephemeralContainers else [ephemeralContainers] } } }, '#withHostAliases':: d.fn(help="\"HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts file if specified. This is only valid for non-hostNetwork pods.\"", args=[d.arg(name='hostAliases', type=d.T.array)]), withHostAliases(hostAliases): { template+: { spec+: { hostAliases: if std.isArray(v=hostAliases) then hostAliases else [hostAliases] } } }, @@ -189,11 +201,13 @@ withHostNetwork(hostNetwork): { template+: { spec+: { hostNetwork: hostNetwork } } }, '#withHostPID':: d.fn(help="\"Use the host's pid namespace. Optional: Default to false.\"", args=[d.arg(name='hostPID', type=d.T.boolean)]), withHostPID(hostPID): { template+: { spec+: { hostPID: hostPID } } }, + '#withHostUsers':: d.fn(help="\"Use the host's user namespace. Optional: Default to true. If set to true or not present, the pod will be run in the host user namespace, useful for when the pod needs a feature only available to the host user namespace, such as loading a kernel module with CAP_SYS_MODULE. When set to false, a new userns is created for the pod. Setting false is useful for mitigating container breakout vulnerabilities even allowing users to run their containers as root without actually having root privileges on the host. This field is alpha-level and is only honored by servers that enable the UserNamespacesSupport feature.\"", args=[d.arg(name='hostUsers', type=d.T.boolean)]), + withHostUsers(hostUsers): { template+: { spec+: { hostUsers: hostUsers } } }, '#withHostname':: d.fn(help="\"Specifies the hostname of the Pod If not specified, the pod's hostname will be set to a system-defined value.\"", args=[d.arg(name='hostname', type=d.T.string)]), withHostname(hostname): { template+: { spec+: { hostname: hostname } } }, - '#withImagePullSecrets':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), + '#withImagePullSecrets':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), withImagePullSecrets(imagePullSecrets): { template+: { spec+: { imagePullSecrets: if std.isArray(v=imagePullSecrets) then imagePullSecrets else [imagePullSecrets] } } }, - '#withImagePullSecretsMixin':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. 
If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), + '#withImagePullSecretsMixin':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), withImagePullSecretsMixin(imagePullSecrets): { template+: { spec+: { imagePullSecrets+: if std.isArray(v=imagePullSecrets) then imagePullSecrets else [imagePullSecrets] } } }, '#withInitContainers':: d.fn(help='"List of initialization containers belonging to the pod. Init containers are executed in order prior to containers being started. If any init container fails, the pod is considered to have failed and is handled according to its restartPolicy. The name for an init container or normal container must be unique among all containers. Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes. The resourceRequirements of an init container are taken into account during scheduling by finding the highest request/limit for each resource type, and then using the max of of that value or the sum of the normal containers. Limits are applied to init containers in a similar fashion. Init containers cannot currently be added or removed. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/"', args=[d.arg(name='initContainers', type=d.T.array)]), withInitContainers(initContainers): { template+: { spec+: { initContainers: if std.isArray(v=initContainers) then initContainers else [initContainers] } } }, @@ -205,26 +219,34 @@ withNodeSelector(nodeSelector): { template+: { spec+: { nodeSelector: nodeSelector } } }, '#withNodeSelectorMixin':: d.fn(help="\"NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='nodeSelector', type=d.T.object)]), withNodeSelectorMixin(nodeSelector): { template+: { spec+: { nodeSelector+: nodeSelector } } }, - '#withOverhead':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. 
More info: https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md This field is alpha-level as of Kubernetes v1.16, and is only honored by servers that enable the PodOverhead feature."', args=[d.arg(name='overhead', type=d.T.object)]), + '#withOverhead':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md"', args=[d.arg(name='overhead', type=d.T.object)]), withOverhead(overhead): { template+: { spec+: { overhead: overhead } } }, - '#withOverheadMixin':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md This field is alpha-level as of Kubernetes v1.16, and is only honored by servers that enable the PodOverhead feature."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='overhead', type=d.T.object)]), + '#withOverheadMixin':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='overhead', type=d.T.object)]), withOverheadMixin(overhead): { template+: { spec+: { overhead+: overhead } } }, - '#withPreemptionPolicy':: d.fn(help='"PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset. This field is beta-level, gated by the NonPreemptingPriority feature-gate."', args=[d.arg(name='preemptionPolicy', type=d.T.string)]), + '#withPreemptionPolicy':: d.fn(help='"PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. 
Defaults to PreemptLowerPriority if unset."', args=[d.arg(name='preemptionPolicy', type=d.T.string)]), withPreemptionPolicy(preemptionPolicy): { template+: { spec+: { preemptionPolicy: preemptionPolicy } } }, '#withPriority':: d.fn(help='"The priority value. Various system components use this field to find the priority of the pod. When Priority Admission Controller is enabled, it prevents users from setting this field. The admission controller populates this field from PriorityClassName. The higher the value, the higher the priority."', args=[d.arg(name='priority', type=d.T.integer)]), withPriority(priority): { template+: { spec+: { priority: priority } } }, '#withPriorityClassName':: d.fn(help="\"If specified, indicates the pod's priority. \\\"system-node-critical\\\" and \\\"system-cluster-critical\\\" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default.\"", args=[d.arg(name='priorityClassName', type=d.T.string)]), withPriorityClassName(priorityClassName): { template+: { spec+: { priorityClassName: priorityClassName } } }, - '#withReadinessGates':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/0007-pod-ready%2B%2B.md"', args=[d.arg(name='readinessGates', type=d.T.array)]), + '#withReadinessGates':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates"', args=[d.arg(name='readinessGates', type=d.T.array)]), withReadinessGates(readinessGates): { template+: { spec+: { readinessGates: if std.isArray(v=readinessGates) then readinessGates else [readinessGates] } } }, - '#withReadinessGatesMixin':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/0007-pod-ready%2B%2B.md"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='readinessGates', type=d.T.array)]), + '#withReadinessGatesMixin':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='readinessGates', type=d.T.array)]), withReadinessGatesMixin(readinessGates): { template+: { spec+: { readinessGates+: if std.isArray(v=readinessGates) then readinessGates else [readinessGates] } } }, - '#withRestartPolicy':: d.fn(help='"Restart policy for all containers within the pod. One of Always, OnFailure, Never. Default to Always. 
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy"', args=[d.arg(name='restartPolicy', type=d.T.string)]), + '#withResourceClaims':: d.fn(help='"ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. The resources will be made available to those containers which consume them by name.\\n\\nThis is an alpha field and requires enabling the DynamicResourceAllocation feature gate.\\n\\nThis field is immutable."', args=[d.arg(name='resourceClaims', type=d.T.array)]), + withResourceClaims(resourceClaims): { template+: { spec+: { resourceClaims: if std.isArray(v=resourceClaims) then resourceClaims else [resourceClaims] } } }, + '#withResourceClaimsMixin':: d.fn(help='"ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. The resources will be made available to those containers which consume them by name.\\n\\nThis is an alpha field and requires enabling the DynamicResourceAllocation feature gate.\\n\\nThis field is immutable."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='resourceClaims', type=d.T.array)]), + withResourceClaimsMixin(resourceClaims): { template+: { spec+: { resourceClaims+: if std.isArray(v=resourceClaims) then resourceClaims else [resourceClaims] } } }, + '#withRestartPolicy':: d.fn(help='"Restart policy for all containers within the pod. One of Always, OnFailure, Never. In some contexts, only a subset of those values may be permitted. Default to Always. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy"', args=[d.arg(name='restartPolicy', type=d.T.string)]), withRestartPolicy(restartPolicy): { template+: { spec+: { restartPolicy: restartPolicy } } }, - '#withRuntimeClassName':: d.fn(help='"RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \\"legacy\\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md This is a beta feature as of Kubernetes v1.14."', args=[d.arg(name='runtimeClassName', type=d.T.string)]), + '#withRuntimeClassName':: d.fn(help='"RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \\"legacy\\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class"', args=[d.arg(name='runtimeClassName', type=d.T.string)]), withRuntimeClassName(runtimeClassName): { template+: { spec+: { runtimeClassName: runtimeClassName } } }, '#withSchedulerName':: d.fn(help='"If specified, the pod will be dispatched by specified scheduler. If not specified, the pod will be dispatched by default scheduler."', args=[d.arg(name='schedulerName', type=d.T.string)]), withSchedulerName(schedulerName): { template+: { spec+: { schedulerName: schedulerName } } }, + '#withSchedulingGates':: d.fn(help='"SchedulingGates is an opaque list of values that if specified will block scheduling the pod. 
If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the scheduler will not attempt to schedule the pod.\\n\\nSchedulingGates can only be set at pod creation time, and be removed only afterwards.\\n\\nThis is a beta feature enabled by the PodSchedulingReadiness feature gate."', args=[d.arg(name='schedulingGates', type=d.T.array)]), + withSchedulingGates(schedulingGates): { template+: { spec+: { schedulingGates: if std.isArray(v=schedulingGates) then schedulingGates else [schedulingGates] } } }, + '#withSchedulingGatesMixin':: d.fn(help='"SchedulingGates is an opaque list of values that if specified will block scheduling the pod. If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the scheduler will not attempt to schedule the pod.\\n\\nSchedulingGates can only be set at pod creation time, and be removed only afterwards.\\n\\nThis is a beta feature enabled by the PodSchedulingReadiness feature gate."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='schedulingGates', type=d.T.array)]), + withSchedulingGatesMixin(schedulingGates): { template+: { spec+: { schedulingGates+: if std.isArray(v=schedulingGates) then schedulingGates else [schedulingGates] } } }, '#withServiceAccount':: d.fn(help='"DeprecatedServiceAccount is a depreciated alias for ServiceAccountName. Deprecated: Use serviceAccountName instead."', args=[d.arg(name='serviceAccount', type=d.T.string)]), withServiceAccount(serviceAccount): { template+: { spec+: { serviceAccount: serviceAccount } } }, '#withServiceAccountName':: d.fn(help='"ServiceAccountName is the name of the ServiceAccount to use to run this pod. More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/"', args=[d.arg(name='serviceAccountName', type=d.T.string)]), @@ -255,17 +277,23 @@ withActiveDeadlineSeconds(activeDeadlineSeconds): { activeDeadlineSeconds: activeDeadlineSeconds }, '#withBackoffLimit':: d.fn(help='"Specifies the number of retries before marking this job failed. Defaults to 6"', args=[d.arg(name='backoffLimit', type=d.T.integer)]), withBackoffLimit(backoffLimit): { backoffLimit: backoffLimit }, - '#withCompletionMode':: d.fn(help="\"CompletionMode specifies how Pod completions are tracked. It can be `NonIndexed` (default) or `Indexed`.\\n\\n`NonIndexed` means that the Job is considered complete when there have been .spec.completions successfully completed Pods. Each Pod completion is homologous to each other.\\n\\n`Indexed` means that the Pods of a Job get an associated completion index from 0 to (.spec.completions - 1), available in the annotation batch.kubernetes.io/job-completion-index. The Job is considered complete when there is one successfully completed Pod for each index. When value is `Indexed`, .spec.completions must be specified and `.spec.parallelism` must be less than or equal to 10^5.\\n\\nThis field is alpha-level and is only honored by servers that enable the IndexedJob feature gate. More completion modes can be added in the future. If the Job controller observes a mode that it doesn't recognize, the controller skips updates for the Job.\"", args=[d.arg(name='completionMode', type=d.T.string)]), + '#withBackoffLimitPerIndex':: d.fn(help="\"Specifies the limit for the number of retries within an index before marking this index as failed. When enabled the number of failures per index is kept in the pod's batch.kubernetes.io/job-index-failure-count annotation. 
It can only be set when Job's completionMode=Indexed, and the Pod's restart policy is Never. The field is immutable. This field is beta-level. It can be used when the `JobBackoffLimitPerIndex` feature gate is enabled (enabled by default).\"", args=[d.arg(name='backoffLimitPerIndex', type=d.T.integer)]), + withBackoffLimitPerIndex(backoffLimitPerIndex): { backoffLimitPerIndex: backoffLimitPerIndex }, + '#withCompletionMode':: d.fn(help="\"completionMode specifies how Pod completions are tracked. It can be `NonIndexed` (default) or `Indexed`.\\n\\n`NonIndexed` means that the Job is considered complete when there have been .spec.completions successfully completed Pods. Each Pod completion is homologous to each other.\\n\\n`Indexed` means that the Pods of a Job get an associated completion index from 0 to (.spec.completions - 1), available in the annotation batch.kubernetes.io/job-completion-index. The Job is considered complete when there is one successfully completed Pod for each index. When value is `Indexed`, .spec.completions must be specified and `.spec.parallelism` must be less than or equal to 10^5. In addition, The Pod name takes the form `$(job-name)-$(index)-$(random-string)`, the Pod hostname takes the form `$(job-name)-$(index)`.\\n\\nMore completion modes can be added in the future. If the Job controller observes a mode that it doesn't recognize, which is possible during upgrades due to version skew, the controller skips updates for the Job.\"", args=[d.arg(name='completionMode', type=d.T.string)]), withCompletionMode(completionMode): { completionMode: completionMode }, - '#withCompletions':: d.fn(help='"Specifies the desired number of successfully finished pods the job should be run with. Setting to nil means that the success of any pod signals the success of all pods, and allows parallelism to have any positive value. Setting to 1 means that parallelism is limited to 1 and the success of that pod signals the success of the job. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/"', args=[d.arg(name='completions', type=d.T.integer)]), + '#withCompletions':: d.fn(help='"Specifies the desired number of successfully finished pods the job should be run with. Setting to null means that the success of any pod signals the success of all pods, and allows parallelism to have any positive value. Setting to 1 means that parallelism is limited to 1 and the success of that pod signals the success of the job. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/"', args=[d.arg(name='completions', type=d.T.integer)]), withCompletions(completions): { completions: completions }, '#withManualSelector':: d.fn(help='"manualSelector controls generation of pod labels and pod selectors. Leave `manualSelector` unset unless you are certain what you are doing. When false or unset, the system pick labels unique to this job and appends those labels to the pod template. When true, the user is responsible for picking unique labels and specifying the selector. Failure to pick a unique label may cause this and other jobs to not function correctly. However, You may see `manualSelector=true` in jobs that were created with the old `extensions/v1beta1` API. 
More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/#specifying-your-own-pod-selector"', args=[d.arg(name='manualSelector', type=d.T.boolean)]), withManualSelector(manualSelector): { manualSelector: manualSelector }, + '#withMaxFailedIndexes':: d.fn(help='"Specifies the maximal number of failed indexes before marking the Job as failed, when backoffLimitPerIndex is set. Once the number of failed indexes exceeds this number the entire Job is marked as Failed and its execution is terminated. When left as null the job continues execution of all of its indexes and is marked with the `Complete` Job condition. It can only be specified when backoffLimitPerIndex is set. It can be null or up to completions. It is required and must be less than or equal to 10^4 when completions is greater than 10^5. This field is beta-level. It can be used when the `JobBackoffLimitPerIndex` feature gate is enabled (enabled by default)."', args=[d.arg(name='maxFailedIndexes', type=d.T.integer)]), + withMaxFailedIndexes(maxFailedIndexes): { maxFailedIndexes: maxFailedIndexes }, '#withParallelism':: d.fn(help='"Specifies the maximum desired number of pods the job should run at any given time. The actual number of pods running in steady state will be less than this number when ((.spec.completions - .status.successful) < .spec.parallelism), i.e. when the work left to do is less than max parallelism. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/"', args=[d.arg(name='parallelism', type=d.T.integer)]), withParallelism(parallelism): { parallelism: parallelism }, + '#withPodReplacementPolicy':: d.fn(help='"podReplacementPolicy specifies when to create replacement Pods. Possible values are: - TerminatingOrFailed means that we recreate pods\\n when they are terminating (has a metadata.deletionTimestamp) or failed.\\n- Failed means to wait until a previously created Pod is fully terminated (has phase\\n Failed or Succeeded) before creating a replacement Pod.\\n\\nWhen using podFailurePolicy, Failed is the only allowed value. TerminatingOrFailed and Failed are allowed values when podFailurePolicy is not in use. This is a beta field. To use this, enable the JobPodReplacementPolicy feature toggle. This is on by default."', args=[d.arg(name='podReplacementPolicy', type=d.T.string)]), + withPodReplacementPolicy(podReplacementPolicy): { podReplacementPolicy: podReplacementPolicy }, + '#withSuspend':: d.fn(help='"suspend specifies whether the Job controller should create Pods or not. If a Job is created with suspend set to true, no Pods are created by the Job controller. If a Job is suspended after creation (i.e. the flag goes from false to true), the Job controller will delete all active Pods associated with this Job. 
Users must design their workload to gracefully handle this. Suspending a Job will reset the StartTime field of the Job, effectively resetting the ActiveDeadlineSeconds timer too. Defaults to false."', args=[d.arg(name='suspend', type=d.T.boolean)]), withSuspend(suspend): { suspend: suspend }, - '#withTtlSecondsAfterFinished':: d.fn(help="\"ttlSecondsAfterFinished limits the lifetime of a Job that has finished execution (either Complete or Failed). If this field is set, ttlSecondsAfterFinished after the Job finishes, it is eligible to be automatically deleted. When the Job is being deleted, its lifecycle guarantees (e.g. finalizers) will be honored. If this field is unset, the Job won't be automatically deleted. If this field is set to zero, the Job becomes eligible to be deleted immediately after it finishes. This field is alpha-level and is only honored by servers that enable the TTLAfterFinished feature.\"", args=[d.arg(name='ttlSecondsAfterFinished', type=d.T.integer)]), + '#withTtlSecondsAfterFinished':: d.fn(help="\"ttlSecondsAfterFinished limits the lifetime of a Job that has finished execution (either Complete or Failed). If this field is set, ttlSecondsAfterFinished after the Job finishes, it is eligible to be automatically deleted. When the Job is being deleted, its lifecycle guarantees (e.g. finalizers) will be honored. If this field is unset, the Job won't be automatically deleted. If this field is set to zero, the Job becomes eligible to be deleted immediately after it finishes.\"", args=[d.arg(name='ttlSecondsAfterFinished', type=d.T.integer)]), withTtlSecondsAfterFinished(ttlSecondsAfterFinished): { ttlSecondsAfterFinished: ttlSecondsAfterFinished }, '#mixin': 'ignore', mixin: self, diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/batch/v1/jobStatus.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/batch/v1/jobStatus.libsonnet similarity index 53% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/batch/v1/jobStatus.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/batch/v1/jobStatus.libsonnet index 1bb4ea9d808..f05416634de 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/batch/v1/jobStatus.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/batch/v1/jobStatus.libsonnet @@ -1,9 +1,20 @@ { local d = (import 'doc-util/main.libsonnet'), '#':: d.pkg(name='jobStatus', url='', help='"JobStatus represents the current state of a Job."'), - '#withActive':: d.fn(help='"The number of actively running pods."', args=[d.arg(name='active', type=d.T.integer)]), + '#uncountedTerminatedPods':: d.obj(help="\"UncountedTerminatedPods holds UIDs of Pods that have terminated but haven't been accounted in Job status counters.\""), + uncountedTerminatedPods: { + '#withFailed':: d.fn(help='"failed holds UIDs of failed Pods."', args=[d.arg(name='failed', type=d.T.array)]), + withFailed(failed): { uncountedTerminatedPods+: { failed: if std.isArray(v=failed) then failed else [failed] } }, + '#withFailedMixin':: d.fn(help='"failed holds UIDs of failed Pods."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='failed', type=d.T.array)]), + withFailedMixin(failed): { uncountedTerminatedPods+: { failed+: if std.isArray(v=failed) then failed else [failed] } }, + '#withSucceeded':: d.fn(help='"succeeded holds UIDs of succeeded 
Pods."', args=[d.arg(name='succeeded', type=d.T.array)]), + withSucceeded(succeeded): { uncountedTerminatedPods+: { succeeded: if std.isArray(v=succeeded) then succeeded else [succeeded] } }, + '#withSucceededMixin':: d.fn(help='"succeeded holds UIDs of succeeded Pods."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='succeeded', type=d.T.array)]), + withSucceededMixin(succeeded): { uncountedTerminatedPods+: { succeeded+: if std.isArray(v=succeeded) then succeeded else [succeeded] } }, + }, + '#withActive':: d.fn(help='"The number of pending and running pods."', args=[d.arg(name='active', type=d.T.integer)]), withActive(active): { active: active }, - '#withCompletedIndexes':: d.fn(help='"CompletedIndexes holds the completed indexes when .spec.completionMode = \\"Indexed\\" in a text format. The indexes are represented as decimal integers separated by commas. The numbers are listed in increasing order. Three or more consecutive numbers are compressed and represented by the first and last element of the series, separated by a hyphen. For example, if the completed indexes are 1, 3, 4, 5 and 7, they are represented as \\"1,3-5,7\\"."', args=[d.arg(name='completedIndexes', type=d.T.string)]), + '#withCompletedIndexes':: d.fn(help='"completedIndexes holds the completed indexes when .spec.completionMode = \\"Indexed\\" in a text format. The indexes are represented as decimal integers separated by commas. The numbers are listed in increasing order. Three or more consecutive numbers are compressed and represented by the first and last element of the series, separated by a hyphen. For example, if the completed indexes are 1, 3, 4, 5 and 7, they are represented as \\"1,3-5,7\\"."', args=[d.arg(name='completedIndexes', type=d.T.string)]), withCompletedIndexes(completedIndexes): { completedIndexes: completedIndexes }, '#withCompletionTime':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='completionTime', type=d.T.string)]), withCompletionTime(completionTime): { completionTime: completionTime }, @@ -13,10 +24,16 @@ withConditionsMixin(conditions): { conditions+: if std.isArray(v=conditions) then conditions else [conditions] }, '#withFailed':: d.fn(help='"The number of pods which reached phase Failed."', args=[d.arg(name='failed', type=d.T.integer)]), withFailed(failed): { failed: failed }, + '#withFailedIndexes':: d.fn(help='"FailedIndexes holds the failed indexes when backoffLimitPerIndex=true. The indexes are represented in the text format analogous as for the `completedIndexes` field, ie. they are kept as decimal integers separated by commas. The numbers are listed in increasing order. Three or more consecutive numbers are compressed and represented by the first and last element of the series, separated by a hyphen. For example, if the failed indexes are 1, 3, 4, 5 and 7, they are represented as \\"1,3-5,7\\". This field is beta-level. 
It can be used when the `JobBackoffLimitPerIndex` feature gate is enabled (enabled by default)."', args=[d.arg(name='failedIndexes', type=d.T.string)]), + withFailedIndexes(failedIndexes): { failedIndexes: failedIndexes }, + '#withReady':: d.fn(help='"The number of pods which have a Ready condition."', args=[d.arg(name='ready', type=d.T.integer)]), + withReady(ready): { ready: ready }, '#withStartTime':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='startTime', type=d.T.string)]), withStartTime(startTime): { startTime: startTime }, '#withSucceeded':: d.fn(help='"The number of pods which reached phase Succeeded."', args=[d.arg(name='succeeded', type=d.T.integer)]), withSucceeded(succeeded): { succeeded: succeeded }, + '#withTerminating':: d.fn(help='"The number of pods which are terminating (in phase Pending or Running and have a deletionTimestamp).\\n\\nThis field is beta-level. The job controller populates the field when the feature gate JobPodReplacementPolicy is enabled (enabled by default)."', args=[d.arg(name='terminating', type=d.T.integer)]), + withTerminating(terminating): { terminating: terminating }, '#mixin': 'ignore', mixin: self, } diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/batch/v1beta1/jobTemplateSpec.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/batch/v1/jobTemplateSpec.libsonnet similarity index 81% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/batch/v1beta1/jobTemplateSpec.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/batch/v1/jobTemplateSpec.libsonnet index 5b4af07f25b..18b9fded040 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/batch/v1beta1/jobTemplateSpec.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/batch/v1/jobTemplateSpec.libsonnet @@ -3,12 +3,10 @@ '#':: d.pkg(name='jobTemplateSpec', url='', help='"JobTemplateSpec describes the data a Job should have when created from a template"'), '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. 
More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -19,21 +17,21 @@ withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. 
The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { metadata+: { generateName: generateName } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { metadata+: { labels+: labels } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. 
This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { metadata+: { namespace: namespace } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. 
There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, @@ -41,13 +39,20 @@ withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { metadata+: { uid: uid } }, }, '#spec':: d.obj(help='"JobSpec describes how the job execution will look like."'), spec: { + '#podFailurePolicy':: d.obj(help='"PodFailurePolicy describes how failed pods influence the backoffLimit."'), + podFailurePolicy: { + '#withRules':: d.fn(help='"A list of pod failure policy rules. The rules are evaluated in order. Once a rule matches a Pod failure, the remaining of the rules are ignored. When no rule matches the Pod failure, the default handling applies - the counter of pod failures is incremented and it is checked against the backoffLimit. At most 20 elements are allowed."', args=[d.arg(name='rules', type=d.T.array)]), + withRules(rules): { spec+: { podFailurePolicy+: { rules: if std.isArray(v=rules) then rules else [rules] } } }, + '#withRulesMixin':: d.fn(help='"A list of pod failure policy rules. The rules are evaluated in order. Once a rule matches a Pod failure, the remaining of the rules are ignored. 
When no rule matches the Pod failure, the default handling applies - the counter of pod failures is incremented and it is checked against the backoffLimit. At most 20 elements are allowed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='rules', type=d.T.array)]), + withRulesMixin(rules): { spec+: { podFailurePolicy+: { rules+: if std.isArray(v=rules) then rules else [rules] } } }, + }, '#selector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), selector: { '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), @@ -63,12 +68,10 @@ template: { '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { spec+: { template+: { metadata+: { annotations: annotations } } } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { spec+: { template+: { metadata+: { annotations+: annotations } } } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { spec+: { template+: { metadata+: { clusterName: clusterName } } } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. 
Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { spec+: { template+: { metadata+: { creationTimestamp: creationTimestamp } } } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -79,21 +82,21 @@ withFinalizers(finalizers): { spec+: { template+: { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } } } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { spec+: { template+: { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } } } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. 
The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { spec+: { template+: { metadata+: { generateName: generateName } } } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { spec+: { template+: { metadata+: { generation: generation } } } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { spec+: { template+: { metadata+: { labels: labels } } } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { spec+: { template+: { metadata+: { labels+: labels } } } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { spec+: { template+: { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } } } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". 
The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { spec+: { template+: { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } } } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { spec+: { template+: { metadata+: { name: name } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { spec+: { template+: { metadata+: { namespace: namespace } } } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { spec+: { template+: { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } } } }, @@ -101,9 +104,9 @@ withOwnerReferencesMixin(ownerReferences): { spec+: { template+: { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } } } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. 
Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { spec+: { template+: { metadata+: { resourceVersion: resourceVersion } } } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { spec+: { template+: { metadata+: { selfLink: selfLink } } } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { spec+: { template+: { metadata+: { uid: uid } } } }, }, '#spec':: d.obj(help='"PodSpec is a description of a pod."'), @@ -162,6 +165,11 @@ '#withSearchesMixin':: d.fn(help='"A list of DNS search domains for host-name lookup. This will be appended to the base search paths generated from DNSPolicy. Duplicated search paths will be removed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='searches', type=d.T.array)]), withSearchesMixin(searches): { spec+: { template+: { spec+: { dnsConfig+: { searches+: if std.isArray(v=searches) then searches else [searches] } } } } }, }, + '#os':: d.obj(help='"PodOS defines the OS parameters of a pod."'), + os: { + '#withName':: d.fn(help='"Name is the name of the operating system. The currently supported values are linux and windows. Additional value may be defined in future and can be one of: https://github.com/opencontainers/runtime-spec/blob/master/config.md#platform-specific-configuration Clients should expect to handle additional values and treat unrecognized values in this field as os: null"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { spec+: { template+: { spec+: { os+: { name: name } } } } }, + }, '#securityContext':: d.obj(help='"PodSecurityContext holds pod-level security attributes and common container settings. Some fields are also present in container.securityContext. 
Field values of container.securityContext take precedence over field values of PodSecurityContext."'), securityContext: { '#seLinuxOptions':: d.obj(help='"SELinuxOptions are the labels to be applied to the container"'), @@ -177,7 +185,7 @@ }, '#seccompProfile':: d.obj(help="\"SeccompProfile defines a pod/container's seccomp profile settings. Only one profile source may be set.\""), seccompProfile: { - '#withLocalhostProfile':: d.fn(help="\"localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must only be set if type is \\\"Localhost\\\".\"", args=[d.arg(name='localhostProfile', type=d.T.string)]), + '#withLocalhostProfile':: d.fn(help="\"localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must be set if type is \\\"Localhost\\\". Must NOT be set for any other type.\"", args=[d.arg(name='localhostProfile', type=d.T.string)]), withLocalhostProfile(localhostProfile): { spec+: { template+: { spec+: { securityContext+: { seccompProfile+: { localhostProfile: localhostProfile } } } } } }, '#withType':: d.fn(help='"type indicates which kind of seccomp profile will be applied. Valid options are:\\n\\nLocalhost - a profile defined in a file on the node should be used. RuntimeDefault - the container runtime default profile should be used. Unconfined - no profile should be applied."', args=[d.arg(name='type', type=d.T.string)]), withType(type): { spec+: { template+: { spec+: { securityContext+: { seccompProfile+: { type: type } } } } } }, @@ -188,26 +196,28 @@ withGmsaCredentialSpec(gmsaCredentialSpec): { spec+: { template+: { spec+: { securityContext+: { windowsOptions+: { gmsaCredentialSpec: gmsaCredentialSpec } } } } } }, '#withGmsaCredentialSpecName':: d.fn(help='"GMSACredentialSpecName is the name of the GMSA credential spec to use."', args=[d.arg(name='gmsaCredentialSpecName', type=d.T.string)]), withGmsaCredentialSpecName(gmsaCredentialSpecName): { spec+: { template+: { spec+: { securityContext+: { windowsOptions+: { gmsaCredentialSpecName: gmsaCredentialSpecName } } } } } }, + '#withHostProcess':: d.fn(help="\"HostProcess determines if a container should be run as a 'Host Process' container. All of a Pod's containers must have the same effective HostProcess value (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). In addition, if HostProcess is true then HostNetwork must also be set to true.\"", args=[d.arg(name='hostProcess', type=d.T.boolean)]), + withHostProcess(hostProcess): { spec+: { template+: { spec+: { securityContext+: { windowsOptions+: { hostProcess: hostProcess } } } } } }, '#withRunAsUserName':: d.fn(help='"The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence."', args=[d.arg(name='runAsUserName', type=d.T.string)]), withRunAsUserName(runAsUserName): { spec+: { template+: { spec+: { securityContext+: { windowsOptions+: { runAsUserName: runAsUserName } } } } } }, }, - '#withFsGroup':: d.fn(help="\"A special supplemental group that applies to all containers in a pod. 
Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod:\\n\\n1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw----\\n\\nIf unset, the Kubelet will not modify the ownership and permissions of any volume.\"", args=[d.arg(name='fsGroup', type=d.T.integer)]), + '#withFsGroup':: d.fn(help="\"A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod:\\n\\n1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw----\\n\\nIf unset, the Kubelet will not modify the ownership and permissions of any volume. Note that this field cannot be set when spec.os.name is windows.\"", args=[d.arg(name='fsGroup', type=d.T.integer)]), withFsGroup(fsGroup): { spec+: { template+: { spec+: { securityContext+: { fsGroup: fsGroup } } } } }, - '#withFsGroupChangePolicy':: d.fn(help='"fsGroupChangePolicy defines behavior of changing ownership and permission of the volume before being exposed inside Pod. This field will only apply to volume types which support fsGroup based ownership(and permissions). It will have no effect on ephemeral volume types such as: secret, configmaps and emptydir. Valid values are \\"OnRootMismatch\\" and \\"Always\\". If not specified, \\"Always\\" is used."', args=[d.arg(name='fsGroupChangePolicy', type=d.T.string)]), + '#withFsGroupChangePolicy':: d.fn(help='"fsGroupChangePolicy defines behavior of changing ownership and permission of the volume before being exposed inside Pod. This field will only apply to volume types which support fsGroup based ownership(and permissions). It will have no effect on ephemeral volume types such as: secret, configmaps and emptydir. Valid values are \\"OnRootMismatch\\" and \\"Always\\". If not specified, \\"Always\\" is used. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='fsGroupChangePolicy', type=d.T.string)]), withFsGroupChangePolicy(fsGroupChangePolicy): { spec+: { template+: { spec+: { securityContext+: { fsGroupChangePolicy: fsGroupChangePolicy } } } } }, - '#withRunAsGroup':: d.fn(help='"The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container."', args=[d.arg(name='runAsGroup', type=d.T.integer)]), + '#withRunAsGroup':: d.fn(help='"The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='runAsGroup', type=d.T.integer)]), withRunAsGroup(runAsGroup): { spec+: { template+: { spec+: { securityContext+: { runAsGroup: runAsGroup } } } } }, '#withRunAsNonRoot':: d.fn(help='"Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in SecurityContext. 
If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence."', args=[d.arg(name='runAsNonRoot', type=d.T.boolean)]), withRunAsNonRoot(runAsNonRoot): { spec+: { template+: { spec+: { securityContext+: { runAsNonRoot: runAsNonRoot } } } } }, - '#withRunAsUser':: d.fn(help='"The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container."', args=[d.arg(name='runAsUser', type=d.T.integer)]), + '#withRunAsUser':: d.fn(help='"The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='runAsUser', type=d.T.integer)]), withRunAsUser(runAsUser): { spec+: { template+: { spec+: { securityContext+: { runAsUser: runAsUser } } } } }, - '#withSupplementalGroups':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID. If unspecified, no groups will be added to any container.\"", args=[d.arg(name='supplementalGroups', type=d.T.array)]), + '#withSupplementalGroups':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID, the fsGroup (if specified), and group memberships defined in the container image for the uid of the container process. If unspecified, no additional groups are added to any container. Note that group memberships defined in the container image for the uid of the container process are still effective, even if they are not included in this list. Note that this field cannot be set when spec.os.name is windows.\"", args=[d.arg(name='supplementalGroups', type=d.T.array)]), withSupplementalGroups(supplementalGroups): { spec+: { template+: { spec+: { securityContext+: { supplementalGroups: if std.isArray(v=supplementalGroups) then supplementalGroups else [supplementalGroups] } } } } }, - '#withSupplementalGroupsMixin':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID. If unspecified, no groups will be added to any container.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='supplementalGroups', type=d.T.array)]), + '#withSupplementalGroupsMixin':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID, the fsGroup (if specified), and group memberships defined in the container image for the uid of the container process. If unspecified, no additional groups are added to any container. Note that group memberships defined in the container image for the uid of the container process are still effective, even if they are not included in this list. 
Note that this field cannot be set when spec.os.name is windows.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='supplementalGroups', type=d.T.array)]), withSupplementalGroupsMixin(supplementalGroups): { spec+: { template+: { spec+: { securityContext+: { supplementalGroups+: if std.isArray(v=supplementalGroups) then supplementalGroups else [supplementalGroups] } } } } }, - '#withSysctls':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch."', args=[d.arg(name='sysctls', type=d.T.array)]), + '#withSysctls':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='sysctls', type=d.T.array)]), withSysctls(sysctls): { spec+: { template+: { spec+: { securityContext+: { sysctls: if std.isArray(v=sysctls) then sysctls else [sysctls] } } } } }, - '#withSysctlsMixin':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='sysctls', type=d.T.array)]), + '#withSysctlsMixin':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch. Note that this field cannot be set when spec.os.name is windows."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='sysctls', type=d.T.array)]), withSysctlsMixin(sysctls): { spec+: { template+: { spec+: { securityContext+: { sysctls+: if std.isArray(v=sysctls) then sysctls else [sysctls] } } } } }, }, '#withActiveDeadlineSeconds':: d.fn(help='"Optional duration in seconds the pod may be active on the node relative to StartTime before the system will actively try to mark it failed and kill associated containers. Value must be a positive integer."', args=[d.arg(name='activeDeadlineSeconds', type=d.T.integer)]), @@ -222,9 +232,9 @@ withDnsPolicy(dnsPolicy): { spec+: { template+: { spec+: { dnsPolicy: dnsPolicy } } } }, '#withEnableServiceLinks':: d.fn(help="\"EnableServiceLinks indicates whether information about services should be injected into pod's environment variables, matching the syntax of Docker links. Optional: Defaults to true.\"", args=[d.arg(name='enableServiceLinks', type=d.T.boolean)]), withEnableServiceLinks(enableServiceLinks): { spec+: { template+: { spec+: { enableServiceLinks: enableServiceLinks } } } }, - '#withEphemeralContainers':: d.fn(help="\"List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. This field is alpha-level and is only honored by servers that enable the EphemeralContainers feature.\"", args=[d.arg(name='ephemeralContainers', type=d.T.array)]), + '#withEphemeralContainers':: d.fn(help="\"List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. 
This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource.\"", args=[d.arg(name='ephemeralContainers', type=d.T.array)]), withEphemeralContainers(ephemeralContainers): { spec+: { template+: { spec+: { ephemeralContainers: if std.isArray(v=ephemeralContainers) then ephemeralContainers else [ephemeralContainers] } } } }, - '#withEphemeralContainersMixin':: d.fn(help="\"List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. This field is alpha-level and is only honored by servers that enable the EphemeralContainers feature.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='ephemeralContainers', type=d.T.array)]), + '#withEphemeralContainersMixin':: d.fn(help="\"List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='ephemeralContainers', type=d.T.array)]), withEphemeralContainersMixin(ephemeralContainers): { spec+: { template+: { spec+: { ephemeralContainers+: if std.isArray(v=ephemeralContainers) then ephemeralContainers else [ephemeralContainers] } } } }, '#withHostAliases':: d.fn(help="\"HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts file if specified. This is only valid for non-hostNetwork pods.\"", args=[d.arg(name='hostAliases', type=d.T.array)]), withHostAliases(hostAliases): { spec+: { template+: { spec+: { hostAliases: if std.isArray(v=hostAliases) then hostAliases else [hostAliases] } } } }, @@ -236,11 +246,13 @@ withHostNetwork(hostNetwork): { spec+: { template+: { spec+: { hostNetwork: hostNetwork } } } }, '#withHostPID':: d.fn(help="\"Use the host's pid namespace. Optional: Default to false.\"", args=[d.arg(name='hostPID', type=d.T.boolean)]), withHostPID(hostPID): { spec+: { template+: { spec+: { hostPID: hostPID } } } }, + '#withHostUsers':: d.fn(help="\"Use the host's user namespace. Optional: Default to true. If set to true or not present, the pod will be run in the host user namespace, useful for when the pod needs a feature only available to the host user namespace, such as loading a kernel module with CAP_SYS_MODULE. When set to false, a new userns is created for the pod. Setting false is useful for mitigating container breakout vulnerabilities even allowing users to run their containers as root without actually having root privileges on the host. 
This field is alpha-level and is only honored by servers that enable the UserNamespacesSupport feature.\"", args=[d.arg(name='hostUsers', type=d.T.boolean)]), + withHostUsers(hostUsers): { spec+: { template+: { spec+: { hostUsers: hostUsers } } } }, '#withHostname':: d.fn(help="\"Specifies the hostname of the Pod If not specified, the pod's hostname will be set to a system-defined value.\"", args=[d.arg(name='hostname', type=d.T.string)]), withHostname(hostname): { spec+: { template+: { spec+: { hostname: hostname } } } }, - '#withImagePullSecrets':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), + '#withImagePullSecrets':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), withImagePullSecrets(imagePullSecrets): { spec+: { template+: { spec+: { imagePullSecrets: if std.isArray(v=imagePullSecrets) then imagePullSecrets else [imagePullSecrets] } } } }, - '#withImagePullSecretsMixin':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), + '#withImagePullSecretsMixin':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), withImagePullSecretsMixin(imagePullSecrets): { spec+: { template+: { spec+: { imagePullSecrets+: if std.isArray(v=imagePullSecrets) then imagePullSecrets else [imagePullSecrets] } } } }, '#withInitContainers':: d.fn(help='"List of initialization containers belonging to the pod. Init containers are executed in order prior to containers being started. If any init container fails, the pod is considered to have failed and is handled according to its restartPolicy. The name for an init container or normal container must be unique among all containers. Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes. 
The resourceRequirements of an init container are taken into account during scheduling by finding the highest request/limit for each resource type, and then using the max of of that value or the sum of the normal containers. Limits are applied to init containers in a similar fashion. Init containers cannot currently be added or removed. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/"', args=[d.arg(name='initContainers', type=d.T.array)]), withInitContainers(initContainers): { spec+: { template+: { spec+: { initContainers: if std.isArray(v=initContainers) then initContainers else [initContainers] } } } }, @@ -252,26 +264,34 @@ withNodeSelector(nodeSelector): { spec+: { template+: { spec+: { nodeSelector: nodeSelector } } } }, '#withNodeSelectorMixin':: d.fn(help="\"NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='nodeSelector', type=d.T.object)]), withNodeSelectorMixin(nodeSelector): { spec+: { template+: { spec+: { nodeSelector+: nodeSelector } } } }, - '#withOverhead':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md This field is alpha-level as of Kubernetes v1.16, and is only honored by servers that enable the PodOverhead feature."', args=[d.arg(name='overhead', type=d.T.object)]), + '#withOverhead':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md"', args=[d.arg(name='overhead', type=d.T.object)]), withOverhead(overhead): { spec+: { template+: { spec+: { overhead: overhead } } } }, - '#withOverheadMixin':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. 
If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md This field is alpha-level as of Kubernetes v1.16, and is only honored by servers that enable the PodOverhead feature."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='overhead', type=d.T.object)]), + '#withOverheadMixin':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='overhead', type=d.T.object)]), withOverheadMixin(overhead): { spec+: { template+: { spec+: { overhead+: overhead } } } }, - '#withPreemptionPolicy':: d.fn(help='"PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset. This field is beta-level, gated by the NonPreemptingPriority feature-gate."', args=[d.arg(name='preemptionPolicy', type=d.T.string)]), + '#withPreemptionPolicy':: d.fn(help='"PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset."', args=[d.arg(name='preemptionPolicy', type=d.T.string)]), withPreemptionPolicy(preemptionPolicy): { spec+: { template+: { spec+: { preemptionPolicy: preemptionPolicy } } } }, '#withPriority':: d.fn(help='"The priority value. Various system components use this field to find the priority of the pod. When Priority Admission Controller is enabled, it prevents users from setting this field. The admission controller populates this field from PriorityClassName. The higher the value, the higher the priority."', args=[d.arg(name='priority', type=d.T.integer)]), withPriority(priority): { spec+: { template+: { spec+: { priority: priority } } } }, '#withPriorityClassName':: d.fn(help="\"If specified, indicates the pod's priority. \\\"system-node-critical\\\" and \\\"system-cluster-critical\\\" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default.\"", args=[d.arg(name='priorityClassName', type=d.T.string)]), withPriorityClassName(priorityClassName): { spec+: { template+: { spec+: { priorityClassName: priorityClassName } } } }, - '#withReadinessGates':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. 
A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/0007-pod-ready%2B%2B.md"', args=[d.arg(name='readinessGates', type=d.T.array)]), + '#withReadinessGates':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates"', args=[d.arg(name='readinessGates', type=d.T.array)]), withReadinessGates(readinessGates): { spec+: { template+: { spec+: { readinessGates: if std.isArray(v=readinessGates) then readinessGates else [readinessGates] } } } }, - '#withReadinessGatesMixin':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/0007-pod-ready%2B%2B.md"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='readinessGates', type=d.T.array)]), + '#withReadinessGatesMixin':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='readinessGates', type=d.T.array)]), withReadinessGatesMixin(readinessGates): { spec+: { template+: { spec+: { readinessGates+: if std.isArray(v=readinessGates) then readinessGates else [readinessGates] } } } }, - '#withRestartPolicy':: d.fn(help='"Restart policy for all containers within the pod. One of Always, OnFailure, Never. Default to Always. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy"', args=[d.arg(name='restartPolicy', type=d.T.string)]), + '#withResourceClaims':: d.fn(help='"ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. The resources will be made available to those containers which consume them by name.\\n\\nThis is an alpha field and requires enabling the DynamicResourceAllocation feature gate.\\n\\nThis field is immutable."', args=[d.arg(name='resourceClaims', type=d.T.array)]), + withResourceClaims(resourceClaims): { spec+: { template+: { spec+: { resourceClaims: if std.isArray(v=resourceClaims) then resourceClaims else [resourceClaims] } } } }, + '#withResourceClaimsMixin':: d.fn(help='"ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. The resources will be made available to those containers which consume them by name.\\n\\nThis is an alpha field and requires enabling the DynamicResourceAllocation feature gate.\\n\\nThis field is immutable."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='resourceClaims', type=d.T.array)]), + withResourceClaimsMixin(resourceClaims): { spec+: { template+: { spec+: { resourceClaims+: if std.isArray(v=resourceClaims) then resourceClaims else [resourceClaims] } } } }, + '#withRestartPolicy':: d.fn(help='"Restart policy for all containers within the pod. 
One of Always, OnFailure, Never. In some contexts, only a subset of those values may be permitted. Default to Always. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy"', args=[d.arg(name='restartPolicy', type=d.T.string)]), withRestartPolicy(restartPolicy): { spec+: { template+: { spec+: { restartPolicy: restartPolicy } } } }, - '#withRuntimeClassName':: d.fn(help='"RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \\"legacy\\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md This is a beta feature as of Kubernetes v1.14."', args=[d.arg(name='runtimeClassName', type=d.T.string)]), + '#withRuntimeClassName':: d.fn(help='"RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \\"legacy\\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class"', args=[d.arg(name='runtimeClassName', type=d.T.string)]), withRuntimeClassName(runtimeClassName): { spec+: { template+: { spec+: { runtimeClassName: runtimeClassName } } } }, '#withSchedulerName':: d.fn(help='"If specified, the pod will be dispatched by specified scheduler. If not specified, the pod will be dispatched by default scheduler."', args=[d.arg(name='schedulerName', type=d.T.string)]), withSchedulerName(schedulerName): { spec+: { template+: { spec+: { schedulerName: schedulerName } } } }, + '#withSchedulingGates':: d.fn(help='"SchedulingGates is an opaque list of values that if specified will block scheduling the pod. If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the scheduler will not attempt to schedule the pod.\\n\\nSchedulingGates can only be set at pod creation time, and be removed only afterwards.\\n\\nThis is a beta feature enabled by the PodSchedulingReadiness feature gate."', args=[d.arg(name='schedulingGates', type=d.T.array)]), + withSchedulingGates(schedulingGates): { spec+: { template+: { spec+: { schedulingGates: if std.isArray(v=schedulingGates) then schedulingGates else [schedulingGates] } } } }, + '#withSchedulingGatesMixin':: d.fn(help='"SchedulingGates is an opaque list of values that if specified will block scheduling the pod. If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the scheduler will not attempt to schedule the pod.\\n\\nSchedulingGates can only be set at pod creation time, and be removed only afterwards.\\n\\nThis is a beta feature enabled by the PodSchedulingReadiness feature gate."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='schedulingGates', type=d.T.array)]), + withSchedulingGatesMixin(schedulingGates): { spec+: { template+: { spec+: { schedulingGates+: if std.isArray(v=schedulingGates) then schedulingGates else [schedulingGates] } } } }, '#withServiceAccount':: d.fn(help='"DeprecatedServiceAccount is a depreciated alias for ServiceAccountName. 
Deprecated: Use serviceAccountName instead."', args=[d.arg(name='serviceAccount', type=d.T.string)]), withServiceAccount(serviceAccount): { spec+: { template+: { spec+: { serviceAccount: serviceAccount } } } }, '#withServiceAccountName':: d.fn(help='"ServiceAccountName is the name of the ServiceAccount to use to run this pod. More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/"', args=[d.arg(name='serviceAccountName', type=d.T.string)]), @@ -302,17 +322,23 @@ withActiveDeadlineSeconds(activeDeadlineSeconds): { spec+: { activeDeadlineSeconds: activeDeadlineSeconds } }, '#withBackoffLimit':: d.fn(help='"Specifies the number of retries before marking this job failed. Defaults to 6"', args=[d.arg(name='backoffLimit', type=d.T.integer)]), withBackoffLimit(backoffLimit): { spec+: { backoffLimit: backoffLimit } }, - '#withCompletionMode':: d.fn(help="\"CompletionMode specifies how Pod completions are tracked. It can be `NonIndexed` (default) or `Indexed`.\\n\\n`NonIndexed` means that the Job is considered complete when there have been .spec.completions successfully completed Pods. Each Pod completion is homologous to each other.\\n\\n`Indexed` means that the Pods of a Job get an associated completion index from 0 to (.spec.completions - 1), available in the annotation batch.kubernetes.io/job-completion-index. The Job is considered complete when there is one successfully completed Pod for each index. When value is `Indexed`, .spec.completions must be specified and `.spec.parallelism` must be less than or equal to 10^5.\\n\\nThis field is alpha-level and is only honored by servers that enable the IndexedJob feature gate. More completion modes can be added in the future. If the Job controller observes a mode that it doesn't recognize, the controller skips updates for the Job.\"", args=[d.arg(name='completionMode', type=d.T.string)]), + '#withBackoffLimitPerIndex':: d.fn(help="\"Specifies the limit for the number of retries within an index before marking this index as failed. When enabled the number of failures per index is kept in the pod's batch.kubernetes.io/job-index-failure-count annotation. It can only be set when Job's completionMode=Indexed, and the Pod's restart policy is Never. The field is immutable. This field is beta-level. It can be used when the `JobBackoffLimitPerIndex` feature gate is enabled (enabled by default).\"", args=[d.arg(name='backoffLimitPerIndex', type=d.T.integer)]), + withBackoffLimitPerIndex(backoffLimitPerIndex): { spec+: { backoffLimitPerIndex: backoffLimitPerIndex } }, + '#withCompletionMode':: d.fn(help="\"completionMode specifies how Pod completions are tracked. It can be `NonIndexed` (default) or `Indexed`.\\n\\n`NonIndexed` means that the Job is considered complete when there have been .spec.completions successfully completed Pods. Each Pod completion is homologous to each other.\\n\\n`Indexed` means that the Pods of a Job get an associated completion index from 0 to (.spec.completions - 1), available in the annotation batch.kubernetes.io/job-completion-index. The Job is considered complete when there is one successfully completed Pod for each index. When value is `Indexed`, .spec.completions must be specified and `.spec.parallelism` must be less than or equal to 10^5. In addition, The Pod name takes the form `$(job-name)-$(index)-$(random-string)`, the Pod hostname takes the form `$(job-name)-$(index)`.\\n\\nMore completion modes can be added in the future. 
If the Job controller observes a mode that it doesn't recognize, which is possible during upgrades due to version skew, the controller skips updates for the Job.\"", args=[d.arg(name='completionMode', type=d.T.string)]), withCompletionMode(completionMode): { spec+: { completionMode: completionMode } }, - '#withCompletions':: d.fn(help='"Specifies the desired number of successfully finished pods the job should be run with. Setting to nil means that the success of any pod signals the success of all pods, and allows parallelism to have any positive value. Setting to 1 means that parallelism is limited to 1 and the success of that pod signals the success of the job. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/"', args=[d.arg(name='completions', type=d.T.integer)]), + '#withCompletions':: d.fn(help='"Specifies the desired number of successfully finished pods the job should be run with. Setting to null means that the success of any pod signals the success of all pods, and allows parallelism to have any positive value. Setting to 1 means that parallelism is limited to 1 and the success of that pod signals the success of the job. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/"', args=[d.arg(name='completions', type=d.T.integer)]), withCompletions(completions): { spec+: { completions: completions } }, '#withManualSelector':: d.fn(help='"manualSelector controls generation of pod labels and pod selectors. Leave `manualSelector` unset unless you are certain what you are doing. When false or unset, the system pick labels unique to this job and appends those labels to the pod template. When true, the user is responsible for picking unique labels and specifying the selector. Failure to pick a unique label may cause this and other jobs to not function correctly. However, You may see `manualSelector=true` in jobs that were created with the old `extensions/v1beta1` API. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/#specifying-your-own-pod-selector"', args=[d.arg(name='manualSelector', type=d.T.boolean)]), withManualSelector(manualSelector): { spec+: { manualSelector: manualSelector } }, + '#withMaxFailedIndexes':: d.fn(help='"Specifies the maximal number of failed indexes before marking the Job as failed, when backoffLimitPerIndex is set. Once the number of failed indexes exceeds this number the entire Job is marked as Failed and its execution is terminated. When left as null the job continues execution of all of its indexes and is marked with the `Complete` Job condition. It can only be specified when backoffLimitPerIndex is set. It can be null or up to completions. It is required and must be less than or equal to 10^4 when is completions greater than 10^5. This field is beta-level. It can be used when the `JobBackoffLimitPerIndex` feature gate is enabled (enabled by default)."', args=[d.arg(name='maxFailedIndexes', type=d.T.integer)]), + withMaxFailedIndexes(maxFailedIndexes): { spec+: { maxFailedIndexes: maxFailedIndexes } }, '#withParallelism':: d.fn(help='"Specifies the maximum desired number of pods the job should run at any given time. The actual number of pods running in steady state will be less than this number when ((.spec.completions - .status.successful) < .spec.parallelism), i.e. when the work left to do is less than max parallelism. 
More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/"', args=[d.arg(name='parallelism', type=d.T.integer)]), withParallelism(parallelism): { spec+: { parallelism: parallelism } }, - '#withSuspend':: d.fn(help='"Suspend specifies whether the Job controller should create Pods or not. If a Job is created with suspend set to true, no Pods are created by the Job controller. If a Job is suspended after creation (i.e. the flag goes from false to true), the Job controller will delete all active Pods associated with this Job. Users must design their workload to gracefully handle this. Suspending a Job will reset the StartTime field of the Job, effectively resetting the ActiveDeadlineSeconds timer too. This is an alpha field and requires the SuspendJob feature gate to be enabled; otherwise this field may not be set to true. Defaults to false."', args=[d.arg(name='suspend', type=d.T.boolean)]), + '#withPodReplacementPolicy':: d.fn(help='"podReplacementPolicy specifies when to create replacement Pods. Possible values are: - TerminatingOrFailed means that we recreate pods\\n when they are terminating (has a metadata.deletionTimestamp) or failed.\\n- Failed means to wait until a previously created Pod is fully terminated (has phase\\n Failed or Succeeded) before creating a replacement Pod.\\n\\nWhen using podFailurePolicy, Failed is the only allowed value. TerminatingOrFailed and Failed are allowed values when podFailurePolicy is not in use. This is a beta field. To use this, enable the JobPodReplacementPolicy feature toggle. This is on by default."', args=[d.arg(name='podReplacementPolicy', type=d.T.string)]), + withPodReplacementPolicy(podReplacementPolicy): { spec+: { podReplacementPolicy: podReplacementPolicy } }, + '#withSuspend':: d.fn(help='"suspend specifies whether the Job controller should create Pods or not. If a Job is created with suspend set to true, no Pods are created by the Job controller. If a Job is suspended after creation (i.e. the flag goes from false to true), the Job controller will delete all active Pods associated with this Job. Users must design their workload to gracefully handle this. Suspending a Job will reset the StartTime field of the Job, effectively resetting the ActiveDeadlineSeconds timer too. Defaults to false."', args=[d.arg(name='suspend', type=d.T.boolean)]), withSuspend(suspend): { spec+: { suspend: suspend } }, - '#withTtlSecondsAfterFinished':: d.fn(help="\"ttlSecondsAfterFinished limits the lifetime of a Job that has finished execution (either Complete or Failed). If this field is set, ttlSecondsAfterFinished after the Job finishes, it is eligible to be automatically deleted. When the Job is being deleted, its lifecycle guarantees (e.g. finalizers) will be honored. If this field is unset, the Job won't be automatically deleted. If this field is set to zero, the Job becomes eligible to be deleted immediately after it finishes. This field is alpha-level and is only honored by servers that enable the TTLAfterFinished feature.\"", args=[d.arg(name='ttlSecondsAfterFinished', type=d.T.integer)]), + '#withTtlSecondsAfterFinished':: d.fn(help="\"ttlSecondsAfterFinished limits the lifetime of a Job that has finished execution (either Complete or Failed). If this field is set, ttlSecondsAfterFinished after the Job finishes, it is eligible to be automatically deleted. When the Job is being deleted, its lifecycle guarantees (e.g. finalizers) will be honored. If this field is unset, the Job won't be automatically deleted.
If this field is set to zero, the Job becomes eligible to be deleted immediately after it finishes.\"", args=[d.arg(name='ttlSecondsAfterFinished', type=d.T.integer)]), withTtlSecondsAfterFinished(ttlSecondsAfterFinished): { spec+: { ttlSecondsAfterFinished: ttlSecondsAfterFinished } }, }, '#mixin': 'ignore', diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/batch/v1/main.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/batch/v1/main.libsonnet similarity index 53% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/batch/v1/main.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/batch/v1/main.libsonnet index 1d5e7ebdf2e..d3df3a55591 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/batch/v1/main.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/batch/v1/main.libsonnet @@ -9,4 +9,9 @@ jobSpec: (import 'jobSpec.libsonnet'), jobStatus: (import 'jobStatus.libsonnet'), jobTemplateSpec: (import 'jobTemplateSpec.libsonnet'), + podFailurePolicy: (import 'podFailurePolicy.libsonnet'), + podFailurePolicyOnExitCodesRequirement: (import 'podFailurePolicyOnExitCodesRequirement.libsonnet'), + podFailurePolicyOnPodConditionsPattern: (import 'podFailurePolicyOnPodConditionsPattern.libsonnet'), + podFailurePolicyRule: (import 'podFailurePolicyRule.libsonnet'), + uncountedTerminatedPods: (import 'uncountedTerminatedPods.libsonnet'), } diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/batch/v1/podFailurePolicy.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/batch/v1/podFailurePolicy.libsonnet new file mode 100644 index 00000000000..29a48411984 --- /dev/null +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/batch/v1/podFailurePolicy.libsonnet @@ -0,0 +1,10 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='podFailurePolicy', url='', help='"PodFailurePolicy describes how failed pods influence the backoffLimit."'), + '#withRules':: d.fn(help='"A list of pod failure policy rules. The rules are evaluated in order. Once a rule matches a Pod failure, the remaining of the rules are ignored. When no rule matches the Pod failure, the default handling applies - the counter of pod failures is incremented and it is checked against the backoffLimit. At most 20 elements are allowed."', args=[d.arg(name='rules', type=d.T.array)]), + withRules(rules): { rules: if std.isArray(v=rules) then rules else [rules] }, + '#withRulesMixin':: d.fn(help='"A list of pod failure policy rules. The rules are evaluated in order. Once a rule matches a Pod failure, the remaining of the rules are ignored. When no rule matches the Pod failure, the default handling applies - the counter of pod failures is incremented and it is checked against the backoffLimit. 
At most 20 elements are allowed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='rules', type=d.T.array)]), + withRulesMixin(rules): { rules+: if std.isArray(v=rules) then rules else [rules] }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/batch/v1/podFailurePolicyOnExitCodesRequirement.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/batch/v1/podFailurePolicyOnExitCodesRequirement.libsonnet new file mode 100644 index 00000000000..0a81d286d36 --- /dev/null +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/batch/v1/podFailurePolicyOnExitCodesRequirement.libsonnet @@ -0,0 +1,14 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='podFailurePolicyOnExitCodesRequirement', url='', help='"PodFailurePolicyOnExitCodesRequirement describes the requirement for handling a failed pod based on its container exit codes. In particular, it lookups the .state.terminated.exitCode for each app container and init container status, represented by the .status.containerStatuses and .status.initContainerStatuses fields in the Pod status, respectively. Containers completed with success (exit code 0) are excluded from the requirement check."'), + '#withContainerName':: d.fn(help='"Restricts the check for exit codes to the container with the specified name. When null, the rule applies to all containers. When specified, it should match one the container or initContainer names in the pod template."', args=[d.arg(name='containerName', type=d.T.string)]), + withContainerName(containerName): { containerName: containerName }, + '#withOperator':: d.fn(help="\"Represents the relationship between the container exit code(s) and the specified values. Containers completed with success (exit code 0) are excluded from the requirement check. Possible values are:\\n\\n- In: the requirement is satisfied if at least one container exit code\\n (might be multiple if there are multiple containers not restricted\\n by the 'containerName' field) is in the set of specified values.\\n- NotIn: the requirement is satisfied if at least one container exit code\\n (might be multiple if there are multiple containers not restricted\\n by the 'containerName' field) is not in the set of specified values.\\nAdditional values are considered to be added in the future. Clients should react to an unknown operator by assuming the requirement is not satisfied.\"", args=[d.arg(name='operator', type=d.T.string)]), + withOperator(operator): { operator: operator }, + '#withValues':: d.fn(help="\"Specifies the set of values. Each returned container exit code (might be multiple in case of multiple containers) is checked against this set of values with respect to the operator. The list of values must be ordered and must not contain duplicates. Value '0' cannot be used for the In operator. At least one element is required. At most 255 elements are allowed.\"", args=[d.arg(name='values', type=d.T.array)]), + withValues(values): { values: if std.isArray(v=values) then values else [values] }, + '#withValuesMixin':: d.fn(help="\"Specifies the set of values. Each returned container exit code (might be multiple in case of multiple containers) is checked against this set of values with respect to the operator. The list of values must be ordered and must not contain duplicates. Value '0' cannot be used for the In operator. At least one element is required. 
At most 255 elements are allowed.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='values', type=d.T.array)]), + withValuesMixin(values): { values+: if std.isArray(v=values) then values else [values] }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/batch/v1/podFailurePolicyOnPodConditionsPattern.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/batch/v1/podFailurePolicyOnPodConditionsPattern.libsonnet new file mode 100644 index 00000000000..6037e13c8ae --- /dev/null +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/batch/v1/podFailurePolicyOnPodConditionsPattern.libsonnet @@ -0,0 +1,8 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='podFailurePolicyOnPodConditionsPattern', url='', help='"PodFailurePolicyOnPodConditionsPattern describes a pattern for matching an actual pod condition type."'), + '#withType':: d.fn(help='"Specifies the required Pod condition type. To match a pod condition it is required that specified type equals the pod condition type."', args=[d.arg(name='type', type=d.T.string)]), + withType(type): { type: type }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/batch/v1/podFailurePolicyRule.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/batch/v1/podFailurePolicyRule.libsonnet new file mode 100644 index 00000000000..9134be64ec4 --- /dev/null +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/batch/v1/podFailurePolicyRule.libsonnet @@ -0,0 +1,23 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='podFailurePolicyRule', url='', help='"PodFailurePolicyRule describes how a pod failure is handled when the requirements are met. One of onExitCodes and onPodConditions, but not both, can be used in each rule."'), + '#onExitCodes':: d.obj(help='"PodFailurePolicyOnExitCodesRequirement describes the requirement for handling a failed pod based on its container exit codes. In particular, it lookups the .state.terminated.exitCode for each app container and init container status, represented by the .status.containerStatuses and .status.initContainerStatuses fields in the Pod status, respectively. Containers completed with success (exit code 0) are excluded from the requirement check."'), + onExitCodes: { + '#withContainerName':: d.fn(help='"Restricts the check for exit codes to the container with the specified name. When null, the rule applies to all containers. When specified, it should match one the container or initContainer names in the pod template."', args=[d.arg(name='containerName', type=d.T.string)]), + withContainerName(containerName): { onExitCodes+: { containerName: containerName } }, + '#withOperator':: d.fn(help="\"Represents the relationship between the container exit code(s) and the specified values. Containers completed with success (exit code 0) are excluded from the requirement check. 
Possible values are:\\n\\n- In: the requirement is satisfied if at least one container exit code\\n (might be multiple if there are multiple containers not restricted\\n by the 'containerName' field) is in the set of specified values.\\n- NotIn: the requirement is satisfied if at least one container exit code\\n (might be multiple if there are multiple containers not restricted\\n by the 'containerName' field) is not in the set of specified values.\\nAdditional values are considered to be added in the future. Clients should react to an unknown operator by assuming the requirement is not satisfied.\"", args=[d.arg(name='operator', type=d.T.string)]), + withOperator(operator): { onExitCodes+: { operator: operator } }, + '#withValues':: d.fn(help="\"Specifies the set of values. Each returned container exit code (might be multiple in case of multiple containers) is checked against this set of values with respect to the operator. The list of values must be ordered and must not contain duplicates. Value '0' cannot be used for the In operator. At least one element is required. At most 255 elements are allowed.\"", args=[d.arg(name='values', type=d.T.array)]), + withValues(values): { onExitCodes+: { values: if std.isArray(v=values) then values else [values] } }, + '#withValuesMixin':: d.fn(help="\"Specifies the set of values. Each returned container exit code (might be multiple in case of multiple containers) is checked against this set of values with respect to the operator. The list of values must be ordered and must not contain duplicates. Value '0' cannot be used for the In operator. At least one element is required. At most 255 elements are allowed.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='values', type=d.T.array)]), + withValuesMixin(values): { onExitCodes+: { values+: if std.isArray(v=values) then values else [values] } }, + }, + '#withAction':: d.fn(help="\"Specifies the action taken on a pod failure when the requirements are satisfied. Possible values are:\\n\\n- FailJob: indicates that the pod's job is marked as Failed and all\\n running pods are terminated.\\n- FailIndex: indicates that the pod's index is marked as Failed and will\\n not be restarted.\\n This value is beta-level. It can be used when the\\n `JobBackoffLimitPerIndex` feature gate is enabled (enabled by default).\\n- Ignore: indicates that the counter towards the .backoffLimit is not\\n incremented and a replacement pod is created.\\n- Count: indicates that the pod is handled in the default way - the\\n counter towards the .backoffLimit is incremented.\\nAdditional values are considered to be added in the future. Clients should react to an unknown action by skipping the rule.\"", args=[d.arg(name='action', type=d.T.string)]), + withAction(action): { action: action }, + '#withOnPodConditions':: d.fn(help='"Represents the requirement on the pod conditions. The requirement is represented as a list of pod condition patterns. The requirement is satisfied if at least one pattern matches an actual pod condition. At most 20 elements are allowed."', args=[d.arg(name='onPodConditions', type=d.T.array)]), + withOnPodConditions(onPodConditions): { onPodConditions: if std.isArray(v=onPodConditions) then onPodConditions else [onPodConditions] }, + '#withOnPodConditionsMixin':: d.fn(help='"Represents the requirement on the pod conditions. The requirement is represented as a list of pod condition patterns. The requirement is satisfied if at least one pattern matches an actual pod condition. 
At most 20 elements are allowed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='onPodConditions', type=d.T.array)]), + withOnPodConditionsMixin(onPodConditions): { onPodConditions+: if std.isArray(v=onPodConditions) then onPodConditions else [onPodConditions] }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/batch/v1/uncountedTerminatedPods.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/batch/v1/uncountedTerminatedPods.libsonnet new file mode 100644 index 00000000000..482a57a3b19 --- /dev/null +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/batch/v1/uncountedTerminatedPods.libsonnet @@ -0,0 +1,14 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='uncountedTerminatedPods', url='', help="\"UncountedTerminatedPods holds UIDs of Pods that have terminated but haven't been accounted in Job status counters.\""), + '#withFailed':: d.fn(help='"failed holds UIDs of failed Pods."', args=[d.arg(name='failed', type=d.T.array)]), + withFailed(failed): { failed: if std.isArray(v=failed) then failed else [failed] }, + '#withFailedMixin':: d.fn(help='"failed holds UIDs of failed Pods."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='failed', type=d.T.array)]), + withFailedMixin(failed): { failed+: if std.isArray(v=failed) then failed else [failed] }, + '#withSucceeded':: d.fn(help='"succeeded holds UIDs of succeeded Pods."', args=[d.arg(name='succeeded', type=d.T.array)]), + withSucceeded(succeeded): { succeeded: if std.isArray(v=succeeded) then succeeded else [succeeded] }, + '#withSucceededMixin':: d.fn(help='"succeeded holds UIDs of succeeded Pods."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='succeeded', type=d.T.array)]), + withSucceededMixin(succeeded): { succeeded+: if std.isArray(v=succeeded) then succeeded else [succeeded] }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/certificates/main.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/certificates/main.libsonnet similarity index 74% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/certificates/main.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/certificates/main.libsonnet index 2fc91e17e6c..a1350a0bee7 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/certificates/main.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/certificates/main.libsonnet @@ -2,5 +2,5 @@ local d = (import 'doc-util/main.libsonnet'), '#':: d.pkg(name='certificates', url='', help=''), v1: (import 'v1/main.libsonnet'), - v1beta1: (import 'v1beta1/main.libsonnet'), + v1alpha1: (import 'v1alpha1/main.libsonnet'), } diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/certificates/v1/certificateSigningRequest.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/certificates/v1/certificateSigningRequest.libsonnet similarity index 86% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/certificates/v1/certificateSigningRequest.libsonnet rename to 
example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/certificates/v1/certificateSigningRequest.libsonnet index 45845724d08..ae3c70e38ca 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/certificates/v1/certificateSigningRequest.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/certificates/v1/certificateSigningRequest.libsonnet @@ -3,12 +3,10 @@ '#':: d.pkg(name='certificateSigningRequest', url='', help='"CertificateSigningRequest objects provide a mechanism to obtain x509 certificates by submitting a certificate signing request, and having it asynchronously approved and issued.\\n\\nKubelets use this API to obtain:\\n 1. client certificates to authenticate to kube-apiserver (with the \\"kubernetes.io/kube-apiserver-client-kubelet\\" signerName).\\n 2. serving certificates for TLS endpoints kube-apiserver can connect to securely (with the \\"kubernetes.io/kubelet-serving\\" signerName).\\n\\nThis API can be used to request client certificates to authenticate to kube-apiserver (with the \\"kubernetes.io/kube-apiserver-client\\" signerName), or to obtain certificates from custom non-Kubernetes signers."'), '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. 
This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -19,21 +17,21 @@ withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. 
This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { metadata+: { generateName: generateName } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { metadata+: { labels+: labels } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". 
The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { metadata+: { namespace: namespace } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, @@ -41,9 +39,9 @@ withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. 
They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { metadata+: { uid: uid } }, }, '#new':: d.fn(help='new returns an instance of CertificateSigningRequest', args=[d.arg(name='name', type=d.T.string)]), @@ -53,6 +51,8 @@ } + self.metadata.withName(name=name), '#spec':: d.obj(help='"CertificateSigningRequestSpec contains the certificate request."'), spec: { + '#withExpirationSeconds':: d.fn(help='"expirationSeconds is the requested duration of validity of the issued certificate. The certificate signer may issue a certificate with a different validity duration so a client must check the delta between the notBefore and and notAfter fields in the issued certificate to determine the actual duration.\\n\\nThe v1.22+ in-tree implementations of the well-known Kubernetes signers will honor this field as long as the requested duration is not greater than the maximum duration they will honor per the --cluster-signing-duration CLI flag to the Kubernetes controller manager.\\n\\nCertificate signers may not honor this field for various reasons:\\n\\n 1. Old signer that is unaware of the field (such as the in-tree\\n implementations prior to v1.22)\\n 2. Signer whose configured maximum is shorter than the requested duration\\n 3. Signer whose configured minimum is longer than the requested duration\\n\\nThe minimum valid value for expirationSeconds is 600, i.e. 10 minutes."', args=[d.arg(name='expirationSeconds', type=d.T.integer)]), + withExpirationSeconds(expirationSeconds): { spec+: { expirationSeconds: expirationSeconds } }, '#withExtra':: d.fn(help='"extra contains extra attributes of the user that created the CertificateSigningRequest. 
Populated by the API server on creation and immutable."', args=[d.arg(name='extra', type=d.T.object)]), withExtra(extra): { spec+: { extra: extra } }, '#withExtraMixin':: d.fn(help='"extra contains extra attributes of the user that created the CertificateSigningRequest. Populated by the API server on creation and immutable."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='extra', type=d.T.object)]), diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/certificates/v1/certificateSigningRequestCondition.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/certificates/v1/certificateSigningRequestCondition.libsonnet similarity index 100% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/certificates/v1/certificateSigningRequestCondition.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/certificates/v1/certificateSigningRequestCondition.libsonnet diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/certificates/v1/certificateSigningRequestSpec.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/certificates/v1/certificateSigningRequestSpec.libsonnet similarity index 84% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/certificates/v1/certificateSigningRequestSpec.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/certificates/v1/certificateSigningRequestSpec.libsonnet index 49712d490d6..34fd3daa912 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/certificates/v1/certificateSigningRequestSpec.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/certificates/v1/certificateSigningRequestSpec.libsonnet @@ -1,6 +1,8 @@ { local d = (import 'doc-util/main.libsonnet'), '#':: d.pkg(name='certificateSigningRequestSpec', url='', help='"CertificateSigningRequestSpec contains the certificate request."'), + '#withExpirationSeconds':: d.fn(help='"expirationSeconds is the requested duration of validity of the issued certificate. The certificate signer may issue a certificate with a different validity duration so a client must check the delta between the notBefore and and notAfter fields in the issued certificate to determine the actual duration.\\n\\nThe v1.22+ in-tree implementations of the well-known Kubernetes signers will honor this field as long as the requested duration is not greater than the maximum duration they will honor per the --cluster-signing-duration CLI flag to the Kubernetes controller manager.\\n\\nCertificate signers may not honor this field for various reasons:\\n\\n 1. Old signer that is unaware of the field (such as the in-tree\\n implementations prior to v1.22)\\n 2. Signer whose configured maximum is shorter than the requested duration\\n 3. Signer whose configured minimum is longer than the requested duration\\n\\nThe minimum valid value for expirationSeconds is 600, i.e. 10 minutes."', args=[d.arg(name='expirationSeconds', type=d.T.integer)]), + withExpirationSeconds(expirationSeconds): { expirationSeconds: expirationSeconds }, '#withExtra':: d.fn(help='"extra contains extra attributes of the user that created the CertificateSigningRequest. 
Populated by the API server on creation and immutable."', args=[d.arg(name='extra', type=d.T.object)]), withExtra(extra): { extra: extra }, '#withExtraMixin':: d.fn(help='"extra contains extra attributes of the user that created the CertificateSigningRequest. Populated by the API server on creation and immutable."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='extra', type=d.T.object)]), diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/certificates/v1/certificateSigningRequestStatus.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/certificates/v1/certificateSigningRequestStatus.libsonnet similarity index 100% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/certificates/v1/certificateSigningRequestStatus.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/certificates/v1/certificateSigningRequestStatus.libsonnet diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/certificates/v1/main.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/certificates/v1/main.libsonnet similarity index 100% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/certificates/v1/main.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/certificates/v1/main.libsonnet diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/certificates/v1alpha1/clusterTrustBundle.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/certificates/v1alpha1/clusterTrustBundle.libsonnet new file mode 100644 index 00000000000..939b7162a99 --- /dev/null +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/certificates/v1alpha1/clusterTrustBundle.libsonnet @@ -0,0 +1,61 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='clusterTrustBundle', url='', help='"ClusterTrustBundle is a cluster-scoped container for X.509 trust anchors (root certificates).\\n\\nClusterTrustBundle objects are considered to be readable by any authenticated user in the cluster, because they can be mounted by pods using the `clusterTrustBundle` projection. All service accounts have read access to ClusterTrustBundles by default. Users who only have namespace-level access to a cluster can read ClusterTrustBundles by impersonating a serviceaccount that they have access to.\\n\\nIt can be optionally associated with a particular assigner, in which case it contains one valid set of trust anchors for that signer. Signers may have multiple associated ClusterTrustBundles; each is an independent set of trust anchors for that signer. Admission control is used to enforce that only users with permissions on the signer can create or modify the corresponding bundle."'), + '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), + metadata: { + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + withAnnotations(annotations): { metadata+: { annotations: annotations } }, + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, + '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), + withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, + '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), + withDeletionGracePeriodSeconds(deletionGracePeriodSeconds): { metadata+: { deletionGracePeriodSeconds: deletionGracePeriodSeconds } }, + '#withDeletionTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='deletionTimestamp', type=d.T.string)]), + withDeletionTimestamp(deletionTimestamp): { metadata+: { deletionTimestamp: deletionTimestamp } }, + '#withFinalizers':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."', args=[d.arg(name='finalizers', type=d.T.array)]), + withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, + '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. 
finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), + withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + withGenerateName(generateName): { metadata+: { generateName: generateName } }, + '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), + withGeneration(generation): { metadata+: { generation: generation } }, + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), + withLabels(labels): { metadata+: { labels: labels } }, + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + withLabelsMixin(labels): { metadata+: { labels+: labels } }, + '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), + withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, + '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. 
This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), + withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { metadata+: { name: name } }, + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + withNamespace(namespace): { metadata+: { namespace: namespace } }, + '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), + withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, + '#withOwnerReferencesMixin':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ownerReferences', type=d.T.array)]), + withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, + '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), + withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), + withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), + withUid(uid): { metadata+: { uid: uid } }, + }, + '#new':: d.fn(help='new returns an instance of ClusterTrustBundle', args=[d.arg(name='name', type=d.T.string)]), + new(name): { + apiVersion: 'certificates.k8s.io/v1alpha1', + kind: 'ClusterTrustBundle', + } + self.metadata.withName(name=name), + '#spec':: d.obj(help='"ClusterTrustBundleSpec contains the signer and trust anchors."'), + spec: { + '#withSignerName':: d.fn(help="\"signerName indicates the associated signer, if any.\\n\\nIn order to create or update a ClusterTrustBundle that sets signerName, you must have the following cluster-scoped permission: group=certificates.k8s.io resource=signers resourceName=\u003cthe signer name\u003e verb=attest.\\n\\nIf signerName is not empty, then the ClusterTrustBundle object must be named with the signer name as a prefix (translating slashes to colons). For example, for the signer name `example.com/foo`, valid ClusterTrustBundle object names include `example.com:foo:abc` and `example.com:foo:v1`.\\n\\nIf signerName is empty, then the ClusterTrustBundle object's name must not have such a prefix.\\n\\nList/watch requests for ClusterTrustBundles can filter on this field using a `spec.signerName=NAME` field selector.\"", args=[d.arg(name='signerName', type=d.T.string)]), + withSignerName(signerName): { spec+: { signerName: signerName } }, + '#withTrustBundle':: d.fn(help='"trustBundle contains the individual X.509 trust anchors for this bundle, as PEM bundle of PEM-wrapped, DER-formatted X.509 certificates.\\n\\nThe data must consist only of PEM certificate blocks that parse as valid X.509 certificates. Each certificate must include a basic constraints extension with the CA bit set. 
The API server will reject objects that contain duplicate certificates, or that use PEM block headers.\\n\\nUsers of ClusterTrustBundles, including Kubelet, are free to reorder and deduplicate certificate blocks in this file according to their own logic, as well as to drop PEM block headers and inter-block data."', args=[d.arg(name='trustBundle', type=d.T.string)]), + withTrustBundle(trustBundle): { spec+: { trustBundle: trustBundle } }, + }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/certificates/v1alpha1/clusterTrustBundleSpec.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/certificates/v1alpha1/clusterTrustBundleSpec.libsonnet new file mode 100644 index 00000000000..1c3be20f16d --- /dev/null +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/certificates/v1alpha1/clusterTrustBundleSpec.libsonnet @@ -0,0 +1,10 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='clusterTrustBundleSpec', url='', help='"ClusterTrustBundleSpec contains the signer and trust anchors."'), + '#withSignerName':: d.fn(help="\"signerName indicates the associated signer, if any.\\n\\nIn order to create or update a ClusterTrustBundle that sets signerName, you must have the following cluster-scoped permission: group=certificates.k8s.io resource=signers resourceName=\u003cthe signer name\u003e verb=attest.\\n\\nIf signerName is not empty, then the ClusterTrustBundle object must be named with the signer name as a prefix (translating slashes to colons). For example, for the signer name `example.com/foo`, valid ClusterTrustBundle object names include `example.com:foo:abc` and `example.com:foo:v1`.\\n\\nIf signerName is empty, then the ClusterTrustBundle object's name must not have such a prefix.\\n\\nList/watch requests for ClusterTrustBundles can filter on this field using a `spec.signerName=NAME` field selector.\"", args=[d.arg(name='signerName', type=d.T.string)]), + withSignerName(signerName): { signerName: signerName }, + '#withTrustBundle':: d.fn(help='"trustBundle contains the individual X.509 trust anchors for this bundle, as PEM bundle of PEM-wrapped, DER-formatted X.509 certificates.\\n\\nThe data must consist only of PEM certificate blocks that parse as valid X.509 certificates. Each certificate must include a basic constraints extension with the CA bit set. 
The API server will reject objects that contain duplicate certificates, or that use PEM block headers.\\n\\nUsers of ClusterTrustBundles, including Kubelet, are free to reorder and deduplicate certificate blocks in this file according to their own logic, as well as to drop PEM block headers and inter-block data."', args=[d.arg(name='trustBundle', type=d.T.string)]), + withTrustBundle(trustBundle): { trustBundle: trustBundle }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/certificates/v1alpha1/main.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/certificates/v1alpha1/main.libsonnet new file mode 100644 index 00000000000..697fa66fc89 --- /dev/null +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/certificates/v1alpha1/main.libsonnet @@ -0,0 +1,6 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='v1alpha1', url='', help=''), + clusterTrustBundle: (import 'clusterTrustBundle.libsonnet'), + clusterTrustBundleSpec: (import 'clusterTrustBundleSpec.libsonnet'), +} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/coordination/main.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/coordination/main.libsonnet similarity index 75% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/coordination/main.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/coordination/main.libsonnet index 9d8b2649c6c..cad91a8c74b 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/coordination/main.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/coordination/main.libsonnet @@ -2,5 +2,4 @@ local d = (import 'doc-util/main.libsonnet'), '#':: d.pkg(name='coordination', url='', help=''), v1: (import 'v1/main.libsonnet'), - v1beta1: (import 'v1beta1/main.libsonnet'), } diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/coordination/v1/lease.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/coordination/v1/lease.libsonnet similarity index 84% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/coordination/v1/lease.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/coordination/v1/lease.libsonnet index 1baaa20ae64..4e89e2b3ac4 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/coordination/v1/lease.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/coordination/v1/lease.libsonnet @@ -3,12 +3,10 @@ '#':: d.pkg(name='lease', url='', help='"Lease defines a lease concept."'), '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -19,21 +17,21 @@ withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. 
Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { metadata+: { generateName: generateName } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. 
More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { metadata+: { labels+: labels } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. 
An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { metadata+: { namespace: namespace } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, @@ -41,9 +39,9 @@ withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { metadata+: { uid: uid } }, }, '#new':: d.fn(help='new returns an instance of Lease', args=[d.arg(name='name', type=d.T.string)]), @@ -57,7 +55,7 @@ withAcquireTime(acquireTime): { spec+: { acquireTime: acquireTime } }, '#withHolderIdentity':: d.fn(help='"holderIdentity contains the identity of the holder of a current lease."', args=[d.arg(name='holderIdentity', type=d.T.string)]), withHolderIdentity(holderIdentity): { spec+: { holderIdentity: holderIdentity } }, - '#withLeaseDurationSeconds':: d.fn(help='"leaseDurationSeconds is a duration that candidates for a lease need to wait to force acquire it. This is measure against time of last observed RenewTime."', args=[d.arg(name='leaseDurationSeconds', type=d.T.integer)]), + '#withLeaseDurationSeconds':: d.fn(help='"leaseDurationSeconds is a duration that candidates for a lease need to wait to force acquire it. This is measure against time of last observed renewTime."', args=[d.arg(name='leaseDurationSeconds', type=d.T.integer)]), withLeaseDurationSeconds(leaseDurationSeconds): { spec+: { leaseDurationSeconds: leaseDurationSeconds } }, '#withLeaseTransitions':: d.fn(help='"leaseTransitions is the number of transitions of a lease between holders."', args=[d.arg(name='leaseTransitions', type=d.T.integer)]), withLeaseTransitions(leaseTransitions): { spec+: { leaseTransitions: leaseTransitions } }, diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/coordination/v1/leaseSpec.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/coordination/v1/leaseSpec.libsonnet similarity index 95% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/coordination/v1/leaseSpec.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/coordination/v1/leaseSpec.libsonnet index 3f501306ad4..af47be3597c 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/coordination/v1/leaseSpec.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/coordination/v1/leaseSpec.libsonnet @@ -5,7 +5,7 @@ withAcquireTime(acquireTime): { acquireTime: acquireTime }, '#withHolderIdentity':: d.fn(help='"holderIdentity contains the identity of the holder of a current lease."', args=[d.arg(name='holderIdentity', type=d.T.string)]), withHolderIdentity(holderIdentity): { holderIdentity: holderIdentity }, - '#withLeaseDurationSeconds':: d.fn(help='"leaseDurationSeconds is a duration that candidates for a lease need to wait to force acquire it. This is measure against time of last observed RenewTime."', args=[d.arg(name='leaseDurationSeconds', type=d.T.integer)]), + '#withLeaseDurationSeconds':: d.fn(help='"leaseDurationSeconds is a duration that candidates for a lease need to wait to force acquire it. 
This is measure against time of last observed renewTime."', args=[d.arg(name='leaseDurationSeconds', type=d.T.integer)]), withLeaseDurationSeconds(leaseDurationSeconds): { leaseDurationSeconds: leaseDurationSeconds }, '#withLeaseTransitions':: d.fn(help='"leaseTransitions is the number of transitions of a lease between holders."', args=[d.arg(name='leaseTransitions', type=d.T.integer)]), withLeaseTransitions(leaseTransitions): { leaseTransitions: leaseTransitions }, diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/coordination/v1/main.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/coordination/v1/main.libsonnet similarity index 100% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/coordination/v1/main.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/coordination/v1/main.libsonnet diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/main.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/main.libsonnet similarity index 100% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/main.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/main.libsonnet diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/affinity.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/affinity.libsonnet similarity index 100% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/affinity.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/affinity.libsonnet diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/attachedVolume.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/attachedVolume.libsonnet similarity index 100% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/attachedVolume.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/attachedVolume.libsonnet diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/awsElasticBlockStoreVolumeSource.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/awsElasticBlockStoreVolumeSource.libsonnet new file mode 100644 index 00000000000..61e425f4bc0 --- /dev/null +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/awsElasticBlockStoreVolumeSource.libsonnet @@ -0,0 +1,14 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='awsElasticBlockStoreVolumeSource', url='', help='"Represents a Persistent Disk resource in AWS.\\n\\nAn AWS EBS disk must exist before mounting to a container. The disk must also be in the same AWS zone as the kubelet. An AWS EBS disk can only be mounted as read/write once. AWS EBS volumes support ownership management and SELinux relabeling."'), + '#withFsType':: d.fn(help='"fsType is the filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore"', args=[d.arg(name='fsType', type=d.T.string)]), + withFsType(fsType): { fsType: fsType }, + '#withPartition':: d.fn(help='"partition is the partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \\"1\\". Similarly, the volume partition for /dev/sda is \\"0\\" (or you can leave the property empty)."', args=[d.arg(name='partition', type=d.T.integer)]), + withPartition(partition): { partition: partition }, + '#withReadOnly':: d.fn(help='"readOnly value true will force the readOnly setting in VolumeMounts. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore"', args=[d.arg(name='readOnly', type=d.T.boolean)]), + withReadOnly(readOnly): { readOnly: readOnly }, + '#withVolumeID':: d.fn(help='"volumeID is unique ID of the persistent disk resource in AWS (Amazon EBS volume). More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore"', args=[d.arg(name='volumeID', type=d.T.string)]), + withVolumeID(volumeID): { volumeID: volumeID }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/azureDiskVolumeSource.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/azureDiskVolumeSource.libsonnet new file mode 100644 index 00000000000..195149ef3ba --- /dev/null +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/azureDiskVolumeSource.libsonnet @@ -0,0 +1,18 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='azureDiskVolumeSource', url='', help='"AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod."'), + '#withCachingMode':: d.fn(help='"cachingMode is the Host Caching mode: None, Read Only, Read Write."', args=[d.arg(name='cachingMode', type=d.T.string)]), + withCachingMode(cachingMode): { cachingMode: cachingMode }, + '#withDiskName':: d.fn(help='"diskName is the Name of the data disk in the blob storage"', args=[d.arg(name='diskName', type=d.T.string)]), + withDiskName(diskName): { diskName: diskName }, + '#withDiskURI':: d.fn(help='"diskURI is the URI of data disk in the blob storage"', args=[d.arg(name='diskURI', type=d.T.string)]), + withDiskURI(diskURI): { diskURI: diskURI }, + '#withFsType':: d.fn(help='"fsType is Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), + withFsType(fsType): { fsType: fsType }, + '#withKind':: d.fn(help='"kind expected values are Shared: multiple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared"', args=[d.arg(name='kind', type=d.T.string)]), + withKind(kind): { kind: kind }, + '#withReadOnly':: d.fn(help='"readOnly Defaults to false (read/write). 
ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + withReadOnly(readOnly): { readOnly: readOnly }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/azureFilePersistentVolumeSource.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/azureFilePersistentVolumeSource.libsonnet new file mode 100644 index 00000000000..38119e4afd5 --- /dev/null +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/azureFilePersistentVolumeSource.libsonnet @@ -0,0 +1,14 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='azureFilePersistentVolumeSource', url='', help='"AzureFile represents an Azure File Service mount on the host and bind mount to the pod."'), + '#withReadOnly':: d.fn(help='"readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + withReadOnly(readOnly): { readOnly: readOnly }, + '#withSecretName':: d.fn(help='"secretName is the name of secret that contains Azure Storage Account Name and Key"', args=[d.arg(name='secretName', type=d.T.string)]), + withSecretName(secretName): { secretName: secretName }, + '#withSecretNamespace':: d.fn(help='"secretNamespace is the namespace of the secret that contains Azure Storage Account Name and Key default is the same as the Pod"', args=[d.arg(name='secretNamespace', type=d.T.string)]), + withSecretNamespace(secretNamespace): { secretNamespace: secretNamespace }, + '#withShareName':: d.fn(help='"shareName is the azure Share Name"', args=[d.arg(name='shareName', type=d.T.string)]), + withShareName(shareName): { shareName: shareName }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/azureFileVolumeSource.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/azureFileVolumeSource.libsonnet new file mode 100644 index 00000000000..04663cea5fc --- /dev/null +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/azureFileVolumeSource.libsonnet @@ -0,0 +1,12 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='azureFileVolumeSource', url='', help='"AzureFile represents an Azure File Service mount on the host and bind mount to the pod."'), + '#withReadOnly':: d.fn(help='"readOnly defaults to false (read/write). 
ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + withReadOnly(readOnly): { readOnly: readOnly }, + '#withSecretName':: d.fn(help='"secretName is the name of secret that contains Azure Storage Account Name and Key"', args=[d.arg(name='secretName', type=d.T.string)]), + withSecretName(secretName): { secretName: secretName }, + '#withShareName':: d.fn(help='"shareName is the azure share Name"', args=[d.arg(name='shareName', type=d.T.string)]), + withShareName(shareName): { shareName: shareName }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/binding.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/binding.libsonnet similarity index 86% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/binding.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/binding.libsonnet index 43d95c1b961..3746613fcad 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/binding.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/binding.libsonnet @@ -3,12 +3,10 @@ '#':: d.pkg(name='binding', url='', help='"Binding ties one object to another; for example, a pod is bound to a node by a scheduler. Deprecated in 1.7, please use the bindings subresource of pods instead."'), '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. 
This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -19,21 +17,21 @@ withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. 
If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { metadata+: { generateName: generateName } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { metadata+: { labels+: labels } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". 
The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { metadata+: { namespace: namespace } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, @@ -41,9 +39,9 @@ withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. 
They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { metadata+: { uid: uid } }, }, '#new':: d.fn(help='new returns an instance of Binding', args=[d.arg(name='name', type=d.T.string)]), diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/capabilities.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/capabilities.libsonnet similarity index 100% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/capabilities.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/capabilities.libsonnet diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/cephFSPersistentVolumeSource.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/cephFSPersistentVolumeSource.libsonnet new file mode 100644 index 00000000000..1215a07c221 --- /dev/null +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/cephFSPersistentVolumeSource.libsonnet @@ -0,0 +1,25 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='cephFSPersistentVolumeSource', url='', help='"Represents a Ceph Filesystem mount that lasts the lifetime of a pod Cephfs volumes do not support ownership management or SELinux relabeling."'), + '#secretRef':: d.obj(help='"SecretReference represents a Secret Reference. 
It has enough information to retrieve secret in any namespace"'), + secretRef: { + '#withName':: d.fn(help='"name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { secretRef+: { name: name } }, + '#withNamespace':: d.fn(help='"namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), + withNamespace(namespace): { secretRef+: { namespace: namespace } }, + }, + '#withMonitors':: d.fn(help='"monitors is Required: Monitors is a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='monitors', type=d.T.array)]), + withMonitors(monitors): { monitors: if std.isArray(v=monitors) then monitors else [monitors] }, + '#withMonitorsMixin':: d.fn(help='"monitors is Required: Monitors is a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='monitors', type=d.T.array)]), + withMonitorsMixin(monitors): { monitors+: if std.isArray(v=monitors) then monitors else [monitors] }, + '#withPath':: d.fn(help='"path is Optional: Used as the mounted root, rather than the full Ceph tree, default is /"', args=[d.arg(name='path', type=d.T.string)]), + withPath(path): { path: path }, + '#withReadOnly':: d.fn(help='"readOnly is Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='readOnly', type=d.T.boolean)]), + withReadOnly(readOnly): { readOnly: readOnly }, + '#withSecretFile':: d.fn(help='"secretFile is Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='secretFile', type=d.T.string)]), + withSecretFile(secretFile): { secretFile: secretFile }, + '#withUser':: d.fn(help='"user is Optional: User is the rados user name, default is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='user', type=d.T.string)]), + withUser(user): { user: user }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/cephFSVolumeSource.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/cephFSVolumeSource.libsonnet new file mode 100644 index 00000000000..ff61be5fc51 --- /dev/null +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/cephFSVolumeSource.libsonnet @@ -0,0 +1,23 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='cephFSVolumeSource', url='', help='"Represents a Ceph Filesystem mount that lasts the lifetime of a pod Cephfs volumes do not support ownership management or SELinux relabeling."'), + '#secretRef':: d.obj(help='"LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace."'), + secretRef: { + '#withName':: d.fn(help='"Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { secretRef+: { name: name } }, + }, + '#withMonitors':: d.fn(help='"monitors is Required: Monitors is a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='monitors', type=d.T.array)]), + withMonitors(monitors): { monitors: if std.isArray(v=monitors) then monitors else [monitors] }, + '#withMonitorsMixin':: d.fn(help='"monitors is Required: Monitors is a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='monitors', type=d.T.array)]), + withMonitorsMixin(monitors): { monitors+: if std.isArray(v=monitors) then monitors else [monitors] }, + '#withPath':: d.fn(help='"path is Optional: Used as the mounted root, rather than the full Ceph tree, default is /"', args=[d.arg(name='path', type=d.T.string)]), + withPath(path): { path: path }, + '#withReadOnly':: d.fn(help='"readOnly is Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='readOnly', type=d.T.boolean)]), + withReadOnly(readOnly): { readOnly: readOnly }, + '#withSecretFile':: d.fn(help='"secretFile is Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='secretFile', type=d.T.string)]), + withSecretFile(secretFile): { secretFile: secretFile }, + '#withUser':: d.fn(help='"user is optional: User is the rados user name, default is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='user', type=d.T.string)]), + withUser(user): { user: user }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/cinderPersistentVolumeSource.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/cinderPersistentVolumeSource.libsonnet similarity index 52% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/cinderPersistentVolumeSource.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/cinderPersistentVolumeSource.libsonnet index 8d2af89ee9f..ca00d729ebb 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/cinderPersistentVolumeSource.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/cinderPersistentVolumeSource.libsonnet @@ -3,16 +3,16 @@ '#':: d.pkg(name='cinderPersistentVolumeSource', url='', help='"Represents a cinder volume resource in Openstack. A Cinder volume must exist before mounting to a container. The volume must also be in the same region as the kubelet. Cinder volumes support ownership management and SELinux relabeling."'), '#secretRef':: d.obj(help='"SecretReference represents a Secret Reference. 
It has enough information to retrieve secret in any namespace"'), secretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), withName(name): { secretRef+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { secretRef+: { namespace: namespace } }, }, - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://examples.k8s.io/mysql-cinder-pd/README.md"', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType Filesystem type to mount. Must be a filesystem type supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://examples.k8s.io/mysql-cinder-pd/README.md"', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { fsType: fsType }, - '#withReadOnly':: d.fn(help='"Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/mysql-cinder-pd/README.md"', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly is Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/mysql-cinder-pd/README.md"', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { readOnly: readOnly }, - '#withVolumeID':: d.fn(help='"volume id used to identify the volume in cinder. More info: https://examples.k8s.io/mysql-cinder-pd/README.md"', args=[d.arg(name='volumeID', type=d.T.string)]), + '#withVolumeID':: d.fn(help='"volumeID used to identify the volume in cinder. More info: https://examples.k8s.io/mysql-cinder-pd/README.md"', args=[d.arg(name='volumeID', type=d.T.string)]), withVolumeID(volumeID): { volumeID: volumeID }, '#mixin': 'ignore', mixin: self, diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/cinderVolumeSource.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/cinderVolumeSource.libsonnet similarity index 54% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/cinderVolumeSource.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/cinderVolumeSource.libsonnet index 1479ec43262..68b81ae4361 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/cinderVolumeSource.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/cinderVolumeSource.libsonnet @@ -6,11 +6,11 @@ '#withName':: d.fn(help='"Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { secretRef+: { name: name } }, }, - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://examples.k8s.io/mysql-cinder-pd/README.md"', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://examples.k8s.io/mysql-cinder-pd/README.md"', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { fsType: fsType }, - '#withReadOnly':: d.fn(help='"Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/mysql-cinder-pd/README.md"', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/mysql-cinder-pd/README.md"', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { readOnly: readOnly }, - '#withVolumeID':: d.fn(help='"volume id used to identify the volume in cinder. More info: https://examples.k8s.io/mysql-cinder-pd/README.md"', args=[d.arg(name='volumeID', type=d.T.string)]), + '#withVolumeID':: d.fn(help='"volumeID used to identify the volume in cinder. More info: https://examples.k8s.io/mysql-cinder-pd/README.md"', args=[d.arg(name='volumeID', type=d.T.string)]), withVolumeID(volumeID): { volumeID: volumeID }, '#mixin': 'ignore', mixin: self, diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/claimSource.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/claimSource.libsonnet new file mode 100644 index 00000000000..8753fafe3fc --- /dev/null +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/claimSource.libsonnet @@ -0,0 +1,10 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='claimSource', url='', help='"ClaimSource describes a reference to a ResourceClaim.\\n\\nExactly one of these fields should be set. Consumers of this type must treat an empty object as if it has an unknown value."'), + '#withResourceClaimName':: d.fn(help='"ResourceClaimName is the name of a ResourceClaim object in the same namespace as this pod."', args=[d.arg(name='resourceClaimName', type=d.T.string)]), + withResourceClaimName(resourceClaimName): { resourceClaimName: resourceClaimName }, + '#withResourceClaimTemplateName':: d.fn(help='"ResourceClaimTemplateName is the name of a ResourceClaimTemplate object in the same namespace as this pod.\\n\\nThe template will be used to create a new ResourceClaim, which will be bound to this pod. When this pod is deleted, the ResourceClaim will also be deleted. 
The pod name and resource name, along with a generated component, will be used to form a unique name for the ResourceClaim, which will be recorded in pod.status.resourceClaimStatuses.\\n\\nThis field is immutable and no changes will be made to the corresponding ResourceClaim by the control plane after creating the ResourceClaim."', args=[d.arg(name='resourceClaimTemplateName', type=d.T.string)]), + withResourceClaimTemplateName(resourceClaimTemplateName): { resourceClaimTemplateName: resourceClaimTemplateName }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/clientIPConfig.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/clientIPConfig.libsonnet similarity index 100% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/clientIPConfig.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/clientIPConfig.libsonnet diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/clusterTrustBundleProjection.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/clusterTrustBundleProjection.libsonnet new file mode 100644 index 00000000000..0aa0424b5cd --- /dev/null +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/clusterTrustBundleProjection.libsonnet @@ -0,0 +1,25 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='clusterTrustBundleProjection', url='', help='"ClusterTrustBundleProjection describes how to select a set of ClusterTrustBundle objects and project their contents into the pod filesystem."'), + '#labelSelector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), + labelSelector: { + '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressions(matchExpressions): { labelSelector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } }, + '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressionsMixin(matchExpressions): { labelSelector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } }, + '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabels(matchLabels): { labelSelector+: { matchLabels: matchLabels } }, + '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". 
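A quick, hedged sketch of the new claimSource helpers added above (same assumed import as the earlier sketch; the claim name is a placeholder). Per the help text, exactly one of the two fields should be set:

local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';
local claimSource = k.core.v1.claimSource;

// Evaluates to { resourceClaimName: 'shared-gpu' }; this object is what a pod's
// spec.resourceClaims entry would carry as its source (illustrative wiring).
claimSource.withResourceClaimName('shared-gpu')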
The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabelsMixin(matchLabels): { labelSelector+: { matchLabels+: matchLabels } }, + }, + '#withName':: d.fn(help='"Select a single ClusterTrustBundle by object name. Mutually-exclusive with signerName and labelSelector."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { name: name }, + '#withOptional':: d.fn(help="\"If true, don't block pod startup if the referenced ClusterTrustBundle(s) aren't available. If using name, then the named ClusterTrustBundle is allowed not to exist. If using signerName, then the combination of signerName and labelSelector is allowed to match zero ClusterTrustBundles.\"", args=[d.arg(name='optional', type=d.T.boolean)]), + withOptional(optional): { optional: optional }, + '#withPath':: d.fn(help='"Relative path from the volume root to write the bundle."', args=[d.arg(name='path', type=d.T.string)]), + withPath(path): { path: path }, + '#withSignerName':: d.fn(help='"Select all ClusterTrustBundles that match this signer name. Mutually-exclusive with name. The contents of all selected ClusterTrustBundles will be unified and deduplicated."', args=[d.arg(name='signerName', type=d.T.string)]), + withSignerName(signerName): { signerName: signerName }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/componentCondition.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/componentCondition.libsonnet similarity index 100% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/componentCondition.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/componentCondition.libsonnet diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/componentStatus.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/componentStatus.libsonnet similarity index 84% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/componentStatus.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/componentStatus.libsonnet index 87b93c0128b..3fb5e6e5ec2 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/componentStatus.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/componentStatus.libsonnet @@ -3,12 +3,10 @@ '#':: d.pkg(name='componentStatus', url='', help='"ComponentStatus (and ComponentStatusList) holds the cluster validation info. Deprecated: This API is deprecated in v1.19+"'), '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. 
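To show the new clusterTrustBundleProjection object above in use, a sketch that selects bundles by signer name for a projected volume, under the same assumed import; the signer name and file path are examples only:

local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';
local ctb = k.core.v1.clusterTrustBundleProjection;

// Evaluates to { signerName: ..., path: ..., optional: true }, the object that
// would sit under a projected volume source's clusterTrustBundle field.
ctb.withSignerName('example.com/my-signer')
+ ctb.withPath('trust-anchors.pem')
+ ctb.withOptional(true)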
They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -19,21 +17,21 @@ withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. 
Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { metadata+: { generateName: generateName } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. 
More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { metadata+: { labels+: labels } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. 
An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { metadata+: { namespace: namespace } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, @@ -41,9 +39,9 @@ withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { metadata+: { uid: uid } }, }, '#new':: d.fn(help='new returns an instance of ComponentStatus', args=[d.arg(name='name', type=d.T.string)]), diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/configMap.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/configMap.libsonnet similarity index 85% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/configMap.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/configMap.libsonnet index 09013c5f2cd..126d23c34ea 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/configMap.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/configMap.libsonnet @@ -3,12 +3,10 @@ '#':: d.pkg(name='configMap', url='', help='"ConfigMap holds configuration data for pods to consume."'), '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. 
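The regenerated ObjectMeta block above is shared by every object in the library (the configMap hunk below receives the identical change): withClusterName is no longer generated for 1.29 and the doc links now point at kubernetes.io. A hedged sketch of the surviving metadata helpers, under the same assumed import; names and labels are placeholders:

local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';
local configMap = k.core.v1.configMap;

// configMap.new(name) plus the metadata.with* helpers shown in these hunks;
// metadata.withClusterName(...) is gone from the 1.29 tree.
configMap.new('grafana-datasources')
+ configMap.metadata.withNamespace('monitoring')
+ configMap.metadata.withLabels({ 'app.kubernetes.io/name': 'grafana' })
+ configMap.metadata.withAnnotations({ 'example/owner': 'observability' })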
This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -19,21 +17,21 @@ withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. 
This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { metadata+: { generateName: generateName } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { metadata+: { labels+: labels } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". 
The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { metadata+: { namespace: namespace } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, @@ -41,9 +39,9 @@ withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. 
They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { metadata+: { uid: uid } }, }, '#new':: d.fn(help='new returns an instance of ConfigMap', args=[d.arg(name='name', type=d.T.string)]), diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/configMapEnvSource.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/configMapEnvSource.libsonnet similarity index 100% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/configMapEnvSource.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/configMapEnvSource.libsonnet diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/configMapKeySelector.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/configMapKeySelector.libsonnet similarity index 100% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/configMapKeySelector.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/configMapKeySelector.libsonnet diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/configMapNodeConfigSource.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/configMapNodeConfigSource.libsonnet similarity index 90% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/configMapNodeConfigSource.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/configMapNodeConfigSource.libsonnet index 251e5e51933..f3271d8907c 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/configMapNodeConfigSource.libsonnet +++ 
b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/configMapNodeConfigSource.libsonnet @@ -1,6 +1,6 @@ { local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='configMapNodeConfigSource', url='', help='"ConfigMapNodeConfigSource contains the information to reference a ConfigMap as a config source for the Node."'), + '#':: d.pkg(name='configMapNodeConfigSource', url='', help='"ConfigMapNodeConfigSource contains the information to reference a ConfigMap as a config source for the Node. This API is deprecated since 1.22: https://git.k8s.io/enhancements/keps/sig-node/281-dynamic-kubelet-configuration"'), '#withKubeletConfigKey':: d.fn(help='"KubeletConfigKey declares which key of the referenced ConfigMap corresponds to the KubeletConfiguration structure This field is required in all cases."', args=[d.arg(name='kubeletConfigKey', type=d.T.string)]), withKubeletConfigKey(kubeletConfigKey): { kubeletConfigKey: kubeletConfigKey }, '#withName':: d.fn(help='"Name is the metadata.name of the referenced ConfigMap. This field is required in all cases."', args=[d.arg(name='name', type=d.T.string)]), diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/configMapProjection.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/configMapProjection.libsonnet new file mode 100644 index 00000000000..3059d353b00 --- /dev/null +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/configMapProjection.libsonnet @@ -0,0 +1,14 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='configMapProjection', url='', help="\"Adapts a ConfigMap into a projected volume.\\n\\nThe contents of the target ConfigMap's Data field will be presented in a projected volume as files using the keys in the Data field as the file names, unless the items element is populated with specific mappings of keys to paths. Note that this is identical to a configmap volume source without the default mode.\""), + '#withItems':: d.fn(help="\"items if unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.\"", args=[d.arg(name='items', type=d.T.array)]), + withItems(items): { items: if std.isArray(v=items) then items else [items] }, + '#withItemsMixin':: d.fn(help="\"items if unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='items', type=d.T.array)]), + withItemsMixin(items): { items+: if std.isArray(v=items) then items else [items] }, + '#withName':: d.fn(help='"Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { name: name }, + '#withOptional':: d.fn(help='"optional specify whether the ConfigMap or its keys must be defined"', args=[d.arg(name='optional', type=d.T.boolean)]), + withOptional(optional): { optional: optional }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/configMapVolumeSource.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/configMapVolumeSource.libsonnet new file mode 100644 index 00000000000..ec7e2d315a3 --- /dev/null +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/configMapVolumeSource.libsonnet @@ -0,0 +1,16 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='configMapVolumeSource', url='', help="\"Adapts a ConfigMap into a volume.\\n\\nThe contents of the target ConfigMap's Data field will be presented in a volume as files using the keys in the Data field as the file names, unless the items element is populated with specific mappings of keys to paths. ConfigMap volumes support ownership management and SELinux relabeling.\""), + '#withDefaultMode':: d.fn(help='"defaultMode is optional: mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set."', args=[d.arg(name='defaultMode', type=d.T.integer)]), + withDefaultMode(defaultMode): { defaultMode: defaultMode }, + '#withItems':: d.fn(help="\"items if unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.\"", args=[d.arg(name='items', type=d.T.array)]), + withItems(items): { items: if std.isArray(v=items) then items else [items] }, + '#withItemsMixin':: d.fn(help="\"items if unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='items', type=d.T.array)]), + withItemsMixin(items): { items+: if std.isArray(v=items) then items else [items] }, + '#withName':: d.fn(help='"Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { name: name }, + '#withOptional':: d.fn(help='"optional specify whether the ConfigMap or its keys must be defined"', args=[d.arg(name='optional', type=d.T.boolean)]), + withOptional(optional): { optional: optional }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/container.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/container.libsonnet similarity index 78% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/container.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/container.libsonnet index 1828a2297cc..8a9e4ec56b9 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/container.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/container.libsonnet @@ -3,7 +3,7 @@ '#':: d.pkg(name='container', url='', help='"A single application container that you want to run within a pod."'), '#lifecycle':: d.obj(help='"Lifecycle describes actions that the management system should take in response to container lifecycle events. For the PostStart and PreStop lifecycle handlers, management of the container blocks until the action is complete, unless the container process fails, in which case the handler is aborted."'), lifecycle: { - '#postStart':: d.obj(help='"Handler defines a specific action that should be taken"'), + '#postStart':: d.obj(help='"LifecycleHandler defines a specific action that should be taken in a lifecycle hook. One and only one of the fields, except TCPSocket must be specified."'), postStart: { '#exec':: d.obj(help='"ExecAction describes a \\"run in container\\" action."'), exec: { @@ -27,6 +27,11 @@ '#withScheme':: d.fn(help='"Scheme to use for connecting to the host. Defaults to HTTP."', args=[d.arg(name='scheme', type=d.T.string)]), withScheme(scheme): { lifecycle+: { postStart+: { httpGet+: { scheme: scheme } } } }, }, + '#sleep':: d.obj(help='"SleepAction describes a \\"sleep\\" action."'), + sleep: { + '#withSeconds':: d.fn(help='"Seconds is the number of seconds to sleep."', args=[d.arg(name='seconds', type=d.T.integer)]), + withSeconds(seconds): { lifecycle+: { postStart+: { sleep+: { seconds: seconds } } } }, + }, '#tcpSocket':: d.obj(help='"TCPSocketAction describes an action based on opening a socket"'), tcpSocket: { '#withHost':: d.fn(help='"Optional: Host name to connect to, defaults to the pod IP."', args=[d.arg(name='host', type=d.T.string)]), @@ -35,7 +40,7 @@ withPort(port): { lifecycle+: { postStart+: { tcpSocket+: { port: port } } } }, }, }, - '#preStop':: d.obj(help='"Handler defines a specific action that should be taken"'), + '#preStop':: d.obj(help='"LifecycleHandler defines a specific action that should be taken in a lifecycle hook. One and only one of the fields, except TCPSocket must be specified."'), preStop: { '#exec':: d.obj(help='"ExecAction describes a \\"run in container\\" action."'), exec: { @@ -59,6 +64,11 @@ '#withScheme':: d.fn(help='"Scheme to use for connecting to the host. 
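A short sketch of the regenerated configMapVolumeSource helpers above, under the same assumed import; the ConfigMap name matches the earlier sketch and the mode (420 decimal, i.e. 0644) is illustrative:

local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';
local cmVol = k.core.v1.configMapVolumeSource;

// Evaluates to { name: ..., defaultMode: 420, optional: false }, the object a
// pod volume's `configMap` field would carry.
cmVol.withName('grafana-datasources')
+ cmVol.withDefaultMode(420)
+ cmVol.withOptional(false)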
Defaults to HTTP."', args=[d.arg(name='scheme', type=d.T.string)]), withScheme(scheme): { lifecycle+: { preStop+: { httpGet+: { scheme: scheme } } } }, }, + '#sleep':: d.obj(help='"SleepAction describes a \\"sleep\\" action."'), + sleep: { + '#withSeconds':: d.fn(help='"Seconds is the number of seconds to sleep."', args=[d.arg(name='seconds', type=d.T.integer)]), + withSeconds(seconds): { lifecycle+: { preStop+: { sleep+: { seconds: seconds } } } }, + }, '#tcpSocket':: d.obj(help='"TCPSocketAction describes an action based on opening a socket"'), tcpSocket: { '#withHost':: d.fn(help='"Optional: Host name to connect to, defaults to the pod IP."', args=[d.arg(name='host', type=d.T.string)]), @@ -77,6 +87,13 @@ '#withCommandMixin':: d.fn(help="\"Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='command', type=d.T.array)]), withCommandMixin(command): { livenessProbe+: { exec+: { command+: if std.isArray(v=command) then command else [command] } } }, }, + '#grpc':: d.obj(help=''), + grpc: { + '#withPort':: d.fn(help='"Port number of the gRPC service. Number must be in the range 1 to 65535."', args=[d.arg(name='port', type=d.T.integer)]), + withPort(port): { livenessProbe+: { grpc+: { port: port } } }, + '#withService':: d.fn(help='"Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).\\n\\nIf this is not specified, the default behavior is defined by gRPC."', args=[d.arg(name='service', type=d.T.string)]), + withService(service): { livenessProbe+: { grpc+: { service: service } } }, + }, '#httpGet':: d.obj(help='"HTTPGetAction describes an action based on HTTP Get requests."'), httpGet: { '#withHost':: d.fn(help='"Host name to connect to, defaults to the pod IP. You probably want to set \\"Host\\" in httpHeaders instead."', args=[d.arg(name='host', type=d.T.string)]), @@ -107,7 +124,7 @@ withPeriodSeconds(periodSeconds): { livenessProbe+: { periodSeconds: periodSeconds } }, '#withSuccessThreshold':: d.fn(help='"Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1."', args=[d.arg(name='successThreshold', type=d.T.integer)]), withSuccessThreshold(successThreshold): { livenessProbe+: { successThreshold: successThreshold } }, - '#withTerminationGracePeriodSeconds':: d.fn(help="\"Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). 
This is an alpha field and requires enabling ProbeTerminationGracePeriod feature gate.\"", args=[d.arg(name='terminationGracePeriodSeconds', type=d.T.integer)]), + '#withTerminationGracePeriodSeconds':: d.fn(help="\"Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset.\"", args=[d.arg(name='terminationGracePeriodSeconds', type=d.T.integer)]), withTerminationGracePeriodSeconds(terminationGracePeriodSeconds): { livenessProbe+: { terminationGracePeriodSeconds: terminationGracePeriodSeconds } }, '#withTimeoutSeconds':: d.fn(help='"Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes"', args=[d.arg(name='timeoutSeconds', type=d.T.integer)]), withTimeoutSeconds(timeoutSeconds): { livenessProbe+: { timeoutSeconds: timeoutSeconds } }, @@ -121,6 +138,13 @@ '#withCommandMixin':: d.fn(help="\"Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='command', type=d.T.array)]), withCommandMixin(command): { readinessProbe+: { exec+: { command+: if std.isArray(v=command) then command else [command] } } }, }, + '#grpc':: d.obj(help=''), + grpc: { + '#withPort':: d.fn(help='"Port number of the gRPC service. Number must be in the range 1 to 65535."', args=[d.arg(name='port', type=d.T.integer)]), + withPort(port): { readinessProbe+: { grpc+: { port: port } } }, + '#withService':: d.fn(help='"Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).\\n\\nIf this is not specified, the default behavior is defined by gRPC."', args=[d.arg(name='service', type=d.T.string)]), + withService(service): { readinessProbe+: { grpc+: { service: service } } }, + }, '#httpGet':: d.obj(help='"HTTPGetAction describes an action based on HTTP Get requests."'), httpGet: { '#withHost':: d.fn(help='"Host name to connect to, defaults to the pod IP. You probably want to set \\"Host\\" in httpHeaders instead."', args=[d.arg(name='host', type=d.T.string)]), @@ -151,20 +175,24 @@ withPeriodSeconds(periodSeconds): { readinessProbe+: { periodSeconds: periodSeconds } }, '#withSuccessThreshold':: d.fn(help='"Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. 
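// Usage sketch (illustrative; not part of the vendored k8s-libsonnet sources):
// the grpc probe fields added above (livenessProbe.grpc, readinessProbe.grpc).
// Port, service name and the import path are illustrative assumptions; per the
// help text, withService is optional and gRPC's default behavior applies if unset.
local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';
local container = k.core.v1.container;

container.withName('api')
+ container.withImage('registry.example/api:1.0')
+ container.livenessProbe.grpc.withPort(9090)
+ container.livenessProbe.grpc.withService('my-service')  // optional service name for the HealthCheckRequest
+ container.readinessProbe.grpc.withPort(9090)
+ container.readinessProbe.withPeriodSeconds(10)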
Minimum value is 1."', args=[d.arg(name='successThreshold', type=d.T.integer)]), withSuccessThreshold(successThreshold): { readinessProbe+: { successThreshold: successThreshold } }, - '#withTerminationGracePeriodSeconds':: d.fn(help="\"Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is an alpha field and requires enabling ProbeTerminationGracePeriod feature gate.\"", args=[d.arg(name='terminationGracePeriodSeconds', type=d.T.integer)]), + '#withTerminationGracePeriodSeconds':: d.fn(help="\"Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset.\"", args=[d.arg(name='terminationGracePeriodSeconds', type=d.T.integer)]), withTerminationGracePeriodSeconds(terminationGracePeriodSeconds): { readinessProbe+: { terminationGracePeriodSeconds: terminationGracePeriodSeconds } }, '#withTimeoutSeconds':: d.fn(help='"Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes"', args=[d.arg(name='timeoutSeconds', type=d.T.integer)]), withTimeoutSeconds(timeoutSeconds): { readinessProbe+: { timeoutSeconds: timeoutSeconds } }, }, '#resources':: d.obj(help='"ResourceRequirements describes the compute resource requirements."'), resources: { + '#withClaims':: d.fn(help='"Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container.\\n\\nThis is an alpha field and requires enabling the DynamicResourceAllocation feature gate.\\n\\nThis field is immutable. It can only be set for containers."', args=[d.arg(name='claims', type=d.T.array)]), + withClaims(claims): { resources+: { claims: if std.isArray(v=claims) then claims else [claims] } }, + '#withClaimsMixin':: d.fn(help='"Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container.\\n\\nThis is an alpha field and requires enabling the DynamicResourceAllocation feature gate.\\n\\nThis field is immutable. 
It can only be set for containers."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='claims', type=d.T.array)]), + withClaimsMixin(claims): { resources+: { claims+: if std.isArray(v=claims) then claims else [claims] } }, '#withLimits':: d.fn(help='"Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/"', args=[d.arg(name='limits', type=d.T.object)]), withLimits(limits): { resources+: { limits: limits } }, '#withLimitsMixin':: d.fn(help='"Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='limits', type=d.T.object)]), withLimitsMixin(limits): { resources+: { limits+: limits } }, - '#withRequests':: d.fn(help='"Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/"', args=[d.arg(name='requests', type=d.T.object)]), + '#withRequests':: d.fn(help='"Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/"', args=[d.arg(name='requests', type=d.T.object)]), withRequests(requests): { resources+: { requests: requests } }, - '#withRequestsMixin':: d.fn(help='"Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='requests', type=d.T.object)]), + '#withRequestsMixin':: d.fn(help='"Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='requests', type=d.T.object)]), withRequestsMixin(requests): { resources+: { requests+: requests } }, }, '#securityContext':: d.obj(help='"SecurityContext holds security configuration that will be applied to a container. Some fields are present in both SecurityContext and PodSecurityContext. When both are set, the values in SecurityContext take precedence."'), @@ -193,7 +221,7 @@ }, '#seccompProfile':: d.obj(help="\"SeccompProfile defines a pod/container's seccomp profile settings. Only one profile source may be set.\""), seccompProfile: { - '#withLocalhostProfile':: d.fn(help="\"localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. 
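// Usage sketch (illustrative; not part of the vendored k8s-libsonnet sources):
// ResourceRequirements with the new claims field documented above. Per the help
// text, claims name entries must match spec.resourceClaims and the field is
// gated behind DynamicResourceAllocation (alpha). The { name: ... } element
// shape, the claim name and the import path are assumptions for illustration.
local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';
local container = k.core.v1.container;

container.withName('worker')
+ container.withImage('registry.example/worker:1.0')
+ container.resources.withRequests({ cpu: '250m', memory: '256Mi' })
+ container.resources.withLimits({ cpu: '1', memory: '512Mi' })  // requests cannot exceed limits
+ container.resources.withClaims([{ name: 'gpu-claim' }])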
Must only be set if type is \\\"Localhost\\\".\"", args=[d.arg(name='localhostProfile', type=d.T.string)]), + '#withLocalhostProfile':: d.fn(help="\"localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must be set if type is \\\"Localhost\\\". Must NOT be set for any other type.\"", args=[d.arg(name='localhostProfile', type=d.T.string)]), withLocalhostProfile(localhostProfile): { securityContext+: { seccompProfile+: { localhostProfile: localhostProfile } } }, '#withType':: d.fn(help='"type indicates which kind of seccomp profile will be applied. Valid options are:\\n\\nLocalhost - a profile defined in a file on the node should be used. RuntimeDefault - the container runtime default profile should be used. Unconfined - no profile should be applied."', args=[d.arg(name='type', type=d.T.string)]), withType(type): { securityContext+: { seccompProfile+: { type: type } } }, @@ -204,22 +232,24 @@ withGmsaCredentialSpec(gmsaCredentialSpec): { securityContext+: { windowsOptions+: { gmsaCredentialSpec: gmsaCredentialSpec } } }, '#withGmsaCredentialSpecName':: d.fn(help='"GMSACredentialSpecName is the name of the GMSA credential spec to use."', args=[d.arg(name='gmsaCredentialSpecName', type=d.T.string)]), withGmsaCredentialSpecName(gmsaCredentialSpecName): { securityContext+: { windowsOptions+: { gmsaCredentialSpecName: gmsaCredentialSpecName } } }, + '#withHostProcess':: d.fn(help="\"HostProcess determines if a container should be run as a 'Host Process' container. All of a Pod's containers must have the same effective HostProcess value (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). In addition, if HostProcess is true then HostNetwork must also be set to true.\"", args=[d.arg(name='hostProcess', type=d.T.boolean)]), + withHostProcess(hostProcess): { securityContext+: { windowsOptions+: { hostProcess: hostProcess } } }, '#withRunAsUserName':: d.fn(help='"The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence."', args=[d.arg(name='runAsUserName', type=d.T.string)]), withRunAsUserName(runAsUserName): { securityContext+: { windowsOptions+: { runAsUserName: runAsUserName } } }, }, - '#withAllowPrivilegeEscalation':: d.fn(help='"AllowPrivilegeEscalation controls whether a process can gain more privileges than its parent process. This bool directly controls if the no_new_privs flag will be set on the container process. AllowPrivilegeEscalation is true always when the container is: 1) run as Privileged 2) has CAP_SYS_ADMIN"', args=[d.arg(name='allowPrivilegeEscalation', type=d.T.boolean)]), + '#withAllowPrivilegeEscalation':: d.fn(help='"AllowPrivilegeEscalation controls whether a process can gain more privileges than its parent process. This bool directly controls if the no_new_privs flag will be set on the container process. 
AllowPrivilegeEscalation is true always when the container is: 1) run as Privileged 2) has CAP_SYS_ADMIN Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='allowPrivilegeEscalation', type=d.T.boolean)]), withAllowPrivilegeEscalation(allowPrivilegeEscalation): { securityContext+: { allowPrivilegeEscalation: allowPrivilegeEscalation } }, - '#withPrivileged':: d.fn(help='"Run container in privileged mode. Processes in privileged containers are essentially equivalent to root on the host. Defaults to false."', args=[d.arg(name='privileged', type=d.T.boolean)]), + '#withPrivileged':: d.fn(help='"Run container in privileged mode. Processes in privileged containers are essentially equivalent to root on the host. Defaults to false. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='privileged', type=d.T.boolean)]), withPrivileged(privileged): { securityContext+: { privileged: privileged } }, - '#withProcMount':: d.fn(help='"procMount denotes the type of proc mount to use for the containers. The default is DefaultProcMount which uses the container runtime defaults for readonly paths and masked paths. This requires the ProcMountType feature flag to be enabled."', args=[d.arg(name='procMount', type=d.T.string)]), + '#withProcMount':: d.fn(help='"procMount denotes the type of proc mount to use for the containers. The default is DefaultProcMount which uses the container runtime defaults for readonly paths and masked paths. This requires the ProcMountType feature flag to be enabled. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='procMount', type=d.T.string)]), withProcMount(procMount): { securityContext+: { procMount: procMount } }, - '#withReadOnlyRootFilesystem':: d.fn(help='"Whether this container has a read-only root filesystem. Default is false."', args=[d.arg(name='readOnlyRootFilesystem', type=d.T.boolean)]), + '#withReadOnlyRootFilesystem':: d.fn(help='"Whether this container has a read-only root filesystem. Default is false. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='readOnlyRootFilesystem', type=d.T.boolean)]), withReadOnlyRootFilesystem(readOnlyRootFilesystem): { securityContext+: { readOnlyRootFilesystem: readOnlyRootFilesystem } }, - '#withRunAsGroup':: d.fn(help='"The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence."', args=[d.arg(name='runAsGroup', type=d.T.integer)]), + '#withRunAsGroup':: d.fn(help='"The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='runAsGroup', type=d.T.integer)]), withRunAsGroup(runAsGroup): { securityContext+: { runAsGroup: runAsGroup } }, '#withRunAsNonRoot':: d.fn(help='"Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in PodSecurityContext. 
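// Usage sketch (illustrative; not part of the vendored k8s-libsonnet sources):
// the new windowsOptions.hostProcess toggle documented above. Per the help text,
// all containers in the pod must share the same hostProcess value and the pod
// must also set hostNetwork: true (pod-level, not shown here). The import path,
// image and user name are illustrative assumptions.
local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';
local container = k.core.v1.container;

container.withName('win-agent')
+ container.withImage('registry.example/win-agent:1.0')
+ container.securityContext.windowsOptions.withHostProcess(true)
+ container.securityContext.windowsOptions.withRunAsUserName('NT AUTHORITY\\SYSTEM')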
If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence."', args=[d.arg(name='runAsNonRoot', type=d.T.boolean)]), withRunAsNonRoot(runAsNonRoot): { securityContext+: { runAsNonRoot: runAsNonRoot } }, - '#withRunAsUser':: d.fn(help='"The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence."', args=[d.arg(name='runAsUser', type=d.T.integer)]), + '#withRunAsUser':: d.fn(help='"The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='runAsUser', type=d.T.integer)]), withRunAsUser(runAsUser): { securityContext+: { runAsUser: runAsUser } }, }, '#startupProbe':: d.obj(help='"Probe describes a health check to be performed against a container to determine whether it is alive or ready to receive traffic."'), @@ -231,6 +261,13 @@ '#withCommandMixin':: d.fn(help="\"Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='command', type=d.T.array)]), withCommandMixin(command): { startupProbe+: { exec+: { command+: if std.isArray(v=command) then command else [command] } } }, }, + '#grpc':: d.obj(help=''), + grpc: { + '#withPort':: d.fn(help='"Port number of the gRPC service. Number must be in the range 1 to 65535."', args=[d.arg(name='port', type=d.T.integer)]), + withPort(port): { startupProbe+: { grpc+: { port: port } } }, + '#withService':: d.fn(help='"Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).\\n\\nIf this is not specified, the default behavior is defined by gRPC."', args=[d.arg(name='service', type=d.T.string)]), + withService(service): { startupProbe+: { grpc+: { service: service } } }, + }, '#httpGet':: d.obj(help='"HTTPGetAction describes an action based on HTTP Get requests."'), httpGet: { '#withHost':: d.fn(help='"Host name to connect to, defaults to the pod IP. You probably want to set \\"Host\\" in httpHeaders instead."', args=[d.arg(name='host', type=d.T.string)]), @@ -261,18 +298,18 @@ withPeriodSeconds(periodSeconds): { startupProbe+: { periodSeconds: periodSeconds } }, '#withSuccessThreshold':: d.fn(help='"Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1."', args=[d.arg(name='successThreshold', type=d.T.integer)]), withSuccessThreshold(successThreshold): { startupProbe+: { successThreshold: successThreshold } }, - '#withTerminationGracePeriodSeconds':: d.fn(help="\"Optional duration in seconds the pod needs to terminate gracefully upon probe failure. 
The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is an alpha field and requires enabling ProbeTerminationGracePeriod feature gate.\"", args=[d.arg(name='terminationGracePeriodSeconds', type=d.T.integer)]), + '#withTerminationGracePeriodSeconds':: d.fn(help="\"Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset.\"", args=[d.arg(name='terminationGracePeriodSeconds', type=d.T.integer)]), withTerminationGracePeriodSeconds(terminationGracePeriodSeconds): { startupProbe+: { terminationGracePeriodSeconds: terminationGracePeriodSeconds } }, '#withTimeoutSeconds':: d.fn(help='"Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes"', args=[d.arg(name='timeoutSeconds', type=d.T.integer)]), withTimeoutSeconds(timeoutSeconds): { startupProbe+: { timeoutSeconds: timeoutSeconds } }, }, - '#withArgs':: d.fn(help="\"Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell\"", args=[d.arg(name='args', type=d.T.array)]), + '#withArgs':: d.fn(help="\"Arguments to the entrypoint. The container image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \\\"$$(VAR_NAME)\\\" will produce the string literal \\\"$(VAR_NAME)\\\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. 
More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell\"", args=[d.arg(name='args', type=d.T.array)]), withArgs(args): { args: if std.isArray(v=args) then args else [args] }, - '#withArgsMixin':: d.fn(help="\"Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='args', type=d.T.array)]), + '#withArgsMixin':: d.fn(help="\"Arguments to the entrypoint. The container image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \\\"$$(VAR_NAME)\\\" will produce the string literal \\\"$(VAR_NAME)\\\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='args', type=d.T.array)]), withArgsMixin(args): { args+: if std.isArray(v=args) then args else [args] }, - '#withCommand':: d.fn(help="\"Entrypoint array. Not executed within a shell. The docker image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell\"", args=[d.arg(name='command', type=d.T.array)]), + '#withCommand':: d.fn(help="\"Entrypoint array. Not executed within a shell. The container image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \\\"$$(VAR_NAME)\\\" will produce the string literal \\\"$(VAR_NAME)\\\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell\"", args=[d.arg(name='command', type=d.T.array)]), withCommand(command): { command: if std.isArray(v=command) then command else [command] }, - '#withCommandMixin':: d.fn(help="\"Entrypoint array. Not executed within a shell. The docker image's ENTRYPOINT is used if this is not provided. 
Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='command', type=d.T.array)]), + '#withCommandMixin':: d.fn(help="\"Entrypoint array. Not executed within a shell. The container image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \\\"$$(VAR_NAME)\\\" will produce the string literal \\\"$(VAR_NAME)\\\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='command', type=d.T.array)]), withCommandMixin(command): { command+: if std.isArray(v=command) then command else [command] }, '#withEnv':: d.fn(help='"List of environment variables to set in the container. Cannot be updated."', args=[d.arg(name='env', type=d.T.array)]), withEnv(env): { env: if std.isArray(v=env) then env else [env] }, @@ -282,16 +319,22 @@ withEnvFromMixin(envFrom): { envFrom+: if std.isArray(v=envFrom) then envFrom else [envFrom] }, '#withEnvMixin':: d.fn(help='"List of environment variables to set in the container. Cannot be updated."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='env', type=d.T.array)]), withEnvMixin(env): { env+: if std.isArray(v=env) then env else [env] }, - '#withImage':: d.fn(help='"Docker image name. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets."', args=[d.arg(name='image', type=d.T.string)]), + '#withImage':: d.fn(help='"Container image name. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets."', args=[d.arg(name='image', type=d.T.string)]), withImage(image): { image: image }, '#withImagePullPolicy':: d.fn(help='"Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images"', args=[d.arg(name='imagePullPolicy', type=d.T.string)]), withImagePullPolicy(imagePullPolicy): { imagePullPolicy: imagePullPolicy }, '#withName':: d.fn(help='"Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated."', args=[d.arg(name='name', type=d.T.string)]), withName(name): { name: name }, - '#withPorts':: d.fn(help='"List of ports to expose from the container. 
Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default \\"0.0.0.0\\" address inside a container will be accessible from the network. Cannot be updated."', args=[d.arg(name='ports', type=d.T.array)]), + '#withPorts':: d.fn(help='"List of ports to expose from the container. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default \\"0.0.0.0\\" address inside a container will be accessible from the network. Modifying this array with strategic merge patch may corrupt the data. For more information See https://github.com/kubernetes/kubernetes/issues/108255. Cannot be updated."', args=[d.arg(name='ports', type=d.T.array)]), withPorts(ports): { ports: if std.isArray(v=ports) then ports else [ports] }, - '#withPortsMixin':: d.fn(help='"List of ports to expose from the container. Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default \\"0.0.0.0\\" address inside a container will be accessible from the network. Cannot be updated."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ports', type=d.T.array)]), + '#withPortsMixin':: d.fn(help='"List of ports to expose from the container. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default \\"0.0.0.0\\" address inside a container will be accessible from the network. Modifying this array with strategic merge patch may corrupt the data. For more information See https://github.com/kubernetes/kubernetes/issues/108255. Cannot be updated."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ports', type=d.T.array)]), withPortsMixin(ports): { ports+: if std.isArray(v=ports) then ports else [ports] }, + '#withResizePolicy':: d.fn(help='"Resources resize policy for the container."', args=[d.arg(name='resizePolicy', type=d.T.array)]), + withResizePolicy(resizePolicy): { resizePolicy: if std.isArray(v=resizePolicy) then resizePolicy else [resizePolicy] }, + '#withResizePolicyMixin':: d.fn(help='"Resources resize policy for the container."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='resizePolicy', type=d.T.array)]), + withResizePolicyMixin(resizePolicy): { resizePolicy+: if std.isArray(v=resizePolicy) then resizePolicy else [resizePolicy] }, + '#withRestartPolicy':: d.fn(help="\"RestartPolicy defines the restart behavior of individual containers in a pod. This field may only be set for init containers, and the only allowed value is \\\"Always\\\". For non-init containers or when this field is not specified, the restart behavior is defined by the Pod's restart policy and the container type. Setting the RestartPolicy as \\\"Always\\\" for the init container will have the following effect: this init container will be continually restarted on exit until all regular containers have terminated. Once all regular containers have completed, all init containers with restartPolicy \\\"Always\\\" will be shut down. This lifecycle differs from normal init containers and is often referred to as a \\\"sidecar\\\" container. 
Although this init container still starts in the init container sequence, it does not wait for the container to complete before proceeding to the next init container. Instead, the next init container starts immediately after this init container is started, or after any startupProbe has successfully completed.\"", args=[d.arg(name='restartPolicy', type=d.T.string)]), + withRestartPolicy(restartPolicy): { restartPolicy: restartPolicy }, '#withStdin':: d.fn(help='"Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false."', args=[d.arg(name='stdin', type=d.T.boolean)]), withStdin(stdin): { stdin: stdin }, '#withStdinOnce':: d.fn(help='"Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF. Default is false"', args=[d.arg(name='stdinOnce', type=d.T.boolean)]), diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/containerImage.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/containerImage.libsonnet similarity index 61% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/containerImage.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/containerImage.libsonnet index 01cd00d3867..d5dfb1d671e 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/containerImage.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/containerImage.libsonnet @@ -1,9 +1,9 @@ { local d = (import 'doc-util/main.libsonnet'), '#':: d.pkg(name='containerImage', url='', help='"Describe a container image"'), - '#withNames':: d.fn(help='"Names by which this image is known. e.g. [\\"k8s.gcr.io/hyperkube:v1.0.7\\", \\"dockerhub.io/google_containers/hyperkube:v1.0.7\\"]"', args=[d.arg(name='names', type=d.T.array)]), + '#withNames':: d.fn(help='"Names by which this image is known. e.g. [\\"kubernetes.example/hyperkube:v1.0.7\\", \\"cloud-vendor.registry.example/cloud-vendor/hyperkube:v1.0.7\\"]"', args=[d.arg(name='names', type=d.T.array)]), withNames(names): { names: if std.isArray(v=names) then names else [names] }, - '#withNamesMixin':: d.fn(help='"Names by which this image is known. e.g. [\\"k8s.gcr.io/hyperkube:v1.0.7\\", \\"dockerhub.io/google_containers/hyperkube:v1.0.7\\"]"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='names', type=d.T.array)]), + '#withNamesMixin':: d.fn(help='"Names by which this image is known. e.g. 
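// Usage sketch (illustrative; not part of the vendored k8s-libsonnet sources):
// the container-level restartPolicy added above. Per the help text it may only
// be set on init containers and the only allowed value is 'Always', which yields
// sidecar-style behavior. Import path, name and image are illustrative; this
// object is intended to be appended to a pod spec's initContainers list.
local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';
local container = k.core.v1.container;

container.withName('log-shipper')
+ container.withImage('registry.example/log-shipper:1.0')
+ container.withRestartPolicy('Always')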
[\\"kubernetes.example/hyperkube:v1.0.7\\", \\"cloud-vendor.registry.example/cloud-vendor/hyperkube:v1.0.7\\"]"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='names', type=d.T.array)]), withNamesMixin(names): { names+: if std.isArray(v=names) then names else [names] }, '#withSizeBytes':: d.fn(help='"The size of the image in bytes."', args=[d.arg(name='sizeBytes', type=d.T.integer)]), withSizeBytes(sizeBytes): { sizeBytes: sizeBytes }, diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/containerPort.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/containerPort.libsonnet similarity index 100% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/containerPort.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/containerPort.libsonnet diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/containerResizePolicy.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/containerResizePolicy.libsonnet new file mode 100644 index 00000000000..430d614a83d --- /dev/null +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/containerResizePolicy.libsonnet @@ -0,0 +1,10 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='containerResizePolicy', url='', help='"ContainerResizePolicy represents resource resize policy for the container."'), + '#withResourceName':: d.fn(help='"Name of the resource to which this resource resize policy applies. Supported values: cpu, memory."', args=[d.arg(name='resourceName', type=d.T.string)]), + withResourceName(resourceName): { resourceName: resourceName }, + '#withRestartPolicy':: d.fn(help='"Restart policy to apply when specified resource is resized. 
If not specified, it defaults to NotRequired."', args=[d.arg(name='restartPolicy', type=d.T.string)]), + withRestartPolicy(restartPolicy): { restartPolicy: restartPolicy }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/containerState.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/containerState.libsonnet similarity index 96% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/containerState.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/containerState.libsonnet index fb89d77e794..0a29b350d26 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/containerState.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/containerState.libsonnet @@ -8,7 +8,7 @@ }, '#terminated':: d.obj(help='"ContainerStateTerminated is a terminated state of a container."'), terminated: { - '#withContainerID':: d.fn(help="\"Container's ID in the format 'docker://\u003ccontainer_id\u003e'\"", args=[d.arg(name='containerID', type=d.T.string)]), + '#withContainerID':: d.fn(help="\"Container's ID in the format '\u003ctype\u003e://\u003ccontainer_id\u003e'\"", args=[d.arg(name='containerID', type=d.T.string)]), withContainerID(containerID): { terminated+: { containerID: containerID } }, '#withExitCode':: d.fn(help='"Exit status from the last termination of the container"', args=[d.arg(name='exitCode', type=d.T.integer)]), withExitCode(exitCode): { terminated+: { exitCode: exitCode } }, diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/containerStateRunning.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/containerStateRunning.libsonnet similarity index 100% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/containerStateRunning.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/containerStateRunning.libsonnet diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/containerStateTerminated.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/containerStateTerminated.libsonnet similarity index 94% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/containerStateTerminated.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/containerStateTerminated.libsonnet index a3c15616e47..f0e13bac143 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/containerStateTerminated.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/containerStateTerminated.libsonnet @@ -1,7 +1,7 @@ { local d = (import 'doc-util/main.libsonnet'), '#':: d.pkg(name='containerStateTerminated', url='', help='"ContainerStateTerminated is a terminated state of a container."'), - '#withContainerID':: d.fn(help="\"Container's ID in the format 'docker://\u003ccontainer_id\u003e'\"", args=[d.arg(name='containerID', type=d.T.string)]), + '#withContainerID':: d.fn(help="\"Container's ID in the format '\u003ctype\u003e://\u003ccontainer_id\u003e'\"", args=[d.arg(name='containerID', type=d.T.string)]), withContainerID(containerID): { containerID: containerID }, '#withExitCode':: d.fn(help='"Exit status from the last termination of the 
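// Usage sketch (illustrative; not part of the vendored k8s-libsonnet sources):
// combining the container-level withResizePolicy added earlier with the new
// containerResizePolicy helper above. 'NotRequired' is the documented default;
// 'RestartContainer' is the other upstream value. Import path and names are
// illustrative assumptions.
local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';
local container = k.core.v1.container;
local resizePolicy = k.core.v1.containerResizePolicy;

container.withName('cache')
+ container.withImage('memcached:1.6')
+ container.withResizePolicy([
    resizePolicy.withResourceName('cpu') + resizePolicy.withRestartPolicy('NotRequired'),
    resizePolicy.withResourceName('memory') + resizePolicy.withRestartPolicy('RestartContainer'),
  ])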
container"', args=[d.arg(name='exitCode', type=d.T.integer)]), withExitCode(exitCode): { exitCode: exitCode }, diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/containerStateWaiting.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/containerStateWaiting.libsonnet similarity index 100% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/containerStateWaiting.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/containerStateWaiting.libsonnet diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/containerStatus.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/containerStatus.libsonnet similarity index 51% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/containerStatus.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/containerStatus.libsonnet index f637415a0ae..886d4145657 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/containerStatus.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/containerStatus.libsonnet @@ -10,7 +10,7 @@ }, '#terminated':: d.obj(help='"ContainerStateTerminated is a terminated state of a container."'), terminated: { - '#withContainerID':: d.fn(help="\"Container's ID in the format 'docker://\u003ccontainer_id\u003e'\"", args=[d.arg(name='containerID', type=d.T.string)]), + '#withContainerID':: d.fn(help="\"Container's ID in the format '\u003ctype\u003e://\u003ccontainer_id\u003e'\"", args=[d.arg(name='containerID', type=d.T.string)]), withContainerID(containerID): { lastState+: { terminated+: { containerID: containerID } } }, '#withExitCode':: d.fn(help='"Exit status from the last termination of the container"', args=[d.arg(name='exitCode', type=d.T.integer)]), withExitCode(exitCode): { lastState+: { terminated+: { exitCode: exitCode } } }, @@ -33,6 +33,21 @@ withReason(reason): { lastState+: { waiting+: { reason: reason } } }, }, }, + '#resources':: d.obj(help='"ResourceRequirements describes the compute resource requirements."'), + resources: { + '#withClaims':: d.fn(help='"Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container.\\n\\nThis is an alpha field and requires enabling the DynamicResourceAllocation feature gate.\\n\\nThis field is immutable. It can only be set for containers."', args=[d.arg(name='claims', type=d.T.array)]), + withClaims(claims): { resources+: { claims: if std.isArray(v=claims) then claims else [claims] } }, + '#withClaimsMixin':: d.fn(help='"Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container.\\n\\nThis is an alpha field and requires enabling the DynamicResourceAllocation feature gate.\\n\\nThis field is immutable. It can only be set for containers."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='claims', type=d.T.array)]), + withClaimsMixin(claims): { resources+: { claims+: if std.isArray(v=claims) then claims else [claims] } }, + '#withLimits':: d.fn(help='"Limits describes the maximum amount of compute resources allowed. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/"', args=[d.arg(name='limits', type=d.T.object)]), + withLimits(limits): { resources+: { limits: limits } }, + '#withLimitsMixin':: d.fn(help='"Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='limits', type=d.T.object)]), + withLimitsMixin(limits): { resources+: { limits+: limits } }, + '#withRequests':: d.fn(help='"Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/"', args=[d.arg(name='requests', type=d.T.object)]), + withRequests(requests): { resources+: { requests: requests } }, + '#withRequestsMixin':: d.fn(help='"Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='requests', type=d.T.object)]), + withRequestsMixin(requests): { resources+: { requests+: requests } }, + }, '#state':: d.obj(help='"ContainerState holds a possible state of container. Only one of its members may be specified. If none of them is specified, the default one is ContainerStateWaiting."'), state: { '#running':: d.obj(help='"ContainerStateRunning is a running state of a container."'), @@ -42,7 +57,7 @@ }, '#terminated':: d.obj(help='"ContainerStateTerminated is a terminated state of a container."'), terminated: { - '#withContainerID':: d.fn(help="\"Container's ID in the format 'docker://\u003ccontainer_id\u003e'\"", args=[d.arg(name='containerID', type=d.T.string)]), + '#withContainerID':: d.fn(help="\"Container's ID in the format '\u003ctype\u003e://\u003ccontainer_id\u003e'\"", args=[d.arg(name='containerID', type=d.T.string)]), withContainerID(containerID): { state+: { terminated+: { containerID: containerID } } }, '#withExitCode':: d.fn(help='"Exit status from the last termination of the container"', args=[d.arg(name='exitCode', type=d.T.integer)]), withExitCode(exitCode): { state+: { terminated+: { exitCode: exitCode } } }, @@ -65,19 +80,23 @@ withReason(reason): { state+: { waiting+: { reason: reason } } }, }, }, - '#withContainerID':: d.fn(help="\"Container's ID in the format 'docker://\u003ccontainer_id\u003e'.\"", args=[d.arg(name='containerID', type=d.T.string)]), + '#withAllocatedResources':: d.fn(help='"AllocatedResources represents the compute resources allocated for this container by the node. Kubelet sets this value to Container.Resources.Requests upon successful pod admission and after successfully admitting desired pod resize."', args=[d.arg(name='allocatedResources', type=d.T.object)]), + withAllocatedResources(allocatedResources): { allocatedResources: allocatedResources }, + '#withAllocatedResourcesMixin':: d.fn(help='"AllocatedResources represents the compute resources allocated for this container by the node. 
Kubelet sets this value to Container.Resources.Requests upon successful pod admission and after successfully admitting desired pod resize."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='allocatedResources', type=d.T.object)]), + withAllocatedResourcesMixin(allocatedResources): { allocatedResources+: allocatedResources }, + '#withContainerID':: d.fn(help="\"ContainerID is the ID of the container in the format '\u003ctype\u003e://\u003ccontainer_id\u003e'. Where type is a container runtime identifier, returned from Version call of CRI API (for example \\\"containerd\\\").\"", args=[d.arg(name='containerID', type=d.T.string)]), withContainerID(containerID): { containerID: containerID }, - '#withImage':: d.fn(help='"The image the container is running. More info: https://kubernetes.io/docs/concepts/containers/images"', args=[d.arg(name='image', type=d.T.string)]), + '#withImage':: d.fn(help='"Image is the name of container image that the container is running. The container image may not match the image used in the PodSpec, as it may have been resolved by the runtime. More info: https://kubernetes.io/docs/concepts/containers/images."', args=[d.arg(name='image', type=d.T.string)]), withImage(image): { image: image }, - '#withImageID':: d.fn(help="\"ImageID of the container's image.\"", args=[d.arg(name='imageID', type=d.T.string)]), + '#withImageID':: d.fn(help="\"ImageID is the image ID of the container's image. The image ID may not match the image ID of the image used in the PodSpec, as it may have been resolved by the runtime.\"", args=[d.arg(name='imageID', type=d.T.string)]), withImageID(imageID): { imageID: imageID }, - '#withName':: d.fn(help='"This must be a DNS_LABEL. Each container in a pod must have a unique name. Cannot be updated."', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name is a DNS_LABEL representing the unique name of the container. Each container in a pod must have a unique name across all container types. Cannot be updated."', args=[d.arg(name='name', type=d.T.string)]), withName(name): { name: name }, - '#withReady':: d.fn(help='"Specifies whether the container has passed its readiness probe."', args=[d.arg(name='ready', type=d.T.boolean)]), + '#withReady':: d.fn(help='"Ready specifies whether the container is currently passing its readiness check. The value will change as readiness probes keep executing. If no readiness probes are specified, this field defaults to true once the container is fully started (see Started field).\\n\\nThe value is typically used to determine whether a container is ready to accept traffic."', args=[d.arg(name='ready', type=d.T.boolean)]), withReady(ready): { ready: ready }, - '#withRestartCount':: d.fn(help='"The number of times the container has been restarted, currently based on the number of dead containers that have not yet been removed. Note that this is calculated from dead containers. But those containers are subject to garbage collection. This value will get capped at 5 by GC."', args=[d.arg(name='restartCount', type=d.T.integer)]), + '#withRestartCount':: d.fn(help='"RestartCount holds the number of times the container has been restarted. Kubelet makes an effort to always increment the value, but there are cases when the state may be lost due to node restarts and then the value may be reset to 0. 
The value is never negative."', args=[d.arg(name='restartCount', type=d.T.integer)]), withRestartCount(restartCount): { restartCount: restartCount }, - '#withStarted':: d.fn(help='"Specifies whether the container has passed its startup probe. Initialized as false, becomes true after startupProbe is considered successful. Resets to false when the container is restarted, or if kubelet loses state temporarily. Is always true when no startupProbe is defined."', args=[d.arg(name='started', type=d.T.boolean)]), + '#withStarted':: d.fn(help='"Started indicates whether the container has finished its postStart lifecycle hook and passed its startup probe. Initialized as false, becomes true after startupProbe is considered successful. Resets to false when the container is restarted, or if kubelet loses state temporarily. In both cases, startup probes will run again. Is always true when no startupProbe is defined and container is running and has passed the postStart lifecycle hook. The null value must be treated the same as false."', args=[d.arg(name='started', type=d.T.boolean)]), withStarted(started): { started: started }, '#mixin': 'ignore', mixin: self, diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/csiPersistentVolumeSource.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/csiPersistentVolumeSource.libsonnet similarity index 59% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/csiPersistentVolumeSource.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/csiPersistentVolumeSource.libsonnet index cbf504bf3ec..78d0d7516aa 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/csiPersistentVolumeSource.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/csiPersistentVolumeSource.libsonnet @@ -3,43 +3,50 @@ '#':: d.pkg(name='csiPersistentVolumeSource', url='', help='"Represents storage that is managed by an external CSI volume driver (Beta feature)"'), '#controllerExpandSecretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), controllerExpandSecretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), withName(name): { controllerExpandSecretRef+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { controllerExpandSecretRef+: { namespace: namespace } }, }, '#controllerPublishSecretRef':: d.obj(help='"SecretReference represents a Secret Reference. 
It has enough information to retrieve secret in any namespace"'), controllerPublishSecretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), withName(name): { controllerPublishSecretRef+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { controllerPublishSecretRef+: { namespace: namespace } }, }, + '#nodeExpandSecretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), + nodeExpandSecretRef: { + '#withName':: d.fn(help='"name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { nodeExpandSecretRef+: { name: name } }, + '#withNamespace':: d.fn(help='"namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), + withNamespace(namespace): { nodeExpandSecretRef+: { namespace: namespace } }, + }, '#nodePublishSecretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), nodePublishSecretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), withName(name): { nodePublishSecretRef+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { nodePublishSecretRef+: { namespace: namespace } }, }, '#nodeStageSecretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), nodeStageSecretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), withName(name): { nodeStageSecretRef+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { nodeStageSecretRef+: { namespace: namespace } }, }, - '#withDriver':: d.fn(help='"Driver is the name of the driver to use for this volume. Required."', args=[d.arg(name='driver', type=d.T.string)]), + '#withDriver':: d.fn(help='"driver is the name of the driver to use for this volume. 
Required."', args=[d.arg(name='driver', type=d.T.string)]), withDriver(driver): { driver: driver }, - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\"."', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\"."', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { fsType: fsType }, - '#withReadOnly':: d.fn(help='"Optional: The value to pass to ControllerPublishVolumeRequest. Defaults to false (read/write)."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly value to pass to ControllerPublishVolumeRequest. Defaults to false (read/write)."', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { readOnly: readOnly }, - '#withVolumeAttributes':: d.fn(help='"Attributes of the volume to publish."', args=[d.arg(name='volumeAttributes', type=d.T.object)]), + '#withVolumeAttributes':: d.fn(help='"volumeAttributes of the volume to publish."', args=[d.arg(name='volumeAttributes', type=d.T.object)]), withVolumeAttributes(volumeAttributes): { volumeAttributes: volumeAttributes }, - '#withVolumeAttributesMixin':: d.fn(help='"Attributes of the volume to publish."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='volumeAttributes', type=d.T.object)]), + '#withVolumeAttributesMixin':: d.fn(help='"volumeAttributes of the volume to publish."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='volumeAttributes', type=d.T.object)]), withVolumeAttributesMixin(volumeAttributes): { volumeAttributes+: volumeAttributes }, - '#withVolumeHandle':: d.fn(help='"VolumeHandle is the unique volume name returned by the CSI volume plugin’s CreateVolume to refer to the volume on all subsequent calls. Required."', args=[d.arg(name='volumeHandle', type=d.T.string)]), + '#withVolumeHandle':: d.fn(help='"volumeHandle is the unique volume name returned by the CSI volume plugin’s CreateVolume to refer to the volume on all subsequent calls. Required."', args=[d.arg(name='volumeHandle', type=d.T.string)]), withVolumeHandle(volumeHandle): { volumeHandle: volumeHandle }, '#mixin': 'ignore', mixin: self, diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/csiVolumeSource.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/csiVolumeSource.libsonnet similarity index 71% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/csiVolumeSource.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/csiVolumeSource.libsonnet index fecc79031cf..1d8df7aaa97 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/csiVolumeSource.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/csiVolumeSource.libsonnet @@ -6,15 +6,15 @@ '#withName':: d.fn(help='"Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { nodePublishSecretRef+: { name: name } }, }, - '#withDriver':: d.fn(help='"Driver is the name of the CSI driver that handles this volume. 
Consult with your admin for the correct name as registered in the cluster."', args=[d.arg(name='driver', type=d.T.string)]), + '#withDriver':: d.fn(help='"driver is the name of the CSI driver that handles this volume. Consult with your admin for the correct name as registered in the cluster."', args=[d.arg(name='driver', type=d.T.string)]), withDriver(driver): { driver: driver }, - '#withFsType':: d.fn(help='"Filesystem type to mount. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". If not provided, the empty value is passed to the associated CSI driver which will determine the default filesystem to apply."', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType to mount. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". If not provided, the empty value is passed to the associated CSI driver which will determine the default filesystem to apply."', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { fsType: fsType }, - '#withReadOnly':: d.fn(help='"Specifies a read-only configuration for the volume. Defaults to false (read/write)."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly specifies a read-only configuration for the volume. Defaults to false (read/write)."', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { readOnly: readOnly }, - '#withVolumeAttributes':: d.fn(help="\"VolumeAttributes stores driver-specific properties that are passed to the CSI driver. Consult your driver's documentation for supported values.\"", args=[d.arg(name='volumeAttributes', type=d.T.object)]), + '#withVolumeAttributes':: d.fn(help="\"volumeAttributes stores driver-specific properties that are passed to the CSI driver. Consult your driver's documentation for supported values.\"", args=[d.arg(name='volumeAttributes', type=d.T.object)]), withVolumeAttributes(volumeAttributes): { volumeAttributes: volumeAttributes }, - '#withVolumeAttributesMixin':: d.fn(help="\"VolumeAttributes stores driver-specific properties that are passed to the CSI driver. Consult your driver's documentation for supported values.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='volumeAttributes', type=d.T.object)]), + '#withVolumeAttributesMixin':: d.fn(help="\"volumeAttributes stores driver-specific properties that are passed to the CSI driver. 
Consult your driver's documentation for supported values.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='volumeAttributes', type=d.T.object)]), withVolumeAttributesMixin(volumeAttributes): { volumeAttributes+: volumeAttributes }, '#mixin': 'ignore', mixin: self, diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/daemonEndpoint.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/daemonEndpoint.libsonnet similarity index 100% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/daemonEndpoint.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/daemonEndpoint.libsonnet diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/downwardAPIProjection.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/downwardAPIProjection.libsonnet similarity index 100% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/downwardAPIProjection.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/downwardAPIProjection.libsonnet diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/downwardAPIVolumeFile.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/downwardAPIVolumeFile.libsonnet new file mode 100644 index 00000000000..2a32c0886a2 --- /dev/null +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/downwardAPIVolumeFile.libsonnet @@ -0,0 +1,26 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='downwardAPIVolumeFile', url='', help='"DownwardAPIVolumeFile represents information to create the file containing the pod field"'), + '#fieldRef':: d.obj(help='"ObjectFieldSelector selects an APIVersioned field of an object."'), + fieldRef: { + '#withApiVersion':: d.fn(help='"Version of the schema the FieldPath is written in terms of, defaults to \\"v1\\"."', args=[d.arg(name='apiVersion', type=d.T.string)]), + withApiVersion(apiVersion): { fieldRef+: { apiVersion: apiVersion } }, + '#withFieldPath':: d.fn(help='"Path of the field to select in the specified API version."', args=[d.arg(name='fieldPath', type=d.T.string)]), + withFieldPath(fieldPath): { fieldRef+: { fieldPath: fieldPath } }, + }, + '#resourceFieldRef':: d.obj(help='"ResourceFieldSelector represents container resources (cpu, memory) and their output format"'), + resourceFieldRef: { + '#withContainerName':: d.fn(help='"Container name: required for volumes, optional for env vars"', args=[d.arg(name='containerName', type=d.T.string)]), + withContainerName(containerName): { resourceFieldRef+: { containerName: containerName } }, + '#withDivisor':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n``` \u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n\\n\\t(Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. 
| .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n\\n\\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n\\n\\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e ```\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n\\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\\n\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n\\n- 1.5 will be serialized as \\\"1500m\\\" - 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='divisor', type=d.T.string)]), + withDivisor(divisor): { resourceFieldRef+: { divisor: divisor } }, + '#withResource':: d.fn(help='"Required: resource to select"', args=[d.arg(name='resource', type=d.T.string)]), + withResource(resource): { resourceFieldRef+: { resource: resource } }, + }, + '#withMode':: d.fn(help='"Optional: mode bits used to set permissions on this file, must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set."', args=[d.arg(name='mode', type=d.T.integer)]), + withMode(mode): { mode: mode }, + '#withPath':: d.fn(help="\"Required: Path is the relative path name of the file to be created. Must not be absolute or contain the '..' path. Must be utf-8 encoded. 
The first item of the relative path must not start with '..'\"", args=[d.arg(name='path', type=d.T.string)]), + withPath(path): { path: path }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/downwardAPIVolumeSource.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/downwardAPIVolumeSource.libsonnet similarity index 100% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/downwardAPIVolumeSource.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/downwardAPIVolumeSource.libsonnet diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/emptyDirVolumeSource.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/emptyDirVolumeSource.libsonnet new file mode 100644 index 00000000000..1d05420ff43 --- /dev/null +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/emptyDirVolumeSource.libsonnet @@ -0,0 +1,10 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='emptyDirVolumeSource', url='', help='"Represents an empty directory for a pod. Empty directory volumes support ownership management and SELinux relabeling."'), + '#withMedium':: d.fn(help="\"medium represents what type of storage medium should back this directory. The default is \\\"\\\" which means to use the node's default medium. Must be an empty string (default) or Memory. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir\"", args=[d.arg(name='medium', type=d.T.string)]), + withMedium(medium): { medium: medium }, + '#withSizeLimit':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n``` \u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n\\n\\t(Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n\\n\\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n\\n\\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e ```\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) 
This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n\\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\\n\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n\\n- 1.5 will be serialized as \\\"1500m\\\" - 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='sizeLimit', type=d.T.string)]), + withSizeLimit(sizeLimit): { sizeLimit: sizeLimit }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/endpointAddress.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/endpointAddress.libsonnet similarity index 92% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/endpointAddress.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/endpointAddress.libsonnet index dd6d6d9d52b..12628c496a1 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/endpointAddress.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/endpointAddress.libsonnet @@ -20,7 +20,7 @@ }, '#withHostname':: d.fn(help='"The Hostname of this endpoint"', args=[d.arg(name='hostname', type=d.T.string)]), withHostname(hostname): { hostname: hostname }, - '#withIp':: d.fn(help='"The IP of this endpoint. May not be loopback (127.0.0.0/8), link-local (169.254.0.0/16), or link-local multicast ((224.0.0.0/24). IPv6 is also accepted but not fully supported on all platforms. Also, certain kubernetes components, like kube-proxy, are not IPv6 ready."', args=[d.arg(name='ip', type=d.T.string)]), + '#withIp':: d.fn(help='"The IP of this endpoint. May not be loopback (127.0.0.0/8 or ::1), link-local (169.254.0.0/16 or fe80::/10), or link-local multicast (224.0.0.0/24 or ff02::/16)."', args=[d.arg(name='ip', type=d.T.string)]), withIp(ip): { ip: ip }, '#withNodeName':: d.fn(help='"Optional: Node hosting this endpoint. 
This can be used to determine endpoints local to a node."', args=[d.arg(name='nodeName', type=d.T.string)]), withNodeName(nodeName): { nodeName: nodeName }, diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/endpointPort.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/endpointPort.libsonnet new file mode 100644 index 00000000000..1ba81c7f40e --- /dev/null +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/endpointPort.libsonnet @@ -0,0 +1,14 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='endpointPort', url='', help='"EndpointPort is a tuple that describes a single port."'), + '#withAppProtocol':: d.fn(help="\"The application protocol for this port. This is used as a hint for implementations to offer richer behavior for protocols that they understand. This field follows standard Kubernetes label syntax. Valid values are either:\\n\\n* Un-prefixed protocol names - reserved for IANA standard service names (as per RFC-6335 and https://www.iana.org/assignments/service-names).\\n\\n* Kubernetes-defined prefixed names:\\n * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior-\\n * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455\\n * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455\\n\\n* Other protocols should use implementation-defined prefixed names such as mycompany.com/my-custom-protocol.\"", args=[d.arg(name='appProtocol', type=d.T.string)]), + withAppProtocol(appProtocol): { appProtocol: appProtocol }, + '#withName':: d.fn(help="\"The name of this port. This must match the 'name' field in the corresponding ServicePort. Must be a DNS_LABEL. Optional only if one port is defined.\"", args=[d.arg(name='name', type=d.T.string)]), + withName(name): { name: name }, + '#withPort':: d.fn(help='"The port number of the endpoint."', args=[d.arg(name='port', type=d.T.integer)]), + withPort(port): { port: port }, + '#withProtocol':: d.fn(help='"The IP protocol for this port. Must be UDP, TCP, or SCTP. Default is TCP."', args=[d.arg(name='protocol', type=d.T.string)]), + withProtocol(protocol): { protocol: protocol }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/endpointSubset.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/endpointSubset.libsonnet similarity index 87% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/endpointSubset.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/endpointSubset.libsonnet index 3cb64fc0960..40ac9bb8135 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/endpointSubset.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/endpointSubset.libsonnet @@ -1,6 +1,6 @@ { local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='endpointSubset', url='', help='"EndpointSubset is a group of addresses with a common set of ports. The expanded set of endpoints is the Cartesian product of Addresses x Ports. 
For example, given:\\n {\\n Addresses: [{\\"ip\\": \\"10.10.1.1\\"}, {\\"ip\\": \\"10.10.2.2\\"}],\\n Ports: [{\\"name\\": \\"a\\", \\"port\\": 8675}, {\\"name\\": \\"b\\", \\"port\\": 309}]\\n }\\nThe resulting set of endpoints can be viewed as:\\n a: [ 10.10.1.1:8675, 10.10.2.2:8675 ],\\n b: [ 10.10.1.1:309, 10.10.2.2:309 ]"'), + '#':: d.pkg(name='endpointSubset', url='', help='"EndpointSubset is a group of addresses with a common set of ports. The expanded set of endpoints is the Cartesian product of Addresses x Ports. For example, given:\\n\\n\\t{\\n\\t Addresses: [{\\"ip\\": \\"10.10.1.1\\"}, {\\"ip\\": \\"10.10.2.2\\"}],\\n\\t Ports: [{\\"name\\": \\"a\\", \\"port\\": 8675}, {\\"name\\": \\"b\\", \\"port\\": 309}]\\n\\t}\\n\\nThe resulting set of endpoints can be viewed as:\\n\\n\\ta: [ 10.10.1.1:8675, 10.10.2.2:8675 ],\\n\\tb: [ 10.10.1.1:309, 10.10.2.2:309 ]"'), '#withAddresses':: d.fn(help='"IP addresses which offer the related ports that are marked as ready. These endpoints should be considered safe for load balancers and clients to utilize."', args=[d.arg(name='addresses', type=d.T.array)]), withAddresses(addresses): { addresses: if std.isArray(v=addresses) then addresses else [addresses] }, '#withAddressesMixin':: d.fn(help='"IP addresses which offer the related ports that are marked as ready. These endpoints should be considered safe for load balancers and clients to utilize."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='addresses', type=d.T.array)]), diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/endpoints.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/endpoints.libsonnet similarity index 82% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/endpoints.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/endpoints.libsonnet index e1adbbfd79c..10a6231b9a0 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/endpoints.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/endpoints.libsonnet @@ -1,14 +1,12 @@ { local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='endpoints', url='', help='"Endpoints is a collection of endpoints that implement the actual service. Example:\\n Name: \\"mysvc\\",\\n Subsets: [\\n {\\n Addresses: [{\\"ip\\": \\"10.10.1.1\\"}, {\\"ip\\": \\"10.10.2.2\\"}],\\n Ports: [{\\"name\\": \\"a\\", \\"port\\": 8675}, {\\"name\\": \\"b\\", \\"port\\": 309}]\\n },\\n {\\n Addresses: [{\\"ip\\": \\"10.10.3.3\\"}],\\n Ports: [{\\"name\\": \\"a\\", \\"port\\": 93}, {\\"name\\": \\"b\\", \\"port\\": 76}]\\n },\\n ]"'), + '#':: d.pkg(name='endpoints', url='', help='"Endpoints is a collection of endpoints that implement the actual service. 
Example:\\n\\n\\t Name: \\"mysvc\\",\\n\\t Subsets: [\\n\\t {\\n\\t Addresses: [{\\"ip\\": \\"10.10.1.1\\"}, {\\"ip\\": \\"10.10.2.2\\"}],\\n\\t Ports: [{\\"name\\": \\"a\\", \\"port\\": 8675}, {\\"name\\": \\"b\\", \\"port\\": 309}]\\n\\t },\\n\\t {\\n\\t Addresses: [{\\"ip\\": \\"10.10.3.3\\"}],\\n\\t Ports: [{\\"name\\": \\"a\\", \\"port\\": 93}, {\\"name\\": \\"b\\", \\"port\\": 76}]\\n\\t },\\n\\t]"'), '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. 
Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -19,21 +17,21 @@ withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { metadata+: { generateName: generateName } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. 
Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { metadata+: { labels+: labels } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. 
Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { metadata+: { namespace: namespace } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, @@ -41,9 +39,9 @@ withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. 
Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { metadata+: { uid: uid } }, }, '#new':: d.fn(help='new returns an instance of Endpoints', args=[d.arg(name='name', type=d.T.string)]), diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/envFromSource.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/envFromSource.libsonnet similarity index 100% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/envFromSource.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/envFromSource.libsonnet diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/envVar.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/envVar.libsonnet similarity index 53% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/envVar.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/envVar.libsonnet index 899a0c25d6b..a837784743d 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/envVar.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/envVar.libsonnet @@ -23,7 +23,7 @@ resourceFieldRef: { '#withContainerName':: d.fn(help='"Container name: required for volumes, optional for env vars"', args=[d.arg(name='containerName', type=d.T.string)]), withContainerName(containerName): { valueFrom+: { resourceFieldRef+: { containerName: containerName } } }, - '#withDivisor':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. 
| .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='divisor', type=d.T.string)]), + '#withDivisor':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n``` \u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n\\n\\t(Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. 
| .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n\\n\\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n\\n\\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e ```\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n\\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\\n\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n\\n- 1.5 will be serialized as \\\"1500m\\\" - 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='divisor', type=d.T.string)]), withDivisor(divisor): { valueFrom+: { resourceFieldRef+: { divisor: divisor } } }, '#withResource':: d.fn(help='"Required: resource to select"', args=[d.arg(name='resource', type=d.T.string)]), withResource(resource): { valueFrom+: { resourceFieldRef+: { resource: resource } } }, @@ -40,7 +40,7 @@ }, '#withName':: d.fn(help='"Name of the environment variable. Must be a C_IDENTIFIER."', args=[d.arg(name='name', type=d.T.string)]), withName(name): { name: name }, - '#withValue':: d.fn(help='"Variable references $(VAR_NAME) are expanded using the previous defined environment variables in the container and any service environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to \\"\\"."', args=[d.arg(name='value', type=d.T.string)]), + '#withValue':: d.fn(help='"Variable references $(VAR_NAME) are expanded using the previously defined environment variables in the container and any service environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. 
\\"$$(VAR_NAME)\\" will produce the string literal \\"$(VAR_NAME)\\". Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to \\"\\"."', args=[d.arg(name='value', type=d.T.string)]), withValue(value): { value: value }, '#mixin': 'ignore', mixin: self, diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/envVarSource.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/envVarSource.libsonnet similarity index 53% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/envVarSource.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/envVarSource.libsonnet index 4fedae6993a..1c830b0ad03 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/envVarSource.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/envVarSource.libsonnet @@ -21,7 +21,7 @@ resourceFieldRef: { '#withContainerName':: d.fn(help='"Container name: required for volumes, optional for env vars"', args=[d.arg(name='containerName', type=d.T.string)]), withContainerName(containerName): { resourceFieldRef+: { containerName: containerName } }, - '#withDivisor':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. 
The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='divisor', type=d.T.string)]), + '#withDivisor':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n``` \u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n\\n\\t(Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n\\n\\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n\\n\\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e ```\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n\\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\\n\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n\\n- 1.5 will be serialized as \\\"1500m\\\" - 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. 
(So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='divisor', type=d.T.string)]), withDivisor(divisor): { resourceFieldRef+: { divisor: divisor } }, '#withResource':: d.fn(help='"Required: resource to select"', args=[d.arg(name='resource', type=d.T.string)]), withResource(resource): { resourceFieldRef+: { resource: resource } }, diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/ephemeralContainer.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/ephemeralContainer.libsonnet similarity index 78% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/ephemeralContainer.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/ephemeralContainer.libsonnet index 20a7dd8a0c2..937b5770da3 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/ephemeralContainer.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/ephemeralContainer.libsonnet @@ -1,9 +1,9 @@ { local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='ephemeralContainer', url='', help="\"An EphemeralContainer is a container that may be added temporarily to an existing pod for user-initiated activities such as debugging. Ephemeral containers have no resource or scheduling guarantees, and they will not be restarted when they exit or when a pod is removed or restarted. If an ephemeral container causes a pod to exceed its resource allocation, the pod may be evicted. Ephemeral containers may not be added by directly updating the pod spec. They must be added via the pod's ephemeralcontainers subresource, and they will appear in the pod spec once added. This is an alpha feature enabled by the EphemeralContainers feature flag.\""), + '#':: d.pkg(name='ephemeralContainer', url='', help='"An EphemeralContainer is a temporary container that you may add to an existing Pod for user-initiated activities such as debugging. Ephemeral containers have no resource or scheduling guarantees, and they will not be restarted when they exit or when a Pod is removed or restarted. The kubelet may evict a Pod if an ephemeral container causes the Pod to exceed its resource allocation.\\n\\nTo add an ephemeral container, use the ephemeralcontainers subresource of an existing Pod. Ephemeral containers may not be removed or restarted."'), '#lifecycle':: d.obj(help='"Lifecycle describes actions that the management system should take in response to container lifecycle events. For the PostStart and PreStop lifecycle handlers, management of the container blocks until the action is complete, unless the container process fails, in which case the handler is aborted."'), lifecycle: { - '#postStart':: d.obj(help='"Handler defines a specific action that should be taken"'), + '#postStart':: d.obj(help='"LifecycleHandler defines a specific action that should be taken in a lifecycle hook. One and only one of the fields, except TCPSocket must be specified."'), postStart: { '#exec':: d.obj(help='"ExecAction describes a \\"run in container\\" action."'), exec: { @@ -27,6 +27,11 @@ '#withScheme':: d.fn(help='"Scheme to use for connecting to the host. 
Defaults to HTTP."', args=[d.arg(name='scheme', type=d.T.string)]), withScheme(scheme): { lifecycle+: { postStart+: { httpGet+: { scheme: scheme } } } }, }, + '#sleep':: d.obj(help='"SleepAction describes a \\"sleep\\" action."'), + sleep: { + '#withSeconds':: d.fn(help='"Seconds is the number of seconds to sleep."', args=[d.arg(name='seconds', type=d.T.integer)]), + withSeconds(seconds): { lifecycle+: { postStart+: { sleep+: { seconds: seconds } } } }, + }, '#tcpSocket':: d.obj(help='"TCPSocketAction describes an action based on opening a socket"'), tcpSocket: { '#withHost':: d.fn(help='"Optional: Host name to connect to, defaults to the pod IP."', args=[d.arg(name='host', type=d.T.string)]), @@ -35,7 +40,7 @@ withPort(port): { lifecycle+: { postStart+: { tcpSocket+: { port: port } } } }, }, }, - '#preStop':: d.obj(help='"Handler defines a specific action that should be taken"'), + '#preStop':: d.obj(help='"LifecycleHandler defines a specific action that should be taken in a lifecycle hook. One and only one of the fields, except TCPSocket must be specified."'), preStop: { '#exec':: d.obj(help='"ExecAction describes a \\"run in container\\" action."'), exec: { @@ -59,6 +64,11 @@ '#withScheme':: d.fn(help='"Scheme to use for connecting to the host. Defaults to HTTP."', args=[d.arg(name='scheme', type=d.T.string)]), withScheme(scheme): { lifecycle+: { preStop+: { httpGet+: { scheme: scheme } } } }, }, + '#sleep':: d.obj(help='"SleepAction describes a \\"sleep\\" action."'), + sleep: { + '#withSeconds':: d.fn(help='"Seconds is the number of seconds to sleep."', args=[d.arg(name='seconds', type=d.T.integer)]), + withSeconds(seconds): { lifecycle+: { preStop+: { sleep+: { seconds: seconds } } } }, + }, '#tcpSocket':: d.obj(help='"TCPSocketAction describes an action based on opening a socket"'), tcpSocket: { '#withHost':: d.fn(help='"Optional: Host name to connect to, defaults to the pod IP."', args=[d.arg(name='host', type=d.T.string)]), @@ -77,6 +87,13 @@ '#withCommandMixin':: d.fn(help="\"Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='command', type=d.T.array)]), withCommandMixin(command): { livenessProbe+: { exec+: { command+: if std.isArray(v=command) then command else [command] } } }, }, + '#grpc':: d.obj(help=''), + grpc: { + '#withPort':: d.fn(help='"Port number of the gRPC service. Number must be in the range 1 to 65535."', args=[d.arg(name='port', type=d.T.integer)]), + withPort(port): { livenessProbe+: { grpc+: { port: port } } }, + '#withService':: d.fn(help='"Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).\\n\\nIf this is not specified, the default behavior is defined by gRPC."', args=[d.arg(name='service', type=d.T.string)]), + withService(service): { livenessProbe+: { grpc+: { service: service } } }, + }, '#httpGet':: d.obj(help='"HTTPGetAction describes an action based on HTTP Get requests."'), httpGet: { '#withHost':: d.fn(help='"Host name to connect to, defaults to the pod IP. 
You probably want to set \\"Host\\" in httpHeaders instead."', args=[d.arg(name='host', type=d.T.string)]), @@ -107,7 +124,7 @@ withPeriodSeconds(periodSeconds): { livenessProbe+: { periodSeconds: periodSeconds } }, '#withSuccessThreshold':: d.fn(help='"Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1."', args=[d.arg(name='successThreshold', type=d.T.integer)]), withSuccessThreshold(successThreshold): { livenessProbe+: { successThreshold: successThreshold } }, - '#withTerminationGracePeriodSeconds':: d.fn(help="\"Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is an alpha field and requires enabling ProbeTerminationGracePeriod feature gate.\"", args=[d.arg(name='terminationGracePeriodSeconds', type=d.T.integer)]), + '#withTerminationGracePeriodSeconds':: d.fn(help="\"Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset.\"", args=[d.arg(name='terminationGracePeriodSeconds', type=d.T.integer)]), withTerminationGracePeriodSeconds(terminationGracePeriodSeconds): { livenessProbe+: { terminationGracePeriodSeconds: terminationGracePeriodSeconds } }, '#withTimeoutSeconds':: d.fn(help='"Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes"', args=[d.arg(name='timeoutSeconds', type=d.T.integer)]), withTimeoutSeconds(timeoutSeconds): { livenessProbe+: { timeoutSeconds: timeoutSeconds } }, @@ -121,6 +138,13 @@ '#withCommandMixin':: d.fn(help="\"Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. 
Exit status of 0 is treated as live/healthy and non-zero is unhealthy.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='command', type=d.T.array)]), withCommandMixin(command): { readinessProbe+: { exec+: { command+: if std.isArray(v=command) then command else [command] } } }, }, + '#grpc':: d.obj(help=''), + grpc: { + '#withPort':: d.fn(help='"Port number of the gRPC service. Number must be in the range 1 to 65535."', args=[d.arg(name='port', type=d.T.integer)]), + withPort(port): { readinessProbe+: { grpc+: { port: port } } }, + '#withService':: d.fn(help='"Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).\\n\\nIf this is not specified, the default behavior is defined by gRPC."', args=[d.arg(name='service', type=d.T.string)]), + withService(service): { readinessProbe+: { grpc+: { service: service } } }, + }, '#httpGet':: d.obj(help='"HTTPGetAction describes an action based on HTTP Get requests."'), httpGet: { '#withHost':: d.fn(help='"Host name to connect to, defaults to the pod IP. You probably want to set \\"Host\\" in httpHeaders instead."', args=[d.arg(name='host', type=d.T.string)]), @@ -151,20 +175,24 @@ withPeriodSeconds(periodSeconds): { readinessProbe+: { periodSeconds: periodSeconds } }, '#withSuccessThreshold':: d.fn(help='"Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1."', args=[d.arg(name='successThreshold', type=d.T.integer)]), withSuccessThreshold(successThreshold): { readinessProbe+: { successThreshold: successThreshold } }, - '#withTerminationGracePeriodSeconds':: d.fn(help="\"Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is an alpha field and requires enabling ProbeTerminationGracePeriod feature gate.\"", args=[d.arg(name='terminationGracePeriodSeconds', type=d.T.integer)]), + '#withTerminationGracePeriodSeconds':: d.fn(help="\"Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. Minimum value is 1. 
spec.terminationGracePeriodSeconds is used if unset.\"", args=[d.arg(name='terminationGracePeriodSeconds', type=d.T.integer)]), withTerminationGracePeriodSeconds(terminationGracePeriodSeconds): { readinessProbe+: { terminationGracePeriodSeconds: terminationGracePeriodSeconds } }, '#withTimeoutSeconds':: d.fn(help='"Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes"', args=[d.arg(name='timeoutSeconds', type=d.T.integer)]), withTimeoutSeconds(timeoutSeconds): { readinessProbe+: { timeoutSeconds: timeoutSeconds } }, }, '#resources':: d.obj(help='"ResourceRequirements describes the compute resource requirements."'), resources: { + '#withClaims':: d.fn(help='"Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container.\\n\\nThis is an alpha field and requires enabling the DynamicResourceAllocation feature gate.\\n\\nThis field is immutable. It can only be set for containers."', args=[d.arg(name='claims', type=d.T.array)]), + withClaims(claims): { resources+: { claims: if std.isArray(v=claims) then claims else [claims] } }, + '#withClaimsMixin':: d.fn(help='"Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container.\\n\\nThis is an alpha field and requires enabling the DynamicResourceAllocation feature gate.\\n\\nThis field is immutable. It can only be set for containers."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='claims', type=d.T.array)]), + withClaimsMixin(claims): { resources+: { claims+: if std.isArray(v=claims) then claims else [claims] } }, '#withLimits':: d.fn(help='"Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/"', args=[d.arg(name='limits', type=d.T.object)]), withLimits(limits): { resources+: { limits: limits } }, '#withLimitsMixin':: d.fn(help='"Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='limits', type=d.T.object)]), withLimitsMixin(limits): { resources+: { limits+: limits } }, - '#withRequests':: d.fn(help='"Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/"', args=[d.arg(name='requests', type=d.T.object)]), + '#withRequests':: d.fn(help='"Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/"', args=[d.arg(name='requests', type=d.T.object)]), withRequests(requests): { resources+: { requests: requests } }, - '#withRequestsMixin':: d.fn(help='"Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='requests', type=d.T.object)]), + '#withRequestsMixin':: d.fn(help='"Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='requests', type=d.T.object)]), withRequestsMixin(requests): { resources+: { requests+: requests } }, }, '#securityContext':: d.obj(help='"SecurityContext holds security configuration that will be applied to a container. Some fields are present in both SecurityContext and PodSecurityContext. When both are set, the values in SecurityContext take precedence."'), @@ -193,7 +221,7 @@ }, '#seccompProfile':: d.obj(help="\"SeccompProfile defines a pod/container's seccomp profile settings. Only one profile source may be set.\""), seccompProfile: { - '#withLocalhostProfile':: d.fn(help="\"localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must only be set if type is \\\"Localhost\\\".\"", args=[d.arg(name='localhostProfile', type=d.T.string)]), + '#withLocalhostProfile':: d.fn(help="\"localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must be set if type is \\\"Localhost\\\". Must NOT be set for any other type.\"", args=[d.arg(name='localhostProfile', type=d.T.string)]), withLocalhostProfile(localhostProfile): { securityContext+: { seccompProfile+: { localhostProfile: localhostProfile } } }, '#withType':: d.fn(help='"type indicates which kind of seccomp profile will be applied. Valid options are:\\n\\nLocalhost - a profile defined in a file on the node should be used. RuntimeDefault - the container runtime default profile should be used. Unconfined - no profile should be applied."', args=[d.arg(name='type', type=d.T.string)]), withType(type): { securityContext+: { seccompProfile+: { type: type } } }, @@ -204,22 +232,24 @@ withGmsaCredentialSpec(gmsaCredentialSpec): { securityContext+: { windowsOptions+: { gmsaCredentialSpec: gmsaCredentialSpec } } }, '#withGmsaCredentialSpecName':: d.fn(help='"GMSACredentialSpecName is the name of the GMSA credential spec to use."', args=[d.arg(name='gmsaCredentialSpecName', type=d.T.string)]), withGmsaCredentialSpecName(gmsaCredentialSpecName): { securityContext+: { windowsOptions+: { gmsaCredentialSpecName: gmsaCredentialSpecName } } }, + '#withHostProcess':: d.fn(help="\"HostProcess determines if a container should be run as a 'Host Process' container. All of a Pod's containers must have the same effective HostProcess value (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). 
In addition, if HostProcess is true then HostNetwork must also be set to true.\"", args=[d.arg(name='hostProcess', type=d.T.boolean)]), + withHostProcess(hostProcess): { securityContext+: { windowsOptions+: { hostProcess: hostProcess } } }, '#withRunAsUserName':: d.fn(help='"The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence."', args=[d.arg(name='runAsUserName', type=d.T.string)]), withRunAsUserName(runAsUserName): { securityContext+: { windowsOptions+: { runAsUserName: runAsUserName } } }, }, - '#withAllowPrivilegeEscalation':: d.fn(help='"AllowPrivilegeEscalation controls whether a process can gain more privileges than its parent process. This bool directly controls if the no_new_privs flag will be set on the container process. AllowPrivilegeEscalation is true always when the container is: 1) run as Privileged 2) has CAP_SYS_ADMIN"', args=[d.arg(name='allowPrivilegeEscalation', type=d.T.boolean)]), + '#withAllowPrivilegeEscalation':: d.fn(help='"AllowPrivilegeEscalation controls whether a process can gain more privileges than its parent process. This bool directly controls if the no_new_privs flag will be set on the container process. AllowPrivilegeEscalation is true always when the container is: 1) run as Privileged 2) has CAP_SYS_ADMIN Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='allowPrivilegeEscalation', type=d.T.boolean)]), withAllowPrivilegeEscalation(allowPrivilegeEscalation): { securityContext+: { allowPrivilegeEscalation: allowPrivilegeEscalation } }, - '#withPrivileged':: d.fn(help='"Run container in privileged mode. Processes in privileged containers are essentially equivalent to root on the host. Defaults to false."', args=[d.arg(name='privileged', type=d.T.boolean)]), + '#withPrivileged':: d.fn(help='"Run container in privileged mode. Processes in privileged containers are essentially equivalent to root on the host. Defaults to false. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='privileged', type=d.T.boolean)]), withPrivileged(privileged): { securityContext+: { privileged: privileged } }, - '#withProcMount':: d.fn(help='"procMount denotes the type of proc mount to use for the containers. The default is DefaultProcMount which uses the container runtime defaults for readonly paths and masked paths. This requires the ProcMountType feature flag to be enabled."', args=[d.arg(name='procMount', type=d.T.string)]), + '#withProcMount':: d.fn(help='"procMount denotes the type of proc mount to use for the containers. The default is DefaultProcMount which uses the container runtime defaults for readonly paths and masked paths. This requires the ProcMountType feature flag to be enabled. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='procMount', type=d.T.string)]), withProcMount(procMount): { securityContext+: { procMount: procMount } }, - '#withReadOnlyRootFilesystem':: d.fn(help='"Whether this container has a read-only root filesystem. Default is false."', args=[d.arg(name='readOnlyRootFilesystem', type=d.T.boolean)]), + '#withReadOnlyRootFilesystem':: d.fn(help='"Whether this container has a read-only root filesystem. Default is false. 
Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='readOnlyRootFilesystem', type=d.T.boolean)]), withReadOnlyRootFilesystem(readOnlyRootFilesystem): { securityContext+: { readOnlyRootFilesystem: readOnlyRootFilesystem } }, - '#withRunAsGroup':: d.fn(help='"The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence."', args=[d.arg(name='runAsGroup', type=d.T.integer)]), + '#withRunAsGroup':: d.fn(help='"The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='runAsGroup', type=d.T.integer)]), withRunAsGroup(runAsGroup): { securityContext+: { runAsGroup: runAsGroup } }, '#withRunAsNonRoot':: d.fn(help='"Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence."', args=[d.arg(name='runAsNonRoot', type=d.T.boolean)]), withRunAsNonRoot(runAsNonRoot): { securityContext+: { runAsNonRoot: runAsNonRoot } }, - '#withRunAsUser':: d.fn(help='"The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence."', args=[d.arg(name='runAsUser', type=d.T.integer)]), + '#withRunAsUser':: d.fn(help='"The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='runAsUser', type=d.T.integer)]), withRunAsUser(runAsUser): { securityContext+: { runAsUser: runAsUser } }, }, '#startupProbe':: d.obj(help='"Probe describes a health check to be performed against a container to determine whether it is alive or ready to receive traffic."'), @@ -231,6 +261,13 @@ '#withCommandMixin':: d.fn(help="\"Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='command', type=d.T.array)]), withCommandMixin(command): { startupProbe+: { exec+: { command+: if std.isArray(v=command) then command else [command] } } }, }, + '#grpc':: d.obj(help=''), + grpc: { + '#withPort':: d.fn(help='"Port number of the gRPC service. 
Number must be in the range 1 to 65535."', args=[d.arg(name='port', type=d.T.integer)]), + withPort(port): { startupProbe+: { grpc+: { port: port } } }, + '#withService':: d.fn(help='"Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).\\n\\nIf this is not specified, the default behavior is defined by gRPC."', args=[d.arg(name='service', type=d.T.string)]), + withService(service): { startupProbe+: { grpc+: { service: service } } }, + }, '#httpGet':: d.obj(help='"HTTPGetAction describes an action based on HTTP Get requests."'), httpGet: { '#withHost':: d.fn(help='"Host name to connect to, defaults to the pod IP. You probably want to set \\"Host\\" in httpHeaders instead."', args=[d.arg(name='host', type=d.T.string)]), @@ -261,18 +298,18 @@ withPeriodSeconds(periodSeconds): { startupProbe+: { periodSeconds: periodSeconds } }, '#withSuccessThreshold':: d.fn(help='"Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1."', args=[d.arg(name='successThreshold', type=d.T.integer)]), withSuccessThreshold(successThreshold): { startupProbe+: { successThreshold: successThreshold } }, - '#withTerminationGracePeriodSeconds':: d.fn(help="\"Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is an alpha field and requires enabling ProbeTerminationGracePeriod feature gate.\"", args=[d.arg(name='terminationGracePeriodSeconds', type=d.T.integer)]), + '#withTerminationGracePeriodSeconds':: d.fn(help="\"Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset.\"", args=[d.arg(name='terminationGracePeriodSeconds', type=d.T.integer)]), withTerminationGracePeriodSeconds(terminationGracePeriodSeconds): { startupProbe+: { terminationGracePeriodSeconds: terminationGracePeriodSeconds } }, '#withTimeoutSeconds':: d.fn(help='"Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. 
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes"', args=[d.arg(name='timeoutSeconds', type=d.T.integer)]), withTimeoutSeconds(timeoutSeconds): { startupProbe+: { timeoutSeconds: timeoutSeconds } }, }, - '#withArgs':: d.fn(help="\"Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell\"", args=[d.arg(name='args', type=d.T.array)]), + '#withArgs':: d.fn(help="\"Arguments to the entrypoint. The image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \\\"$$(VAR_NAME)\\\" will produce the string literal \\\"$(VAR_NAME)\\\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell\"", args=[d.arg(name='args', type=d.T.array)]), withArgs(args): { args: if std.isArray(v=args) then args else [args] }, - '#withArgsMixin':: d.fn(help="\"Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='args', type=d.T.array)]), + '#withArgsMixin':: d.fn(help="\"Arguments to the entrypoint. The image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \\\"$$(VAR_NAME)\\\" will produce the string literal \\\"$(VAR_NAME)\\\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='args', type=d.T.array)]), withArgsMixin(args): { args+: if std.isArray(v=args) then args else [args] }, - '#withCommand':: d.fn(help="\"Entrypoint array. Not executed within a shell. The docker image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. 
If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell\"", args=[d.arg(name='command', type=d.T.array)]), + '#withCommand':: d.fn(help="\"Entrypoint array. Not executed within a shell. The image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \\\"$$(VAR_NAME)\\\" will produce the string literal \\\"$(VAR_NAME)\\\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell\"", args=[d.arg(name='command', type=d.T.array)]), withCommand(command): { command: if std.isArray(v=command) then command else [command] }, - '#withCommandMixin':: d.fn(help="\"Entrypoint array. Not executed within a shell. The docker image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='command', type=d.T.array)]), + '#withCommandMixin':: d.fn(help="\"Entrypoint array. Not executed within a shell. The image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \\\"$$(VAR_NAME)\\\" will produce the string literal \\\"$(VAR_NAME)\\\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='command', type=d.T.array)]), withCommandMixin(command): { command+: if std.isArray(v=command) then command else [command] }, '#withEnv':: d.fn(help='"List of environment variables to set in the container. Cannot be updated."', args=[d.arg(name='env', type=d.T.array)]), withEnv(env): { env: if std.isArray(v=env) then env else [env] }, @@ -282,7 +319,7 @@ withEnvFromMixin(envFrom): { envFrom+: if std.isArray(v=envFrom) then envFrom else [envFrom] }, '#withEnvMixin':: d.fn(help='"List of environment variables to set in the container. 
Cannot be updated."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='env', type=d.T.array)]), withEnvMixin(env): { env+: if std.isArray(v=env) then env else [env] }, - '#withImage':: d.fn(help='"Docker image name. More info: https://kubernetes.io/docs/concepts/containers/images"', args=[d.arg(name='image', type=d.T.string)]), + '#withImage':: d.fn(help='"Container image name. More info: https://kubernetes.io/docs/concepts/containers/images"', args=[d.arg(name='image', type=d.T.string)]), withImage(image): { image: image }, '#withImagePullPolicy':: d.fn(help='"Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images"', args=[d.arg(name='imagePullPolicy', type=d.T.string)]), withImagePullPolicy(imagePullPolicy): { imagePullPolicy: imagePullPolicy }, @@ -292,11 +329,17 @@ withPorts(ports): { ports: if std.isArray(v=ports) then ports else [ports] }, '#withPortsMixin':: d.fn(help='"Ports are not allowed for ephemeral containers."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ports', type=d.T.array)]), withPortsMixin(ports): { ports+: if std.isArray(v=ports) then ports else [ports] }, + '#withResizePolicy':: d.fn(help='"Resources resize policy for the container."', args=[d.arg(name='resizePolicy', type=d.T.array)]), + withResizePolicy(resizePolicy): { resizePolicy: if std.isArray(v=resizePolicy) then resizePolicy else [resizePolicy] }, + '#withResizePolicyMixin':: d.fn(help='"Resources resize policy for the container."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='resizePolicy', type=d.T.array)]), + withResizePolicyMixin(resizePolicy): { resizePolicy+: if std.isArray(v=resizePolicy) then resizePolicy else [resizePolicy] }, + '#withRestartPolicy':: d.fn(help='"Restart policy for the container to manage the restart behavior of each container within a pod. This may only be set for init containers. You cannot set this field on ephemeral containers."', args=[d.arg(name='restartPolicy', type=d.T.string)]), + withRestartPolicy(restartPolicy): { restartPolicy: restartPolicy }, '#withStdin':: d.fn(help='"Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false."', args=[d.arg(name='stdin', type=d.T.boolean)]), withStdin(stdin): { stdin: stdin }, '#withStdinOnce':: d.fn(help='"Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF. Default is false"', args=[d.arg(name='stdinOnce', type=d.T.boolean)]), withStdinOnce(stdinOnce): { stdinOnce: stdinOnce }, - '#withTargetContainerName':: d.fn(help='"If set, the name of the container from PodSpec that this ephemeral container targets. The ephemeral container will be run in the namespaces (IPC, PID, etc) of this container. 
If not set then the ephemeral container is run in whatever namespaces are shared for the pod. Note that the container runtime must support this feature."', args=[d.arg(name='targetContainerName', type=d.T.string)]), + '#withTargetContainerName':: d.fn(help='"If set, the name of the container from PodSpec that this ephemeral container targets. The ephemeral container will be run in the namespaces (IPC, PID, etc) of this container. If not set then the ephemeral container uses the namespaces configured in the Pod spec.\\n\\nThe container runtime must implement support for this feature. If the runtime does not support namespace targeting then the result of setting this field is undefined."', args=[d.arg(name='targetContainerName', type=d.T.string)]), withTargetContainerName(targetContainerName): { targetContainerName: targetContainerName }, '#withTerminationMessagePath':: d.fn(help="\"Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated.\"", args=[d.arg(name='terminationMessagePath', type=d.T.string)]), withTerminationMessagePath(terminationMessagePath): { terminationMessagePath: terminationMessagePath }, @@ -308,9 +351,9 @@ withVolumeDevices(volumeDevices): { volumeDevices: if std.isArray(v=volumeDevices) then volumeDevices else [volumeDevices] }, '#withVolumeDevicesMixin':: d.fn(help='"volumeDevices is the list of block devices to be used by the container."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='volumeDevices', type=d.T.array)]), withVolumeDevicesMixin(volumeDevices): { volumeDevices+: if std.isArray(v=volumeDevices) then volumeDevices else [volumeDevices] }, - '#withVolumeMounts':: d.fn(help="\"Pod volumes to mount into the container's filesystem. Cannot be updated.\"", args=[d.arg(name='volumeMounts', type=d.T.array)]), + '#withVolumeMounts':: d.fn(help="\"Pod volumes to mount into the container's filesystem. Subpath mounts are not allowed for ephemeral containers. Cannot be updated.\"", args=[d.arg(name='volumeMounts', type=d.T.array)]), withVolumeMounts(volumeMounts): { volumeMounts: if std.isArray(v=volumeMounts) then volumeMounts else [volumeMounts] }, - '#withVolumeMountsMixin':: d.fn(help="\"Pod volumes to mount into the container's filesystem. Cannot be updated.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='volumeMounts', type=d.T.array)]), + '#withVolumeMountsMixin':: d.fn(help="\"Pod volumes to mount into the container's filesystem. Subpath mounts are not allowed for ephemeral containers. Cannot be updated.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='volumeMounts', type=d.T.array)]), withVolumeMountsMixin(volumeMounts): { volumeMounts+: if std.isArray(v=volumeMounts) then volumeMounts else [volumeMounts] }, '#withWorkingDir':: d.fn(help="\"Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. 
Cannot be updated.\"", args=[d.arg(name='workingDir', type=d.T.string)]), withWorkingDir(workingDir): { workingDir: workingDir }, diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/ephemeralVolumeSource.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/ephemeralVolumeSource.libsonnet similarity index 76% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/ephemeralVolumeSource.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/ephemeralVolumeSource.libsonnet index d3e1ad852dc..45d326c96e4 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/ephemeralVolumeSource.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/ephemeralVolumeSource.libsonnet @@ -5,12 +5,10 @@ volumeClaimTemplate: { '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { volumeClaimTemplate+: { metadata+: { annotations: annotations } } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { volumeClaimTemplate+: { metadata+: { annotations+: annotations } } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. 
This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { volumeClaimTemplate+: { metadata+: { clusterName: clusterName } } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { volumeClaimTemplate+: { metadata+: { creationTimestamp: creationTimestamp } } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -21,21 +19,21 @@ withFinalizers(finalizers): { volumeClaimTemplate+: { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { volumeClaimTemplate+: { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. 
If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { volumeClaimTemplate+: { metadata+: { generateName: generateName } } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { volumeClaimTemplate+: { metadata+: { generation: generation } } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { volumeClaimTemplate+: { metadata+: { labels: labels } } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { volumeClaimTemplate+: { metadata+: { labels+: labels } } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { volumeClaimTemplate+: { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. 
A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { volumeClaimTemplate+: { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { volumeClaimTemplate+: { metadata+: { name: name } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { volumeClaimTemplate+: { metadata+: { namespace: namespace } } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { volumeClaimTemplate+: { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } } }, @@ -43,9 +41,9 @@ withOwnerReferencesMixin(ownerReferences): { volumeClaimTemplate+: { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. 
May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { volumeClaimTemplate+: { metadata+: { resourceVersion: resourceVersion } } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { volumeClaimTemplate+: { metadata+: { selfLink: selfLink } } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { volumeClaimTemplate+: { metadata+: { uid: uid } } }, }, '#spec':: d.obj(help='"PersistentVolumeClaimSpec describes the common attributes of storage devices and allows a Source for provider-specific attributes"'), @@ -59,15 +57,26 @@ '#withName':: d.fn(help='"Name is the name of resource being referenced"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { volumeClaimTemplate+: { spec+: { dataSource+: { name: name } } } }, }, - '#resources':: d.obj(help='"ResourceRequirements describes the compute resource requirements."'), + '#dataSourceRef':: d.obj(help=''), + dataSourceRef: { + '#withApiGroup':: d.fn(help='"APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. 
For any other third-party types, APIGroup is required."', args=[d.arg(name='apiGroup', type=d.T.string)]), + withApiGroup(apiGroup): { volumeClaimTemplate+: { spec+: { dataSourceRef+: { apiGroup: apiGroup } } } }, + '#withKind':: d.fn(help='"Kind is the type of resource being referenced"', args=[d.arg(name='kind', type=d.T.string)]), + withKind(kind): { volumeClaimTemplate+: { spec+: { dataSourceRef+: { kind: kind } } } }, + '#withName':: d.fn(help='"Name is the name of resource being referenced"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { volumeClaimTemplate+: { spec+: { dataSourceRef+: { name: name } } } }, + '#withNamespace':: d.fn(help="\"Namespace is the namespace of resource being referenced Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled.\"", args=[d.arg(name='namespace', type=d.T.string)]), + withNamespace(namespace): { volumeClaimTemplate+: { spec+: { dataSourceRef+: { namespace: namespace } } } }, + }, + '#resources':: d.obj(help='"VolumeResourceRequirements describes the storage resource requirements for a volume."'), resources: { '#withLimits':: d.fn(help='"Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/"', args=[d.arg(name='limits', type=d.T.object)]), withLimits(limits): { volumeClaimTemplate+: { spec+: { resources+: { limits: limits } } } }, '#withLimitsMixin':: d.fn(help='"Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='limits', type=d.T.object)]), withLimitsMixin(limits): { volumeClaimTemplate+: { spec+: { resources+: { limits+: limits } } } }, - '#withRequests':: d.fn(help='"Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/"', args=[d.arg(name='requests', type=d.T.object)]), + '#withRequests':: d.fn(help='"Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/"', args=[d.arg(name='requests', type=d.T.object)]), withRequests(requests): { volumeClaimTemplate+: { spec+: { resources+: { requests: requests } } } }, - '#withRequestsMixin':: d.fn(help='"Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='requests', type=d.T.object)]), + '#withRequestsMixin':: d.fn(help='"Requests describes the minimum amount of compute resources required. 
If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='requests', type=d.T.object)]), withRequestsMixin(requests): { volumeClaimTemplate+: { spec+: { resources+: { requests+: requests } } } }, }, '#selector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), @@ -81,15 +90,17 @@ '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), withMatchLabelsMixin(matchLabels): { volumeClaimTemplate+: { spec+: { selector+: { matchLabels+: matchLabels } } } }, }, - '#withAccessModes':: d.fn(help='"AccessModes contains the desired access modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1"', args=[d.arg(name='accessModes', type=d.T.array)]), + '#withAccessModes':: d.fn(help='"accessModes contains the desired access modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1"', args=[d.arg(name='accessModes', type=d.T.array)]), withAccessModes(accessModes): { volumeClaimTemplate+: { spec+: { accessModes: if std.isArray(v=accessModes) then accessModes else [accessModes] } } }, - '#withAccessModesMixin':: d.fn(help='"AccessModes contains the desired access modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='accessModes', type=d.T.array)]), + '#withAccessModesMixin':: d.fn(help='"accessModes contains the desired access modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='accessModes', type=d.T.array)]), withAccessModesMixin(accessModes): { volumeClaimTemplate+: { spec+: { accessModes+: if std.isArray(v=accessModes) then accessModes else [accessModes] } } }, - '#withStorageClassName':: d.fn(help='"Name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1"', args=[d.arg(name='storageClassName', type=d.T.string)]), + '#withStorageClassName':: d.fn(help='"storageClassName is the name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1"', args=[d.arg(name='storageClassName', type=d.T.string)]), withStorageClassName(storageClassName): { volumeClaimTemplate+: { spec+: { storageClassName: storageClassName } } }, + '#withVolumeAttributesClassName':: d.fn(help="\"volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. 
If specified, the CSI driver will create or update the volume with the attributes defined in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass will be applied to the claim but it's not allowed to reset this field to empty string once it is set. If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass will be set by the persistentvolume controller if it exists. If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource exists. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#volumeattributesclass (Alpha) Using this field requires the VolumeAttributesClass feature gate to be enabled.\"", args=[d.arg(name='volumeAttributesClassName', type=d.T.string)]), + withVolumeAttributesClassName(volumeAttributesClassName): { volumeClaimTemplate+: { spec+: { volumeAttributesClassName: volumeAttributesClassName } } }, '#withVolumeMode':: d.fn(help='"volumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec."', args=[d.arg(name='volumeMode', type=d.T.string)]), withVolumeMode(volumeMode): { volumeClaimTemplate+: { spec+: { volumeMode: volumeMode } } }, - '#withVolumeName':: d.fn(help='"VolumeName is the binding reference to the PersistentVolume backing this claim."', args=[d.arg(name='volumeName', type=d.T.string)]), + '#withVolumeName':: d.fn(help='"volumeName is the binding reference to the PersistentVolume backing this claim."', args=[d.arg(name='volumeName', type=d.T.string)]), withVolumeName(volumeName): { volumeClaimTemplate+: { spec+: { volumeName: volumeName } } }, }, }, diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/event.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/event.libsonnet similarity index 90% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/event.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/event.libsonnet index 78021ee8ef7..5211af2599b 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/event.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/event.libsonnet @@ -20,12 +20,10 @@ }, '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. 
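// --- Editor's sketch, not part of the vendored patch: the volumeClaimTemplate spec
// builders above (dataSourceRef, resources, withStorageClassName, the new
// withVolumeAttributesClassName) are plain object mixins that merge with '+'.
// Under that assumption they compose into roughly the object below; every concrete
// name and size here is a made-up placeholder.
{
  volumeClaimTemplate+: {
    spec+: {
      storageClassName: 'fast-ssd',                                 // withStorageClassName('fast-ssd')
      volumeAttributesClassName: 'gold',                            // withVolumeAttributesClassName('gold'), alpha feature gate
      resources+: { requests: { storage: '10Gi' } },                // resources.withRequests({ storage: '10Gi' })
      dataSourceRef+: { kind: 'VolumeSnapshot', name: 'nightly' },  // dataSourceRef.withKind(...) + dataSourceRef.withName(...)
      accessModes: ['ReadWriteOnce'],                               // withAccessModes('ReadWriteOnce') wraps a scalar in a list
    },
  },
}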
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -36,21 +34,21 @@ withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. 
Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { metadata+: { generateName: generateName } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. 
More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { metadata+: { labels+: labels } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. 
An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { metadata+: { namespace: namespace } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, @@ -58,9 +56,9 @@ withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { metadata+: { uid: uid } }, }, '#new':: d.fn(help='new returns an instance of Event', args=[d.arg(name='name', type=d.T.string)]), diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/eventSeries.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/eventSeries.libsonnet similarity index 100% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/eventSeries.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/eventSeries.libsonnet diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/eventSource.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/eventSource.libsonnet similarity index 100% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/eventSource.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/eventSource.libsonnet diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/execAction.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/execAction.libsonnet similarity index 100% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/execAction.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/execAction.libsonnet diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/fcVolumeSource.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/fcVolumeSource.libsonnet new file mode 100644 index 00000000000..31608c3d0f2 --- /dev/null +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/fcVolumeSource.libsonnet @@ -0,0 +1,20 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='fcVolumeSource', url='', help='"Represents a Fibre Channel volume. Fibre Channel volumes can only be mounted as read/write once. Fibre Channel volumes support ownership management and SELinux relabeling."'), + '#withFsType':: d.fn(help='"fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), + withFsType(fsType): { fsType: fsType }, + '#withLun':: d.fn(help='"lun is Optional: FC target lun number"', args=[d.arg(name='lun', type=d.T.integer)]), + withLun(lun): { lun: lun }, + '#withReadOnly':: d.fn(help='"readOnly is Optional: Defaults to false (read/write). 
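// --- Editor's sketch, not part of the vendored patch: minimal use of the Event
// metadata builders shown above. The import path and the core.v1 index are
// assumptions based on the vendor layout in this patch; label/annotation values
// are illustrative only.
local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';
local event = k.core.v1.event;
event.new('upgrade-note')
+ event.metadata.withNamespace('default')
+ event.metadata.withLabels({ app: 'tempo' })
+ event.metadata.withAnnotationsMixin({ note: '1.21 -> 1.29 vendor bump' })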
ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + withReadOnly(readOnly): { readOnly: readOnly }, + '#withTargetWWNs':: d.fn(help='"targetWWNs is Optional: FC target worldwide names (WWNs)"', args=[d.arg(name='targetWWNs', type=d.T.array)]), + withTargetWWNs(targetWWNs): { targetWWNs: if std.isArray(v=targetWWNs) then targetWWNs else [targetWWNs] }, + '#withTargetWWNsMixin':: d.fn(help='"targetWWNs is Optional: FC target worldwide names (WWNs)"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='targetWWNs', type=d.T.array)]), + withTargetWWNsMixin(targetWWNs): { targetWWNs+: if std.isArray(v=targetWWNs) then targetWWNs else [targetWWNs] }, + '#withWwids':: d.fn(help='"wwids Optional: FC volume world wide identifiers (wwids) Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously."', args=[d.arg(name='wwids', type=d.T.array)]), + withWwids(wwids): { wwids: if std.isArray(v=wwids) then wwids else [wwids] }, + '#withWwidsMixin':: d.fn(help='"wwids Optional: FC volume world wide identifiers (wwids) Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='wwids', type=d.T.array)]), + withWwidsMixin(wwids): { wwids+: if std.isArray(v=wwids) then wwids else [wwids] }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/flexPersistentVolumeSource.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/flexPersistentVolumeSource.libsonnet similarity index 50% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/flexPersistentVolumeSource.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/flexPersistentVolumeSource.libsonnet index e2324d0aa86..545fc3a115e 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/flexPersistentVolumeSource.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/flexPersistentVolumeSource.libsonnet @@ -3,20 +3,20 @@ '#':: d.pkg(name='flexPersistentVolumeSource', url='', help='"FlexPersistentVolumeSource represents a generic persistent volume resource that is provisioned/attached using an exec based plugin."'), '#secretRef':: d.obj(help='"SecretReference represents a Secret Reference. 
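// --- Editor's sketch, not part of the vendored patch: the new fcVolumeSource
// builders compose into a plain Fibre Channel volume-source object. The WWN and
// lun values are placeholders; the import path is assumed from the vendor layout.
local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';
local fc = k.core.v1.fcVolumeSource;
fc.withTargetWWNs(['50060e801049cfd1'])  // a scalar is also accepted and wrapped in a list
+ fc.withLun(0)
+ fc.withFsType('ext4')
+ fc.withReadOnly(true)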
It has enough information to retrieve secret in any namespace"'), secretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), withName(name): { secretRef+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { secretRef+: { namespace: namespace } }, }, - '#withDriver':: d.fn(help='"Driver is the name of the driver to use for this volume."', args=[d.arg(name='driver', type=d.T.string)]), + '#withDriver':: d.fn(help='"driver is the name of the driver to use for this volume."', args=[d.arg(name='driver', type=d.T.string)]), withDriver(driver): { driver: driver }, - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". The default filesystem depends on FlexVolume script."', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType is the Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". The default filesystem depends on FlexVolume script."', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { fsType: fsType }, - '#withOptions':: d.fn(help='"Optional: Extra command options if any."', args=[d.arg(name='options', type=d.T.object)]), + '#withOptions':: d.fn(help='"options is Optional: this field holds extra command options if any."', args=[d.arg(name='options', type=d.T.object)]), withOptions(options): { options: options }, - '#withOptionsMixin':: d.fn(help='"Optional: Extra command options if any."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='options', type=d.T.object)]), + '#withOptionsMixin':: d.fn(help='"options is Optional: this field holds extra command options if any."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='options', type=d.T.object)]), withOptionsMixin(options): { options+: options }, - '#withReadOnly':: d.fn(help='"Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly is Optional: defaults to false (read/write). 
ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { readOnly: readOnly }, '#mixin': 'ignore', mixin: self, diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/flexVolumeSource.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/flexVolumeSource.libsonnet similarity index 51% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/flexVolumeSource.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/flexVolumeSource.libsonnet index 09fb22cbb4c..d365d5e863b 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/flexVolumeSource.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/flexVolumeSource.libsonnet @@ -6,15 +6,15 @@ '#withName':: d.fn(help='"Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { secretRef+: { name: name } }, }, - '#withDriver':: d.fn(help='"Driver is the name of the driver to use for this volume."', args=[d.arg(name='driver', type=d.T.string)]), + '#withDriver':: d.fn(help='"driver is the name of the driver to use for this volume."', args=[d.arg(name='driver', type=d.T.string)]), withDriver(driver): { driver: driver }, - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". The default filesystem depends on FlexVolume script."', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". The default filesystem depends on FlexVolume script."', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { fsType: fsType }, - '#withOptions':: d.fn(help='"Optional: Extra command options if any."', args=[d.arg(name='options', type=d.T.object)]), + '#withOptions':: d.fn(help='"options is Optional: this field holds extra command options if any."', args=[d.arg(name='options', type=d.T.object)]), withOptions(options): { options: options }, - '#withOptionsMixin':: d.fn(help='"Optional: Extra command options if any."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='options', type=d.T.object)]), + '#withOptionsMixin':: d.fn(help='"options is Optional: this field holds extra command options if any."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='options', type=d.T.object)]), withOptionsMixin(options): { options+: options }, - '#withReadOnly':: d.fn(help='"Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly is Optional: defaults to false (read/write). 
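// --- Editor's sketch, not part of the vendored patch: flexPersistentVolumeSource
// usage with the renamed lower-case field docs above. Driver, options and secret
// names are placeholders.
local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';
local flexPV = k.core.v1.flexPersistentVolumeSource;
flexPV.withDriver('example.com/lvm')
+ flexPV.withFsType('xfs')
+ flexPV.withOptions({ volumeID: 'vol-123' })
+ flexPV.secretRef.withName('flex-secret')
+ flexPV.secretRef.withNamespace('kube-system')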
ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { readOnly: readOnly }, '#mixin': 'ignore', mixin: self, diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/flockerVolumeSource.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/flockerVolumeSource.libsonnet similarity index 54% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/flockerVolumeSource.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/flockerVolumeSource.libsonnet index 0d140f17ad6..722d636ac98 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/flockerVolumeSource.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/flockerVolumeSource.libsonnet @@ -1,9 +1,9 @@ { local d = (import 'doc-util/main.libsonnet'), '#':: d.pkg(name='flockerVolumeSource', url='', help='"Represents a Flocker volume mounted by the Flocker agent. One and only one of datasetName and datasetUUID should be set. Flocker volumes do not support ownership management or SELinux relabeling."'), - '#withDatasetName':: d.fn(help='"Name of the dataset stored as metadata -> name on the dataset for Flocker should be considered as deprecated"', args=[d.arg(name='datasetName', type=d.T.string)]), + '#withDatasetName':: d.fn(help='"datasetName is Name of the dataset stored as metadata -> name on the dataset for Flocker should be considered as deprecated"', args=[d.arg(name='datasetName', type=d.T.string)]), withDatasetName(datasetName): { datasetName: datasetName }, - '#withDatasetUUID':: d.fn(help='"UUID of the dataset. This is unique identifier of a Flocker dataset"', args=[d.arg(name='datasetUUID', type=d.T.string)]), + '#withDatasetUUID':: d.fn(help='"datasetUUID is the UUID of the dataset. This is unique identifier of a Flocker dataset"', args=[d.arg(name='datasetUUID', type=d.T.string)]), withDatasetUUID(datasetUUID): { datasetUUID: datasetUUID }, '#mixin': 'ignore', mixin: self, diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/gcePersistentDiskVolumeSource.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/gcePersistentDiskVolumeSource.libsonnet new file mode 100644 index 00000000000..e045186a789 --- /dev/null +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/gcePersistentDiskVolumeSource.libsonnet @@ -0,0 +1,14 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='gcePersistentDiskVolumeSource', url='', help='"Represents a Persistent Disk resource in Google Compute Engine.\\n\\nA GCE PD must exist before mounting to a container. The disk must also be in the same GCE project and zone as the kubelet. A GCE PD can only be mounted as read/write once or read-only many times. GCE PDs support ownership management and SELinux relabeling."'), + '#withFsType':: d.fn(help='"fsType is filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. 
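// --- Editor's sketch, not part of the vendored patch: flockerVolumeSource accepts
// exactly one of datasetName or datasetUUID, so only one setter is applied here;
// the UUID is a placeholder.
local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';
k.core.v1.flockerVolumeSource.withDatasetUUID('11111111-2222-3333-4444-555555555555')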
More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='fsType', type=d.T.string)]), + withFsType(fsType): { fsType: fsType }, + '#withPartition':: d.fn(help='"partition is the partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \\"1\\". Similarly, the volume partition for /dev/sda is \\"0\\" (or you can leave the property empty). More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='partition', type=d.T.integer)]), + withPartition(partition): { partition: partition }, + '#withPdName':: d.fn(help='"pdName is unique name of the PD resource in GCE. Used to identify the disk in GCE. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='pdName', type=d.T.string)]), + withPdName(pdName): { pdName: pdName }, + '#withReadOnly':: d.fn(help='"readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='readOnly', type=d.T.boolean)]), + withReadOnly(readOnly): { readOnly: readOnly }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/gitRepoVolumeSource.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/gitRepoVolumeSource.libsonnet similarity index 53% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/gitRepoVolumeSource.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/gitRepoVolumeSource.libsonnet index 5026bd1cb7b..e9e60213059 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/gitRepoVolumeSource.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/gitRepoVolumeSource.libsonnet @@ -1,11 +1,11 @@ { local d = (import 'doc-util/main.libsonnet'), '#':: d.pkg(name='gitRepoVolumeSource', url='', help="\"Represents a volume that is populated with the contents of a git repository. Git repo volumes do not support ownership management. Git repo volumes support SELinux relabeling.\\n\\nDEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod's container.\""), - '#withDirectory':: d.fn(help="\"Target directory name. Must not contain or start with '..'. If '.' is supplied, the volume directory will be the git repository. Otherwise, if specified, the volume will contain the git repository in the subdirectory with the given name.\"", args=[d.arg(name='directory', type=d.T.string)]), + '#withDirectory':: d.fn(help="\"directory is the target directory name. Must not contain or start with '..'. If '.' is supplied, the volume directory will be the git repository. 
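// --- Editor's sketch, not part of the vendored patch: the new GCE persistent disk
// source builders; pdName and partition values are placeholders.
local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';
local gcePd = k.core.v1.gcePersistentDiskVolumeSource;
gcePd.withPdName('tempo-data')
+ gcePd.withPartition(1)
+ gcePd.withFsType('ext4')
+ gcePd.withReadOnly(false)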
Otherwise, if specified, the volume will contain the git repository in the subdirectory with the given name.\"", args=[d.arg(name='directory', type=d.T.string)]), withDirectory(directory): { directory: directory }, - '#withRepository':: d.fn(help='"Repository URL"', args=[d.arg(name='repository', type=d.T.string)]), + '#withRepository':: d.fn(help='"repository is the URL"', args=[d.arg(name='repository', type=d.T.string)]), withRepository(repository): { repository: repository }, - '#withRevision':: d.fn(help='"Commit hash for the specified revision."', args=[d.arg(name='revision', type=d.T.string)]), + '#withRevision':: d.fn(help='"revision is the commit hash for the specified revision."', args=[d.arg(name='revision', type=d.T.string)]), withRevision(revision): { revision: revision }, '#mixin': 'ignore', mixin: self, diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/glusterfsPersistentVolumeSource.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/glusterfsPersistentVolumeSource.libsonnet similarity index 72% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/glusterfsPersistentVolumeSource.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/glusterfsPersistentVolumeSource.libsonnet index 7959032f0ac..d504871c659 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/glusterfsPersistentVolumeSource.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/glusterfsPersistentVolumeSource.libsonnet @@ -1,13 +1,13 @@ { local d = (import 'doc-util/main.libsonnet'), '#':: d.pkg(name='glusterfsPersistentVolumeSource', url='', help='"Represents a Glusterfs mount that lasts the lifetime of a pod. Glusterfs volumes do not support ownership management or SELinux relabeling."'), - '#withEndpoints':: d.fn(help='"EndpointsName is the endpoint name that details Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='endpoints', type=d.T.string)]), + '#withEndpoints':: d.fn(help='"endpoints is the endpoint name that details Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='endpoints', type=d.T.string)]), withEndpoints(endpoints): { endpoints: endpoints }, - '#withEndpointsNamespace':: d.fn(help='"EndpointsNamespace is the namespace that contains Glusterfs endpoint. If this field is empty, the EndpointNamespace defaults to the same namespace as the bound PVC. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='endpointsNamespace', type=d.T.string)]), + '#withEndpointsNamespace':: d.fn(help='"endpointsNamespace is the namespace that contains Glusterfs endpoint. If this field is empty, the EndpointNamespace defaults to the same namespace as the bound PVC. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='endpointsNamespace', type=d.T.string)]), withEndpointsNamespace(endpointsNamespace): { endpointsNamespace: endpointsNamespace }, - '#withPath':: d.fn(help='"Path is the Glusterfs volume path. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='path', type=d.T.string)]), + '#withPath':: d.fn(help='"path is the Glusterfs volume path. 
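// --- Editor's sketch, not part of the vendored patch: gitRepoVolumeSource is
// deprecated upstream (see the help text above) and shown only to illustrate the
// renamed field docs; repository URL and revision are placeholders.
local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';
local gitRepo = k.core.v1.gitRepoVolumeSource;
gitRepo.withRepository('https://example.com/repo.git')
+ gitRepo.withRevision('0123456789abcdef0123456789abcdef01234567')
+ gitRepo.withDirectory('.')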
More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='path', type=d.T.string)]), withPath(path): { path: path }, - '#withReadOnly':: d.fn(help='"ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions. Defaults to false. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly here will force the Glusterfs volume to be mounted with read-only permissions. Defaults to false. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { readOnly: readOnly }, '#mixin': 'ignore', mixin: self, diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/glusterfsVolumeSource.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/glusterfsVolumeSource.libsonnet similarity index 67% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/glusterfsVolumeSource.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/glusterfsVolumeSource.libsonnet index 5f61ee9b7da..1698ec6362a 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/glusterfsVolumeSource.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/glusterfsVolumeSource.libsonnet @@ -1,11 +1,11 @@ { local d = (import 'doc-util/main.libsonnet'), '#':: d.pkg(name='glusterfsVolumeSource', url='', help='"Represents a Glusterfs mount that lasts the lifetime of a pod. Glusterfs volumes do not support ownership management or SELinux relabeling."'), - '#withEndpoints':: d.fn(help='"EndpointsName is the endpoint name that details Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='endpoints', type=d.T.string)]), + '#withEndpoints':: d.fn(help='"endpoints is the endpoint name that details Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='endpoints', type=d.T.string)]), withEndpoints(endpoints): { endpoints: endpoints }, - '#withPath':: d.fn(help='"Path is the Glusterfs volume path. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='path', type=d.T.string)]), + '#withPath':: d.fn(help='"path is the Glusterfs volume path. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='path', type=d.T.string)]), withPath(path): { path: path }, - '#withReadOnly':: d.fn(help='"ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions. Defaults to false. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly here will force the Glusterfs volume to be mounted with read-only permissions. Defaults to false. 
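// --- Editor's sketch, not part of the vendored patch: glusterfsPersistentVolumeSource
// builders, including the PV-only endpointsNamespace field; endpoint and path names
// are placeholders.
local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';
local glusterfsPV = k.core.v1.glusterfsPersistentVolumeSource;
glusterfsPV.withEndpoints('glusterfs-cluster')
+ glusterfsPV.withEndpointsNamespace('storage')
+ glusterfsPV.withPath('kube_vol')
+ glusterfsPV.withReadOnly(true)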
More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { readOnly: readOnly }, '#mixin': 'ignore', mixin: self, diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/grpcAction.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/grpcAction.libsonnet new file mode 100644 index 00000000000..d2d37a0e0e1 --- /dev/null +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/grpcAction.libsonnet @@ -0,0 +1,10 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='grpcAction', url='', help=''), + '#withPort':: d.fn(help='"Port number of the gRPC service. Number must be in the range 1 to 65535."', args=[d.arg(name='port', type=d.T.integer)]), + withPort(port): { port: port }, + '#withService':: d.fn(help='"Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).\\n\\nIf this is not specified, the default behavior is defined by gRPC."', args=[d.arg(name='service', type=d.T.string)]), + withService(service): { service: service }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/hostAlias.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/hostAlias.libsonnet similarity index 100% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/hostAlias.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/hostAlias.libsonnet diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/hostIP.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/hostIP.libsonnet new file mode 100644 index 00000000000..945b1e004c0 --- /dev/null +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/hostIP.libsonnet @@ -0,0 +1,8 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='hostIP', url='', help='"HostIP represents a single IP address allocated to the host."'), + '#withIp':: d.fn(help='"IP is the IP address assigned to the host"', args=[d.arg(name='ip', type=d.T.string)]), + withIp(ip): { ip: ip }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/hostPathVolumeSource.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/hostPathVolumeSource.libsonnet similarity index 83% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/hostPathVolumeSource.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/hostPathVolumeSource.libsonnet index f7e35872a58..1e3dd933627 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/hostPathVolumeSource.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/hostPathVolumeSource.libsonnet @@ -1,9 +1,9 @@ { local d = (import 'doc-util/main.libsonnet'), '#':: d.pkg(name='hostPathVolumeSource', url='', help='"Represents a host path mapped into a pod. Host path volumes do not support ownership management or SELinux relabeling."'), - '#withPath':: d.fn(help='"Path of the directory on the host. If the path is a symlink, it will follow the link to the real path. 
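// --- Editor's sketch, not part of the vendored patch: the new grpcAction object is
// normally merged into a probe; here it is built on its own. Port and service name
// are placeholders.
local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';
local grpc = k.core.v1.grpcAction;
grpc.withPort(9095) + grpc.withService('tempo.health.v1.Health')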
More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath"', args=[d.arg(name='path', type=d.T.string)]), + '#withPath':: d.fn(help='"path of the directory on the host. If the path is a symlink, it will follow the link to the real path. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath"', args=[d.arg(name='path', type=d.T.string)]), withPath(path): { path: path }, - '#withType':: d.fn(help='"Type for HostPath Volume Defaults to \\"\\" More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath"', args=[d.arg(name='type', type=d.T.string)]), + '#withType':: d.fn(help='"type for HostPath Volume Defaults to \\"\\" More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath"', args=[d.arg(name='type', type=d.T.string)]), withType(type): { type: type }, '#mixin': 'ignore', mixin: self, diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/httpGetAction.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/httpGetAction.libsonnet similarity index 100% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/httpGetAction.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/httpGetAction.libsonnet diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/httpHeader.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/httpHeader.libsonnet similarity index 65% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/httpHeader.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/httpHeader.libsonnet index 0a78adb55e8..39644271996 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/httpHeader.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/httpHeader.libsonnet @@ -1,7 +1,7 @@ { local d = (import 'doc-util/main.libsonnet'), '#':: d.pkg(name='httpHeader', url='', help='"HTTPHeader describes a custom header to be used in HTTP probes"'), - '#withName':: d.fn(help='"The header field name"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"The header field name. This will be canonicalized upon output, so case-variant names will be understood as the same header."', args=[d.arg(name='name', type=d.T.string)]), withName(name): { name: name }, '#withValue':: d.fn(help='"The header field value"', args=[d.arg(name='value', type=d.T.string)]), withValue(value): { value: value }, diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/iscsiPersistentVolumeSource.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/iscsiPersistentVolumeSource.libsonnet new file mode 100644 index 00000000000..430270d71af --- /dev/null +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/iscsiPersistentVolumeSource.libsonnet @@ -0,0 +1,35 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='iscsiPersistentVolumeSource', url='', help='"ISCSIPersistentVolumeSource represents an ISCSI disk. ISCSI volumes can only be mounted as read/write once. ISCSI volumes support ownership management and SELinux relabeling."'), + '#secretRef':: d.obj(help='"SecretReference represents a Secret Reference. 
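// --- Editor's sketch, not part of the vendored patch: hostPathVolumeSource with the
// lower-cased field docs above; the path and type values are placeholders.
local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';
local hostPath = k.core.v1.hostPathVolumeSource;
hostPath.withPath('/var/lib/tempo') + hostPath.withType('DirectoryOrCreate')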
It has enough information to retrieve secret in any namespace"'), + secretRef: { + '#withName':: d.fn(help='"name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { secretRef+: { name: name } }, + '#withNamespace':: d.fn(help='"namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), + withNamespace(namespace): { secretRef+: { namespace: namespace } }, + }, + '#withChapAuthDiscovery':: d.fn(help='"chapAuthDiscovery defines whether support iSCSI Discovery CHAP authentication"', args=[d.arg(name='chapAuthDiscovery', type=d.T.boolean)]), + withChapAuthDiscovery(chapAuthDiscovery): { chapAuthDiscovery: chapAuthDiscovery }, + '#withChapAuthSession':: d.fn(help='"chapAuthSession defines whether support iSCSI Session CHAP authentication"', args=[d.arg(name='chapAuthSession', type=d.T.boolean)]), + withChapAuthSession(chapAuthSession): { chapAuthSession: chapAuthSession }, + '#withFsType':: d.fn(help='"fsType is the filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi"', args=[d.arg(name='fsType', type=d.T.string)]), + withFsType(fsType): { fsType: fsType }, + '#withInitiatorName':: d.fn(help='"initiatorName is the custom iSCSI Initiator Name. If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface : will be created for the connection."', args=[d.arg(name='initiatorName', type=d.T.string)]), + withInitiatorName(initiatorName): { initiatorName: initiatorName }, + '#withIqn':: d.fn(help='"iqn is Target iSCSI Qualified Name."', args=[d.arg(name='iqn', type=d.T.string)]), + withIqn(iqn): { iqn: iqn }, + '#withIscsiInterface':: d.fn(help="\"iscsiInterface is the interface Name that uses an iSCSI transport. Defaults to 'default' (tcp).\"", args=[d.arg(name='iscsiInterface', type=d.T.string)]), + withIscsiInterface(iscsiInterface): { iscsiInterface: iscsiInterface }, + '#withLun':: d.fn(help='"lun is iSCSI Target Lun number."', args=[d.arg(name='lun', type=d.T.integer)]), + withLun(lun): { lun: lun }, + '#withPortals':: d.fn(help='"portals is the iSCSI Target Portal List. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260)."', args=[d.arg(name='portals', type=d.T.array)]), + withPortals(portals): { portals: if std.isArray(v=portals) then portals else [portals] }, + '#withPortalsMixin':: d.fn(help='"portals is the iSCSI Target Portal List. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260)."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='portals', type=d.T.array)]), + withPortalsMixin(portals): { portals+: if std.isArray(v=portals) then portals else [portals] }, + '#withReadOnly':: d.fn(help='"readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + withReadOnly(readOnly): { readOnly: readOnly }, + '#withTargetPortal':: d.fn(help='"targetPortal is iSCSI Target Portal. 
The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260)."', args=[d.arg(name='targetPortal', type=d.T.string)]), + withTargetPortal(targetPortal): { targetPortal: targetPortal }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/iscsiVolumeSource.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/iscsiVolumeSource.libsonnet new file mode 100644 index 00000000000..e905fb959b7 --- /dev/null +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/iscsiVolumeSource.libsonnet @@ -0,0 +1,33 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='iscsiVolumeSource', url='', help='"Represents an ISCSI disk. ISCSI volumes can only be mounted as read/write once. ISCSI volumes support ownership management and SELinux relabeling."'), + '#secretRef':: d.obj(help='"LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace."'), + secretRef: { + '#withName':: d.fn(help='"Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { secretRef+: { name: name } }, + }, + '#withChapAuthDiscovery':: d.fn(help='"chapAuthDiscovery defines whether support iSCSI Discovery CHAP authentication"', args=[d.arg(name='chapAuthDiscovery', type=d.T.boolean)]), + withChapAuthDiscovery(chapAuthDiscovery): { chapAuthDiscovery: chapAuthDiscovery }, + '#withChapAuthSession':: d.fn(help='"chapAuthSession defines whether support iSCSI Session CHAP authentication"', args=[d.arg(name='chapAuthSession', type=d.T.boolean)]), + withChapAuthSession(chapAuthSession): { chapAuthSession: chapAuthSession }, + '#withFsType':: d.fn(help='"fsType is the filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi"', args=[d.arg(name='fsType', type=d.T.string)]), + withFsType(fsType): { fsType: fsType }, + '#withInitiatorName':: d.fn(help='"initiatorName is the custom iSCSI Initiator Name. If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface : will be created for the connection."', args=[d.arg(name='initiatorName', type=d.T.string)]), + withInitiatorName(initiatorName): { initiatorName: initiatorName }, + '#withIqn':: d.fn(help='"iqn is the target iSCSI Qualified Name."', args=[d.arg(name='iqn', type=d.T.string)]), + withIqn(iqn): { iqn: iqn }, + '#withIscsiInterface':: d.fn(help="\"iscsiInterface is the interface Name that uses an iSCSI transport. Defaults to 'default' (tcp).\"", args=[d.arg(name='iscsiInterface', type=d.T.string)]), + withIscsiInterface(iscsiInterface): { iscsiInterface: iscsiInterface }, + '#withLun':: d.fn(help='"lun represents iSCSI Target Lun number."', args=[d.arg(name='lun', type=d.T.integer)]), + withLun(lun): { lun: lun }, + '#withPortals':: d.fn(help='"portals is the iSCSI Target Portal List. 
The portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260)."', args=[d.arg(name='portals', type=d.T.array)]), + withPortals(portals): { portals: if std.isArray(v=portals) then portals else [portals] }, + '#withPortalsMixin':: d.fn(help='"portals is the iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260)."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='portals', type=d.T.array)]), + withPortalsMixin(portals): { portals+: if std.isArray(v=portals) then portals else [portals] }, + '#withReadOnly':: d.fn(help='"readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + withReadOnly(readOnly): { readOnly: readOnly }, + '#withTargetPortal':: d.fn(help='"targetPortal is iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260)."', args=[d.arg(name='targetPortal', type=d.T.string)]), + withTargetPortal(targetPortal): { targetPortal: targetPortal }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/keyToPath.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/keyToPath.libsonnet new file mode 100644 index 00000000000..288c94ad40b --- /dev/null +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/keyToPath.libsonnet @@ -0,0 +1,12 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='keyToPath', url='', help='"Maps a string key to a path within a volume."'), + '#withKey':: d.fn(help='"key is the key to project."', args=[d.arg(name='key', type=d.T.string)]), + withKey(key): { key: key }, + '#withMode':: d.fn(help='"mode is Optional: mode bits used to set permissions on this file. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set."', args=[d.arg(name='mode', type=d.T.integer)]), + withMode(mode): { mode: mode }, + '#withPath':: d.fn(help="\"path is the relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. 
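Editor's note: the two new iSCSI files above carry the regenerated 1.29 field docs but expose the same with* setters as before. A sketch of composing a pod-level iSCSI volume source from the functions shown above (target portal, IQN and LUN are made up; the import path assumes the vendored tree is on the jsonnet path):

  local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';
  local iscsi = k.core.v1.iscsiVolumeSource;

  // Renders { targetPortal: ..., iqn: ..., lun: 0, fsType: 'ext4' }
  iscsi.withTargetPortal('10.0.0.10:3260') +
  iscsi.withIqn('iqn.2016-04.com.example:storage.disk1') +
  iscsi.withLun(0) +
  iscsi.withFsType('ext4')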
May not start with the string '..'.\"", args=[d.arg(name='path', type=d.T.string)]), + withPath(path): { path: path }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/lifecycle.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/lifecycle.libsonnet similarity index 89% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/lifecycle.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/lifecycle.libsonnet index ca610c685b8..84282f59e9b 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/lifecycle.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/lifecycle.libsonnet @@ -1,7 +1,7 @@ { local d = (import 'doc-util/main.libsonnet'), '#':: d.pkg(name='lifecycle', url='', help='"Lifecycle describes actions that the management system should take in response to container lifecycle events. For the PostStart and PreStop lifecycle handlers, management of the container blocks until the action is complete, unless the container process fails, in which case the handler is aborted."'), - '#postStart':: d.obj(help='"Handler defines a specific action that should be taken"'), + '#postStart':: d.obj(help='"LifecycleHandler defines a specific action that should be taken in a lifecycle hook. One and only one of the fields, except TCPSocket must be specified."'), postStart: { '#exec':: d.obj(help='"ExecAction describes a \\"run in container\\" action."'), exec: { @@ -25,6 +25,11 @@ '#withScheme':: d.fn(help='"Scheme to use for connecting to the host. Defaults to HTTP."', args=[d.arg(name='scheme', type=d.T.string)]), withScheme(scheme): { postStart+: { httpGet+: { scheme: scheme } } }, }, + '#sleep':: d.obj(help='"SleepAction describes a \\"sleep\\" action."'), + sleep: { + '#withSeconds':: d.fn(help='"Seconds is the number of seconds to sleep."', args=[d.arg(name='seconds', type=d.T.integer)]), + withSeconds(seconds): { postStart+: { sleep+: { seconds: seconds } } }, + }, '#tcpSocket':: d.obj(help='"TCPSocketAction describes an action based on opening a socket"'), tcpSocket: { '#withHost':: d.fn(help='"Optional: Host name to connect to, defaults to the pod IP."', args=[d.arg(name='host', type=d.T.string)]), @@ -33,7 +38,7 @@ withPort(port): { postStart+: { tcpSocket+: { port: port } } }, }, }, - '#preStop':: d.obj(help='"Handler defines a specific action that should be taken"'), + '#preStop':: d.obj(help='"LifecycleHandler defines a specific action that should be taken in a lifecycle hook. One and only one of the fields, except TCPSocket must be specified."'), preStop: { '#exec':: d.obj(help='"ExecAction describes a \\"run in container\\" action."'), exec: { @@ -57,6 +62,11 @@ '#withScheme':: d.fn(help='"Scheme to use for connecting to the host. 
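Editor's note: keyToPath above is the items[] entry used by configMap and secret volume sources; the regenerated help spells out the 0000-0777 octal versus 0-511 decimal rule for mode. A sketch built only from the setters shown above (key, path and mode are illustrative):

  local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';
  local item = k.core.v1.keyToPath;

  // Renders { key: 'config.yaml', path: 'conf/config.yaml', mode: 292 }  (292 decimal == 0444 octal)
  item.withKey('config.yaml') + item.withPath('conf/config.yaml') + item.withMode(292)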
Defaults to HTTP."', args=[d.arg(name='scheme', type=d.T.string)]), withScheme(scheme): { preStop+: { httpGet+: { scheme: scheme } } }, }, + '#sleep':: d.obj(help='"SleepAction describes a \\"sleep\\" action."'), + sleep: { + '#withSeconds':: d.fn(help='"Seconds is the number of seconds to sleep."', args=[d.arg(name='seconds', type=d.T.integer)]), + withSeconds(seconds): { preStop+: { sleep+: { seconds: seconds } } }, + }, '#tcpSocket':: d.obj(help='"TCPSocketAction describes an action based on opening a socket"'), tcpSocket: { '#withHost':: d.fn(help='"Optional: Host name to connect to, defaults to the pod IP."', args=[d.arg(name='host', type=d.T.string)]), diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/handler.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/lifecycleHandler.libsonnet similarity index 88% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/handler.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/lifecycleHandler.libsonnet index 5067f1ffc93..1e967c78bbd 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/handler.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/lifecycleHandler.libsonnet @@ -1,6 +1,6 @@ { local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='handler', url='', help='"Handler defines a specific action that should be taken"'), + '#':: d.pkg(name='lifecycleHandler', url='', help='"LifecycleHandler defines a specific action that should be taken in a lifecycle hook. One and only one of the fields, except TCPSocket must be specified."'), '#exec':: d.obj(help='"ExecAction describes a \\"run in container\\" action."'), exec: { '#withCommand':: d.fn(help="\"Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy.\"", args=[d.arg(name='command', type=d.T.array)]), @@ -23,6 +23,11 @@ '#withScheme':: d.fn(help='"Scheme to use for connecting to the host. 
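Editor's note: both lifecycle hooks above gain the new sleep action, and their help text now refers to LifecycleHandler rather than Handler. A sketch of a 10-second preStop sleep using the generated functions shown in this hunk (note that 1.29 ships the field behind the PodLifecycleSleepAction feature gate, so the cluster must have it enabled):

  local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';
  local lifecycle = k.core.v1.lifecycle;

  // Renders { preStop: { sleep: { seconds: 10 } } }, ready to mix into a container definition.
  lifecycle.preStop.sleep.withSeconds(10)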
Defaults to HTTP."', args=[d.arg(name='scheme', type=d.T.string)]), withScheme(scheme): { httpGet+: { scheme: scheme } }, }, + '#sleep':: d.obj(help='"SleepAction describes a \\"sleep\\" action."'), + sleep: { + '#withSeconds':: d.fn(help='"Seconds is the number of seconds to sleep."', args=[d.arg(name='seconds', type=d.T.integer)]), + withSeconds(seconds): { sleep+: { seconds: seconds } }, + }, '#tcpSocket':: d.obj(help='"TCPSocketAction describes an action based on opening a socket"'), tcpSocket: { '#withHost':: d.fn(help='"Optional: Host name to connect to, defaults to the pod IP."', args=[d.arg(name='host', type=d.T.string)]), diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/limitRange.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/limitRange.libsonnet similarity index 84% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/limitRange.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/limitRange.libsonnet index eea20a3888d..7310f781b2e 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/limitRange.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/limitRange.libsonnet @@ -3,12 +3,10 @@ '#':: d.pkg(name='limitRange', url='', help='"LimitRange sets resource usage limits for each kind of resource in a Namespace."'), '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. 
This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -19,21 +17,21 @@ withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. 
If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { metadata+: { generateName: generateName } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { metadata+: { labels+: labels } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". 
The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { metadata+: { namespace: namespace } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, @@ -41,9 +39,9 @@ withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. 
They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { metadata+: { uid: uid } }, }, '#new':: d.fn(help='new returns an instance of LimitRange', args=[d.arg(name='name', type=d.T.string)]), diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/limitRangeItem.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/limitRangeItem.libsonnet similarity index 100% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/limitRangeItem.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/limitRangeItem.libsonnet diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/limitRangeSpec.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/limitRangeSpec.libsonnet similarity index 100% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/limitRangeSpec.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/limitRangeSpec.libsonnet diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/loadBalancerIngress.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/loadBalancerIngress.libsonnet similarity index 68% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/loadBalancerIngress.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/loadBalancerIngress.libsonnet index fae81edd0e7..17d2f7ea15f 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/loadBalancerIngress.libsonnet +++ 
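Editor's note: the ObjectMeta hunks above track the 1.29 API docs: withClusterName is gone because the field was removed upstream, selfLink is documented as a legacy no-op, and the More info links move to kubernetes.io. The remaining setters compose exactly as before; a sketch against the limitRange package shown above (name, namespace and labels are illustrative):

  local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';
  local limitRange = k.core.v1.limitRange;

  limitRange.new('defaults') +
  limitRange.metadata.withNamespace('app') +
  limitRange.metadata.withLabels({ team: 'platform' })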
b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/loadBalancerIngress.libsonnet @@ -5,6 +5,8 @@ withHostname(hostname): { hostname: hostname }, '#withIp':: d.fn(help='"IP is set for load-balancer ingress points that are IP based (typically GCE or OpenStack load-balancers)"', args=[d.arg(name='ip', type=d.T.string)]), withIp(ip): { ip: ip }, + '#withIpMode':: d.fn(help="\"IPMode specifies how the load-balancer IP behaves, and may only be specified when the ip field is specified. Setting this to \\\"VIP\\\" indicates that traffic is delivered to the node with the destination set to the load-balancer's IP and port. Setting this to \\\"Proxy\\\" indicates that traffic is delivered to the node or pod with the destination set to the node's IP and node port or the pod's IP and port. Service implementations may use this information to adjust traffic routing.\"", args=[d.arg(name='ipMode', type=d.T.string)]), + withIpMode(ipMode): { ipMode: ipMode }, '#withPorts':: d.fn(help='"Ports is a list of records of service ports If used, every port defined in the service should have an entry in it"', args=[d.arg(name='ports', type=d.T.array)]), withPorts(ports): { ports: if std.isArray(v=ports) then ports else [ports] }, '#withPortsMixin':: d.fn(help='"Ports is a list of records of service ports If used, every port defined in the service should have an entry in it"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ports', type=d.T.array)]), diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/loadBalancerStatus.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/loadBalancerStatus.libsonnet similarity index 100% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/loadBalancerStatus.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/loadBalancerStatus.libsonnet diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/localObjectReference.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/localObjectReference.libsonnet similarity index 100% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/localObjectReference.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/localObjectReference.libsonnet diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/localVolumeSource.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/localVolumeSource.libsonnet new file mode 100644 index 00000000000..420a881e95f --- /dev/null +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/localVolumeSource.libsonnet @@ -0,0 +1,10 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='localVolumeSource', url='', help='"Local represents directly-attached storage with node affinity (Beta feature)"'), + '#withFsType':: d.fn(help='"fsType is the filesystem type to mount. It applies only when the Path is a block device. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". The default value is to auto-select a filesystem if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), + withFsType(fsType): { fsType: fsType }, + '#withPath':: d.fn(help='"path of the full path to the volume on the node. 
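Editor's note: withIpMode is the only functional addition in the loadBalancerIngress hunk and mirrors the 1.29 Service status field. A sketch of the status entry a load-balancer controller might publish (the address is illustrative):

  local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';
  local lbIngress = k.core.v1.loadBalancerIngress;

  // Renders { ip: '203.0.113.7', ipMode: 'Proxy' }
  lbIngress.withIp('203.0.113.7') + lbIngress.withIpMode('Proxy')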
It can be either a directory or block device (disk, partition, ...)."', args=[d.arg(name='path', type=d.T.string)]), + withPath(path): { path: path }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/main.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/main.libsonnet similarity index 92% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/main.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/main.libsonnet index b4d784e4866..ef9fbe09d76 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/main.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/main.libsonnet @@ -13,7 +13,9 @@ cephFSVolumeSource: (import 'cephFSVolumeSource.libsonnet'), cinderPersistentVolumeSource: (import 'cinderPersistentVolumeSource.libsonnet'), cinderVolumeSource: (import 'cinderVolumeSource.libsonnet'), + claimSource: (import 'claimSource.libsonnet'), clientIPConfig: (import 'clientIPConfig.libsonnet'), + clusterTrustBundleProjection: (import 'clusterTrustBundleProjection.libsonnet'), componentCondition: (import 'componentCondition.libsonnet'), componentStatus: (import 'componentStatus.libsonnet'), configMap: (import 'configMap.libsonnet'), @@ -25,6 +27,7 @@ container: (import 'container.libsonnet'), containerImage: (import 'containerImage.libsonnet'), containerPort: (import 'containerPort.libsonnet'), + containerResizePolicy: (import 'containerResizePolicy.libsonnet'), containerState: (import 'containerState.libsonnet'), containerStateRunning: (import 'containerStateRunning.libsonnet'), containerStateTerminated: (import 'containerStateTerminated.libsonnet'), @@ -45,7 +48,6 @@ envVar: (import 'envVar.libsonnet'), envVarSource: (import 'envVarSource.libsonnet'), ephemeralContainer: (import 'ephemeralContainer.libsonnet'), - ephemeralContainers: (import 'ephemeralContainers.libsonnet'), ephemeralVolumeSource: (import 'ephemeralVolumeSource.libsonnet'), event: (import 'event.libsonnet'), eventSeries: (import 'eventSeries.libsonnet'), @@ -59,8 +61,9 @@ gitRepoVolumeSource: (import 'gitRepoVolumeSource.libsonnet'), glusterfsPersistentVolumeSource: (import 'glusterfsPersistentVolumeSource.libsonnet'), glusterfsVolumeSource: (import 'glusterfsVolumeSource.libsonnet'), - handler: (import 'handler.libsonnet'), + grpcAction: (import 'grpcAction.libsonnet'), hostAlias: (import 'hostAlias.libsonnet'), + hostIP: (import 'hostIP.libsonnet'), hostPathVolumeSource: (import 'hostPathVolumeSource.libsonnet'), httpGetAction: (import 'httpGetAction.libsonnet'), httpHeader: (import 'httpHeader.libsonnet'), @@ -68,6 +71,7 @@ iscsiVolumeSource: (import 'iscsiVolumeSource.libsonnet'), keyToPath: (import 'keyToPath.libsonnet'), lifecycle: (import 'lifecycle.libsonnet'), + lifecycleHandler: (import 'lifecycleHandler.libsonnet'), limitRange: (import 'limitRange.libsonnet'), limitRangeItem: (import 'limitRangeItem.libsonnet'), limitRangeSpec: (import 'limitRangeSpec.libsonnet'), @@ -75,6 +79,7 @@ loadBalancerStatus: (import 'loadBalancerStatus.libsonnet'), localObjectReference: (import 'localObjectReference.libsonnet'), localVolumeSource: (import 'localVolumeSource.libsonnet'), + modifyVolumeStatus: (import 'modifyVolumeStatus.libsonnet'), namespace: (import 'namespace.libsonnet'), namespaceCondition: (import 
'namespaceCondition.libsonnet'), namespaceSpec: (import 'namespaceSpec.libsonnet'), @@ -113,7 +118,11 @@ podDNSConfig: (import 'podDNSConfig.libsonnet'), podDNSConfigOption: (import 'podDNSConfigOption.libsonnet'), podIP: (import 'podIP.libsonnet'), + podOS: (import 'podOS.libsonnet'), podReadinessGate: (import 'podReadinessGate.libsonnet'), + podResourceClaim: (import 'podResourceClaim.libsonnet'), + podResourceClaimStatus: (import 'podResourceClaimStatus.libsonnet'), + podSchedulingGate: (import 'podSchedulingGate.libsonnet'), podSecurityContext: (import 'podSecurityContext.libsonnet'), podSpec: (import 'podSpec.libsonnet'), podStatus: (import 'podStatus.libsonnet'), @@ -131,6 +140,7 @@ replicationControllerCondition: (import 'replicationControllerCondition.libsonnet'), replicationControllerSpec: (import 'replicationControllerSpec.libsonnet'), replicationControllerStatus: (import 'replicationControllerStatus.libsonnet'), + resourceClaim: (import 'resourceClaim.libsonnet'), resourceFieldSelector: (import 'resourceFieldSelector.libsonnet'), resourceQuota: (import 'resourceQuota.libsonnet'), resourceQuotaSpec: (import 'resourceQuotaSpec.libsonnet'), @@ -156,6 +166,7 @@ serviceSpec: (import 'serviceSpec.libsonnet'), serviceStatus: (import 'serviceStatus.libsonnet'), sessionAffinityConfig: (import 'sessionAffinityConfig.libsonnet'), + sleepAction: (import 'sleepAction.libsonnet'), storageOSPersistentVolumeSource: (import 'storageOSPersistentVolumeSource.libsonnet'), storageOSVolumeSource: (import 'storageOSVolumeSource.libsonnet'), sysctl: (import 'sysctl.libsonnet'), @@ -166,11 +177,13 @@ topologySelectorTerm: (import 'topologySelectorTerm.libsonnet'), topologySpreadConstraint: (import 'topologySpreadConstraint.libsonnet'), typedLocalObjectReference: (import 'typedLocalObjectReference.libsonnet'), + typedObjectReference: (import 'typedObjectReference.libsonnet'), volume: (import 'volume.libsonnet'), volumeDevice: (import 'volumeDevice.libsonnet'), volumeMount: (import 'volumeMount.libsonnet'), volumeNodeAffinity: (import 'volumeNodeAffinity.libsonnet'), volumeProjection: (import 'volumeProjection.libsonnet'), + volumeResourceRequirements: (import 'volumeResourceRequirements.libsonnet'), vsphereVirtualDiskVolumeSource: (import 'vsphereVirtualDiskVolumeSource.libsonnet'), weightedPodAffinityTerm: (import 'weightedPodAffinityTerm.libsonnet'), windowsSecurityContextOptions: (import 'windowsSecurityContextOptions.libsonnet'), diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/modifyVolumeStatus.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/modifyVolumeStatus.libsonnet new file mode 100644 index 00000000000..7ff98a27d28 --- /dev/null +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/modifyVolumeStatus.libsonnet @@ -0,0 +1,8 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='modifyVolumeStatus', url='', help='"ModifyVolumeStatus represents the status object of ControllerModifyVolume operation"'), + '#withTargetVolumeAttributesClassName':: d.fn(help='"targetVolumeAttributesClassName is the name of the VolumeAttributesClass the PVC currently being reconciled"', args=[d.arg(name='targetVolumeAttributesClassName', type=d.T.string)]), + withTargetVolumeAttributesClassName(targetVolumeAttributesClassName): { targetVolumeAttributesClassName: targetVolumeAttributesClassName }, + '#mixin': 'ignore', + mixin: self, +} diff --git 
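Editor's note: the core/v1 index above rewires the package for 1.29: handler becomes lifecycleHandler, ephemeralContainers is dropped, and new entries such as sleepAction, podSchedulingGate, hostIP, modifyVolumeStatus and volumeResourceRequirements are registered. Consumption does not change; assuming the standard top-level main.libsonnet entry point, the renamed and added members stay reachable through the same index, e.g.:

  local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';

  {
    // Renamed package: handler -> lifecycleHandler; renders { sleep: { seconds: 5 } }.
    preStopHook: k.core.v1.lifecycleHandler.sleep.withSeconds(5),
    // One of the newly registered 1.29 packages, shown above.
    modifyStatus: k.core.v1.modifyVolumeStatus.withTargetVolumeAttributesClassName('gold'),
  }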
a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/namespace.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/namespace.libsonnet similarity index 84% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/namespace.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/namespace.libsonnet index 520aec3462e..d95602b3c43 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/namespace.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/namespace.libsonnet @@ -3,12 +3,10 @@ '#':: d.pkg(name='namespace', url='', help='"Namespace provides a scope for Names. Use of multiple namespaces is optional."'), '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. 
Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -19,21 +17,21 @@ withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { metadata+: { generateName: generateName } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { metadata+: { labels+: labels } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. 
Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { metadata+: { namespace: namespace } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, @@ -41,9 +39,9 @@ withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { metadata+: { uid: uid } }, }, '#new':: d.fn(help='new returns an instance of Namespace', args=[d.arg(name='name', type=d.T.string)]), diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/namespaceCondition.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/namespaceCondition.libsonnet similarity index 100% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/namespaceCondition.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/namespaceCondition.libsonnet diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/namespaceSpec.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/namespaceSpec.libsonnet similarity index 100% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/namespaceSpec.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/namespaceSpec.libsonnet diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/namespaceStatus.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/namespaceStatus.libsonnet similarity index 100% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/namespaceStatus.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/namespaceStatus.libsonnet diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/nfsVolumeSource.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/nfsVolumeSource.libsonnet similarity index 81% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/nfsVolumeSource.libsonnet rename to 
example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/nfsVolumeSource.libsonnet index 723a927ebda..ce26840604e 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/nfsVolumeSource.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/nfsVolumeSource.libsonnet @@ -1,11 +1,11 @@ { local d = (import 'doc-util/main.libsonnet'), '#':: d.pkg(name='nfsVolumeSource', url='', help='"Represents an NFS mount that lasts the lifetime of a pod. NFS volumes do not support ownership management or SELinux relabeling."'), - '#withPath':: d.fn(help='"Path that is exported by the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs"', args=[d.arg(name='path', type=d.T.string)]), + '#withPath':: d.fn(help='"path that is exported by the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs"', args=[d.arg(name='path', type=d.T.string)]), withPath(path): { path: path }, - '#withReadOnly':: d.fn(help='"ReadOnly here will force the NFS export to be mounted with read-only permissions. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs"', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly here will force the NFS export to be mounted with read-only permissions. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs"', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { readOnly: readOnly }, - '#withServer':: d.fn(help='"Server is the hostname or IP address of the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs"', args=[d.arg(name='server', type=d.T.string)]), + '#withServer':: d.fn(help='"server is the hostname or IP address of the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs"', args=[d.arg(name='server', type=d.T.string)]), withServer(server): { server: server }, '#mixin': 'ignore', mixin: self, diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/node.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/node.libsonnet similarity index 87% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/node.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/node.libsonnet index 541fd34b1fc..6303a35b316 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/node.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/node.libsonnet @@ -3,12 +3,10 @@ '#':: d.pkg(name='node', url='', help='"Node is a worker node in Kubernetes. Each node will have a unique identifier in the cache (i.e. in etcd)."'), '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. 
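Editor's note: the nfsVolumeSource hunk only lower-cases the field docs (path, readOnly, server); behaviour is unchanged. A sketch using the setters shown in that hunk (server and export path are illustrative):

  local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';
  local nfs = k.core.v1.nfsVolumeSource;

  // Renders { server: 'nfs.internal.example', path: '/exports/data', readOnly: true }
  nfs.withServer('nfs.internal.example') + nfs.withPath('/exports/data') + nfs.withReadOnly(true)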
More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -19,21 +17,21 @@ withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. 
If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { metadata+: { generateName: generateName } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { metadata+: { labels+: labels } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. 
Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { metadata+: { namespace: namespace } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, @@ -41,9 +39,9 @@ withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. 
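A minimal sketch of the ObjectMeta builders in the 1.29 library (same assumed import path; the label and annotation values are placeholders). Since withClusterName is removed from the metadata object, any callers still chaining it have to drop that call:

local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';
local node = k.core.v1.node;

node.new('worker-1')
+ node.metadata.withLabels({ 'topology.kubernetes.io/zone': 'zone-a' })
// withAnnotationsMixin appends to annotations already set instead of replacing them.
+ node.metadata.withAnnotationsMixin({ 'example.com/owner': 'infra' })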
Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { metadata+: { uid: uid } }, }, '#new':: d.fn(help='new returns an instance of Node', args=[d.arg(name='name', type=d.T.string)]), @@ -53,9 +51,9 @@ } + self.metadata.withName(name=name), '#spec':: d.obj(help='"NodeSpec describes the attributes that a node is created with."'), spec: { - '#configSource':: d.obj(help='"NodeConfigSource specifies a source of node configuration. Exactly one subfield (excluding metadata) must be non-nil."'), + '#configSource':: d.obj(help='"NodeConfigSource specifies a source of node configuration. Exactly one subfield (excluding metadata) must be non-nil. This API is deprecated since 1.22"'), configSource: { - '#configMap':: d.obj(help='"ConfigMapNodeConfigSource contains the information to reference a ConfigMap as a config source for the Node."'), + '#configMap':: d.obj(help='"ConfigMapNodeConfigSource contains the information to reference a ConfigMap as a config source for the Node. This API is deprecated since 1.22: https://git.k8s.io/enhancements/keps/sig-node/281-dynamic-kubelet-configuration"'), configMap: { '#withKubeletConfigKey':: d.fn(help='"KubeletConfigKey declares which key of the referenced ConfigMap corresponds to the KubeletConfiguration structure This field is required in all cases."', args=[d.arg(name='kubeletConfigKey', type=d.T.string)]), withKubeletConfigKey(kubeletConfigKey): { spec+: { configSource+: { configMap+: { kubeletConfigKey: kubeletConfigKey } } } }, diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/nodeAddress.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/nodeAddress.libsonnet similarity index 100% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/nodeAddress.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/nodeAddress.libsonnet diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/nodeAffinity.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/nodeAffinity.libsonnet similarity index 100% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/nodeAffinity.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/nodeAffinity.libsonnet diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/nodeCondition.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/nodeCondition.libsonnet similarity index 100% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/nodeCondition.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/nodeCondition.libsonnet diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/nodeConfigSource.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/nodeConfigSource.libsonnet similarity index 89% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/nodeConfigSource.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/nodeConfigSource.libsonnet index f1f5b17762c..4845bb030b7 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/nodeConfigSource.libsonnet +++ 
b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/nodeConfigSource.libsonnet @@ -1,7 +1,7 @@ { local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='nodeConfigSource', url='', help='"NodeConfigSource specifies a source of node configuration. Exactly one subfield (excluding metadata) must be non-nil."'), - '#configMap':: d.obj(help='"ConfigMapNodeConfigSource contains the information to reference a ConfigMap as a config source for the Node."'), + '#':: d.pkg(name='nodeConfigSource', url='', help='"NodeConfigSource specifies a source of node configuration. Exactly one subfield (excluding metadata) must be non-nil. This API is deprecated since 1.22"'), + '#configMap':: d.obj(help='"ConfigMapNodeConfigSource contains the information to reference a ConfigMap as a config source for the Node. This API is deprecated since 1.22: https://git.k8s.io/enhancements/keps/sig-node/281-dynamic-kubelet-configuration"'), configMap: { '#withKubeletConfigKey':: d.fn(help='"KubeletConfigKey declares which key of the referenced ConfigMap corresponds to the KubeletConfiguration structure This field is required in all cases."', args=[d.arg(name='kubeletConfigKey', type=d.T.string)]), withKubeletConfigKey(kubeletConfigKey): { configMap+: { kubeletConfigKey: kubeletConfigKey } }, diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/nodeConfigStatus.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/nodeConfigStatus.libsonnet similarity index 92% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/nodeConfigStatus.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/nodeConfigStatus.libsonnet index 3648b5e4835..d79efd2857d 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/nodeConfigStatus.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/nodeConfigStatus.libsonnet @@ -1,9 +1,9 @@ { local d = (import 'doc-util/main.libsonnet'), '#':: d.pkg(name='nodeConfigStatus', url='', help='"NodeConfigStatus describes the status of the config assigned by Node.Spec.ConfigSource."'), - '#active':: d.obj(help='"NodeConfigSource specifies a source of node configuration. Exactly one subfield (excluding metadata) must be non-nil."'), + '#active':: d.obj(help='"NodeConfigSource specifies a source of node configuration. Exactly one subfield (excluding metadata) must be non-nil. This API is deprecated since 1.22"'), active: { - '#configMap':: d.obj(help='"ConfigMapNodeConfigSource contains the information to reference a ConfigMap as a config source for the Node."'), + '#configMap':: d.obj(help='"ConfigMapNodeConfigSource contains the information to reference a ConfigMap as a config source for the Node. 
This API is deprecated since 1.22: https://git.k8s.io/enhancements/keps/sig-node/281-dynamic-kubelet-configuration"'), configMap: { '#withKubeletConfigKey':: d.fn(help='"KubeletConfigKey declares which key of the referenced ConfigMap corresponds to the KubeletConfiguration structure This field is required in all cases."', args=[d.arg(name='kubeletConfigKey', type=d.T.string)]), withKubeletConfigKey(kubeletConfigKey): { active+: { configMap+: { kubeletConfigKey: kubeletConfigKey } } }, @@ -17,9 +17,9 @@ withUid(uid): { active+: { configMap+: { uid: uid } } }, }, }, - '#assigned':: d.obj(help='"NodeConfigSource specifies a source of node configuration. Exactly one subfield (excluding metadata) must be non-nil."'), + '#assigned':: d.obj(help='"NodeConfigSource specifies a source of node configuration. Exactly one subfield (excluding metadata) must be non-nil. This API is deprecated since 1.22"'), assigned: { - '#configMap':: d.obj(help='"ConfigMapNodeConfigSource contains the information to reference a ConfigMap as a config source for the Node."'), + '#configMap':: d.obj(help='"ConfigMapNodeConfigSource contains the information to reference a ConfigMap as a config source for the Node. This API is deprecated since 1.22: https://git.k8s.io/enhancements/keps/sig-node/281-dynamic-kubelet-configuration"'), configMap: { '#withKubeletConfigKey':: d.fn(help='"KubeletConfigKey declares which key of the referenced ConfigMap corresponds to the KubeletConfiguration structure This field is required in all cases."', args=[d.arg(name='kubeletConfigKey', type=d.T.string)]), withKubeletConfigKey(kubeletConfigKey): { assigned+: { configMap+: { kubeletConfigKey: kubeletConfigKey } } }, @@ -33,9 +33,9 @@ withUid(uid): { assigned+: { configMap+: { uid: uid } } }, }, }, - '#lastKnownGood':: d.obj(help='"NodeConfigSource specifies a source of node configuration. Exactly one subfield (excluding metadata) must be non-nil."'), + '#lastKnownGood':: d.obj(help='"NodeConfigSource specifies a source of node configuration. Exactly one subfield (excluding metadata) must be non-nil. This API is deprecated since 1.22"'), lastKnownGood: { - '#configMap':: d.obj(help='"ConfigMapNodeConfigSource contains the information to reference a ConfigMap as a config source for the Node."'), + '#configMap':: d.obj(help='"ConfigMapNodeConfigSource contains the information to reference a ConfigMap as a config source for the Node. 
This API is deprecated since 1.22: https://git.k8s.io/enhancements/keps/sig-node/281-dynamic-kubelet-configuration"'), configMap: { '#withKubeletConfigKey':: d.fn(help='"KubeletConfigKey declares which key of the referenced ConfigMap corresponds to the KubeletConfiguration structure This field is required in all cases."', args=[d.arg(name='kubeletConfigKey', type=d.T.string)]), withKubeletConfigKey(kubeletConfigKey): { lastKnownGood+: { configMap+: { kubeletConfigKey: kubeletConfigKey } } }, diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/nodeDaemonEndpoints.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/nodeDaemonEndpoints.libsonnet similarity index 100% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/nodeDaemonEndpoints.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/nodeDaemonEndpoints.libsonnet diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/nodeSelector.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/nodeSelector.libsonnet similarity index 100% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/nodeSelector.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/nodeSelector.libsonnet diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/nodeSelectorRequirement.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/nodeSelectorRequirement.libsonnet similarity index 100% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/nodeSelectorRequirement.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/nodeSelectorRequirement.libsonnet diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/nodeSelectorTerm.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/nodeSelectorTerm.libsonnet similarity index 100% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/nodeSelectorTerm.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/nodeSelectorTerm.libsonnet diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/nodeSpec.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/nodeSpec.libsonnet similarity index 95% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/nodeSpec.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/nodeSpec.libsonnet index fddf879d618..a06625e279e 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/nodeSpec.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/nodeSpec.libsonnet @@ -1,9 +1,9 @@ { local d = (import 'doc-util/main.libsonnet'), '#':: d.pkg(name='nodeSpec', url='', help='"NodeSpec describes the attributes that a node is created with."'), - '#configSource':: d.obj(help='"NodeConfigSource specifies a source of node configuration. Exactly one subfield (excluding metadata) must be non-nil."'), + '#configSource':: d.obj(help='"NodeConfigSource specifies a source of node configuration. 
Exactly one subfield (excluding metadata) must be non-nil. This API is deprecated since 1.22"'), configSource: { - '#configMap':: d.obj(help='"ConfigMapNodeConfigSource contains the information to reference a ConfigMap as a config source for the Node."'), + '#configMap':: d.obj(help='"ConfigMapNodeConfigSource contains the information to reference a ConfigMap as a config source for the Node. This API is deprecated since 1.22: https://git.k8s.io/enhancements/keps/sig-node/281-dynamic-kubelet-configuration"'), configMap: { '#withKubeletConfigKey':: d.fn(help='"KubeletConfigKey declares which key of the referenced ConfigMap corresponds to the KubeletConfiguration structure This field is required in all cases."', args=[d.arg(name='kubeletConfigKey', type=d.T.string)]), withKubeletConfigKey(kubeletConfigKey): { configSource+: { configMap+: { kubeletConfigKey: kubeletConfigKey } } }, diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/nodeStatus.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/nodeStatus.libsonnet similarity index 86% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/nodeStatus.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/nodeStatus.libsonnet index 569c8753069..542ee6edaba 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/nodeStatus.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/nodeStatus.libsonnet @@ -3,9 +3,9 @@ '#':: d.pkg(name='nodeStatus', url='', help='"NodeStatus is information about the current status of a node."'), '#config':: d.obj(help='"NodeConfigStatus describes the status of the config assigned by Node.Spec.ConfigSource."'), config: { - '#active':: d.obj(help='"NodeConfigSource specifies a source of node configuration. Exactly one subfield (excluding metadata) must be non-nil."'), + '#active':: d.obj(help='"NodeConfigSource specifies a source of node configuration. Exactly one subfield (excluding metadata) must be non-nil. This API is deprecated since 1.22"'), active: { - '#configMap':: d.obj(help='"ConfigMapNodeConfigSource contains the information to reference a ConfigMap as a config source for the Node."'), + '#configMap':: d.obj(help='"ConfigMapNodeConfigSource contains the information to reference a ConfigMap as a config source for the Node. This API is deprecated since 1.22: https://git.k8s.io/enhancements/keps/sig-node/281-dynamic-kubelet-configuration"'), configMap: { '#withKubeletConfigKey':: d.fn(help='"KubeletConfigKey declares which key of the referenced ConfigMap corresponds to the KubeletConfiguration structure This field is required in all cases."', args=[d.arg(name='kubeletConfigKey', type=d.T.string)]), withKubeletConfigKey(kubeletConfigKey): { config+: { active+: { configMap+: { kubeletConfigKey: kubeletConfigKey } } } }, @@ -19,9 +19,9 @@ withUid(uid): { config+: { active+: { configMap+: { uid: uid } } } }, }, }, - '#assigned':: d.obj(help='"NodeConfigSource specifies a source of node configuration. Exactly one subfield (excluding metadata) must be non-nil."'), + '#assigned':: d.obj(help='"NodeConfigSource specifies a source of node configuration. Exactly one subfield (excluding metadata) must be non-nil. 
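The configSource builders still render, but their help text now flags the API as deprecated since Kubernetes 1.22 (dynamic kubelet configuration). A sketch of the nesting, purely for illustration; withName and withNamespace are assumed to exist on the configMap group alongside the withKubeletConfigKey shown in this hunk:

local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';
local node = k.core.v1.node;

// Deprecated upstream since 1.22; shown only to illustrate the spec.configSource.configMap nesting.
node.new('worker-1')
+ node.spec.configSource.configMap.withKubeletConfigKey('kubelet')
+ node.spec.configSource.configMap.withName('kubelet-config')      // assumed builder
+ node.spec.configSource.configMap.withNamespace('kube-system')    // assumed builder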
This API is deprecated since 1.22"'), assigned: { - '#configMap':: d.obj(help='"ConfigMapNodeConfigSource contains the information to reference a ConfigMap as a config source for the Node."'), + '#configMap':: d.obj(help='"ConfigMapNodeConfigSource contains the information to reference a ConfigMap as a config source for the Node. This API is deprecated since 1.22: https://git.k8s.io/enhancements/keps/sig-node/281-dynamic-kubelet-configuration"'), configMap: { '#withKubeletConfigKey':: d.fn(help='"KubeletConfigKey declares which key of the referenced ConfigMap corresponds to the KubeletConfiguration structure This field is required in all cases."', args=[d.arg(name='kubeletConfigKey', type=d.T.string)]), withKubeletConfigKey(kubeletConfigKey): { config+: { assigned+: { configMap+: { kubeletConfigKey: kubeletConfigKey } } } }, @@ -35,9 +35,9 @@ withUid(uid): { config+: { assigned+: { configMap+: { uid: uid } } } }, }, }, - '#lastKnownGood':: d.obj(help='"NodeConfigSource specifies a source of node configuration. Exactly one subfield (excluding metadata) must be non-nil."'), + '#lastKnownGood':: d.obj(help='"NodeConfigSource specifies a source of node configuration. Exactly one subfield (excluding metadata) must be non-nil. This API is deprecated since 1.22"'), lastKnownGood: { - '#configMap':: d.obj(help='"ConfigMapNodeConfigSource contains the information to reference a ConfigMap as a config source for the Node."'), + '#configMap':: d.obj(help='"ConfigMapNodeConfigSource contains the information to reference a ConfigMap as a config source for the Node. This API is deprecated since 1.22: https://git.k8s.io/enhancements/keps/sig-node/281-dynamic-kubelet-configuration"'), configMap: { '#withKubeletConfigKey':: d.fn(help='"KubeletConfigKey declares which key of the referenced ConfigMap corresponds to the KubeletConfiguration structure This field is required in all cases."', args=[d.arg(name='kubeletConfigKey', type=d.T.string)]), withKubeletConfigKey(kubeletConfigKey): { config+: { lastKnownGood+: { configMap+: { kubeletConfigKey: kubeletConfigKey } } } }, @@ -68,7 +68,7 @@ withArchitecture(architecture): { nodeInfo+: { architecture: architecture } }, '#withBootID':: d.fn(help='"Boot ID reported by the node."', args=[d.arg(name='bootID', type=d.T.string)]), withBootID(bootID): { nodeInfo+: { bootID: bootID } }, - '#withContainerRuntimeVersion':: d.fn(help='"ContainerRuntime Version reported by the node through runtime remote API (e.g. docker://1.5.0)."', args=[d.arg(name='containerRuntimeVersion', type=d.T.string)]), + '#withContainerRuntimeVersion':: d.fn(help='"ContainerRuntime Version reported by the node through runtime remote API (e.g. containerd://1.4.2)."', args=[d.arg(name='containerRuntimeVersion', type=d.T.string)]), withContainerRuntimeVersion(containerRuntimeVersion): { nodeInfo+: { containerRuntimeVersion: containerRuntimeVersion } }, '#withKernelVersion':: d.fn(help="\"Kernel Version reported by the node from 'uname -r' (e.g. 3.16.0-0.bpo.4-amd64).\"", args=[d.arg(name='kernelVersion', type=d.T.string)]), withKernelVersion(kernelVersion): { nodeInfo+: { kernelVersion: kernelVersion } }, @@ -85,9 +85,9 @@ '#withSystemUUID':: d.fn(help='"SystemUUID reported by the node. For unique machine identification MachineID is preferred. 
This field is specific to Red Hat hosts https://access.redhat.com/documentation/en-us/red_hat_subscription_management/1/html/rhsm/uuid"', args=[d.arg(name='systemUUID', type=d.T.string)]), withSystemUUID(systemUUID): { nodeInfo+: { systemUUID: systemUUID } }, }, - '#withAddresses':: d.fn(help='"List of addresses reachable to the node. Queried from cloud provider, if available. More info: https://kubernetes.io/docs/concepts/nodes/node/#addresses Note: This field is declared as mergeable, but the merge key is not sufficiently unique, which can cause data corruption when it is merged. Callers should instead use a full-replacement patch. See http://pr.k8s.io/79391 for an example."', args=[d.arg(name='addresses', type=d.T.array)]), + '#withAddresses':: d.fn(help="\"List of addresses reachable to the node. Queried from cloud provider, if available. More info: https://kubernetes.io/docs/concepts/nodes/node/#addresses Note: This field is declared as mergeable, but the merge key is not sufficiently unique, which can cause data corruption when it is merged. Callers should instead use a full-replacement patch. See https://pr.k8s.io/79391 for an example. Consumers should assume that addresses can change during the lifetime of a Node. However, there are some exceptions where this may not be possible, such as Pods that inherit a Node's address in its own status or consumers of the downward API (status.hostIP).\"", args=[d.arg(name='addresses', type=d.T.array)]), withAddresses(addresses): { addresses: if std.isArray(v=addresses) then addresses else [addresses] }, - '#withAddressesMixin':: d.fn(help='"List of addresses reachable to the node. Queried from cloud provider, if available. More info: https://kubernetes.io/docs/concepts/nodes/node/#addresses Note: This field is declared as mergeable, but the merge key is not sufficiently unique, which can cause data corruption when it is merged. Callers should instead use a full-replacement patch. See http://pr.k8s.io/79391 for an example."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='addresses', type=d.T.array)]), + '#withAddressesMixin':: d.fn(help="\"List of addresses reachable to the node. Queried from cloud provider, if available. More info: https://kubernetes.io/docs/concepts/nodes/node/#addresses Note: This field is declared as mergeable, but the merge key is not sufficiently unique, which can cause data corruption when it is merged. Callers should instead use a full-replacement patch. See https://pr.k8s.io/79391 for an example. Consumers should assume that addresses can change during the lifetime of a Node. However, there are some exceptions where this may not be possible, such as Pods that inherit a Node's address in its own status or consumers of the downward API (status.hostIP).\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='addresses', type=d.T.array)]), withAddressesMixin(addresses): { addresses+: if std.isArray(v=addresses) then addresses else [addresses] }, '#withAllocatable':: d.fn(help='"Allocatable represents the resources of a node that are available for scheduling. 
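The nodeStatus helpers compose the same way; the sketch below (placeholder values, same assumed import path) also reflects the updated containerRuntimeVersion help text, which now uses a containerd:// example instead of docker://:

local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';
local nodeStatus = k.core.v1.nodeStatus;

nodeStatus.withAllocatable({ cpu: '7500m', memory: '30Gi' })
+ nodeStatus.nodeInfo.withContainerRuntimeVersion('containerd://1.7.13')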
Defaults to Capacity."', args=[d.arg(name='allocatable', type=d.T.object)]), withAllocatable(allocatable): { allocatable: allocatable }, diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/nodeSystemInfo.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/nodeSystemInfo.libsonnet similarity index 94% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/nodeSystemInfo.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/nodeSystemInfo.libsonnet index 8b4d2593097..110062278e2 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/nodeSystemInfo.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/nodeSystemInfo.libsonnet @@ -5,7 +5,7 @@ withArchitecture(architecture): { architecture: architecture }, '#withBootID':: d.fn(help='"Boot ID reported by the node."', args=[d.arg(name='bootID', type=d.T.string)]), withBootID(bootID): { bootID: bootID }, - '#withContainerRuntimeVersion':: d.fn(help='"ContainerRuntime Version reported by the node through runtime remote API (e.g. docker://1.5.0)."', args=[d.arg(name='containerRuntimeVersion', type=d.T.string)]), + '#withContainerRuntimeVersion':: d.fn(help='"ContainerRuntime Version reported by the node through runtime remote API (e.g. containerd://1.4.2)."', args=[d.arg(name='containerRuntimeVersion', type=d.T.string)]), withContainerRuntimeVersion(containerRuntimeVersion): { containerRuntimeVersion: containerRuntimeVersion }, '#withKernelVersion':: d.fn(help="\"Kernel Version reported by the node from 'uname -r' (e.g. 3.16.0-0.bpo.4-amd64).\"", args=[d.arg(name='kernelVersion', type=d.T.string)]), withKernelVersion(kernelVersion): { kernelVersion: kernelVersion }, diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/objectFieldSelector.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/objectFieldSelector.libsonnet similarity index 100% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/objectFieldSelector.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/objectFieldSelector.libsonnet diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/objectReference.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/objectReference.libsonnet similarity index 100% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/objectReference.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/objectReference.libsonnet diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/persistentVolume.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/persistentVolume.libsonnet similarity index 61% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/persistentVolume.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/persistentVolume.libsonnet index d87caa24595..888fd18e425 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/persistentVolume.libsonnet +++ 
b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/persistentVolume.libsonnet @@ -3,12 +3,10 @@ '#':: d.pkg(name='persistentVolume', url='', help='"PersistentVolume (PV) is a storage resource provisioned by an administrator. It is analogous to a node. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes"'), '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. 
Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -19,21 +17,21 @@ withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { metadata+: { generateName: generateName } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. 
Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { metadata+: { labels+: labels } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. 
Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { metadata+: { namespace: namespace } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, @@ -41,9 +39,9 @@ withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. 
Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { metadata+: { uid: uid } }, }, '#new':: d.fn(help='new returns an instance of PersistentVolume', args=[d.arg(name='name', type=d.T.string)]), @@ -55,77 +53,77 @@ spec: { '#awsElasticBlockStore':: d.obj(help='"Represents a Persistent Disk resource in AWS.\\n\\nAn AWS EBS disk must exist before mounting to a container. The disk must also be in the same AWS zone as the kubelet. An AWS EBS disk can only be mounted as read/write once. AWS EBS volumes support ownership management and SELinux relabeling."'), awsElasticBlockStore: { - '#withFsType':: d.fn(help='"Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore"', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType is the filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore"', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { spec+: { awsElasticBlockStore+: { fsType: fsType } } }, - '#withPartition':: d.fn(help='"The partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \\"1\\". Similarly, the volume partition for /dev/sda is \\"0\\" (or you can leave the property empty)."', args=[d.arg(name='partition', type=d.T.integer)]), + '#withPartition':: d.fn(help='"partition is the partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \\"1\\". Similarly, the volume partition for /dev/sda is \\"0\\" (or you can leave the property empty)."', args=[d.arg(name='partition', type=d.T.integer)]), withPartition(partition): { spec+: { awsElasticBlockStore+: { partition: partition } } }, - '#withReadOnly':: d.fn(help='"Specify \\"true\\" to force and set the ReadOnly property in VolumeMounts to \\"true\\". 
If omitted, the default is \\"false\\". More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore"', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly value true will force the readOnly setting in VolumeMounts. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore"', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { spec+: { awsElasticBlockStore+: { readOnly: readOnly } } }, - '#withVolumeID':: d.fn(help='"Unique ID of the persistent disk resource in AWS (Amazon EBS volume). More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore"', args=[d.arg(name='volumeID', type=d.T.string)]), + '#withVolumeID':: d.fn(help='"volumeID is unique ID of the persistent disk resource in AWS (Amazon EBS volume). More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore"', args=[d.arg(name='volumeID', type=d.T.string)]), withVolumeID(volumeID): { spec+: { awsElasticBlockStore+: { volumeID: volumeID } } }, }, '#azureDisk':: d.obj(help='"AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod."'), azureDisk: { - '#withCachingMode':: d.fn(help='"Host Caching mode: None, Read Only, Read Write."', args=[d.arg(name='cachingMode', type=d.T.string)]), + '#withCachingMode':: d.fn(help='"cachingMode is the Host Caching mode: None, Read Only, Read Write."', args=[d.arg(name='cachingMode', type=d.T.string)]), withCachingMode(cachingMode): { spec+: { azureDisk+: { cachingMode: cachingMode } } }, - '#withDiskName':: d.fn(help='"The Name of the data disk in the blob storage"', args=[d.arg(name='diskName', type=d.T.string)]), + '#withDiskName':: d.fn(help='"diskName is the Name of the data disk in the blob storage"', args=[d.arg(name='diskName', type=d.T.string)]), withDiskName(diskName): { spec+: { azureDisk+: { diskName: diskName } } }, - '#withDiskURI':: d.fn(help='"The URI the data disk in the blob storage"', args=[d.arg(name='diskURI', type=d.T.string)]), + '#withDiskURI':: d.fn(help='"diskURI is the URI of data disk in the blob storage"', args=[d.arg(name='diskURI', type=d.T.string)]), withDiskURI(diskURI): { spec+: { azureDisk+: { diskURI: diskURI } } }, - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType is Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { spec+: { azureDisk+: { fsType: fsType } } }, - '#withKind':: d.fn(help='"Expected values Shared: multiple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared"', args=[d.arg(name='kind', type=d.T.string)]), + '#withKind':: d.fn(help='"kind expected values are Shared: multiple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared"', args=[d.arg(name='kind', type=d.T.string)]), withKind(kind): { spec+: { azureDisk+: { kind: kind } } }, - '#withReadOnly':: d.fn(help='"Defaults to false (read/write). 
ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { spec+: { azureDisk+: { readOnly: readOnly } } }, }, '#azureFile':: d.obj(help='"AzureFile represents an Azure File Service mount on the host and bind mount to the pod."'), azureFile: { - '#withReadOnly':: d.fn(help='"Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { spec+: { azureFile+: { readOnly: readOnly } } }, - '#withSecretName':: d.fn(help='"the name of secret that contains Azure Storage Account Name and Key"', args=[d.arg(name='secretName', type=d.T.string)]), + '#withSecretName':: d.fn(help='"secretName is the name of secret that contains Azure Storage Account Name and Key"', args=[d.arg(name='secretName', type=d.T.string)]), withSecretName(secretName): { spec+: { azureFile+: { secretName: secretName } } }, - '#withSecretNamespace':: d.fn(help='"the namespace of the secret that contains Azure Storage Account Name and Key default is the same as the Pod"', args=[d.arg(name='secretNamespace', type=d.T.string)]), + '#withSecretNamespace':: d.fn(help='"secretNamespace is the namespace of the secret that contains Azure Storage Account Name and Key default is the same as the Pod"', args=[d.arg(name='secretNamespace', type=d.T.string)]), withSecretNamespace(secretNamespace): { spec+: { azureFile+: { secretNamespace: secretNamespace } } }, - '#withShareName':: d.fn(help='"Share Name"', args=[d.arg(name='shareName', type=d.T.string)]), + '#withShareName':: d.fn(help='"shareName is the azure Share Name"', args=[d.arg(name='shareName', type=d.T.string)]), withShareName(shareName): { spec+: { azureFile+: { shareName: shareName } } }, }, '#cephfs':: d.obj(help='"Represents a Ceph Filesystem mount that lasts the lifetime of a pod Cephfs volumes do not support ownership management or SELinux relabeling."'), cephfs: { '#secretRef':: d.obj(help='"SecretReference represents a Secret Reference. 
It has enough information to retrieve secret in any namespace"'), secretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), withName(name): { spec+: { cephfs+: { secretRef+: { name: name } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { spec+: { cephfs+: { secretRef+: { namespace: namespace } } } }, }, - '#withMonitors':: d.fn(help='"Required: Monitors is a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='monitors', type=d.T.array)]), + '#withMonitors':: d.fn(help='"monitors is Required: Monitors is a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='monitors', type=d.T.array)]), withMonitors(monitors): { spec+: { cephfs+: { monitors: if std.isArray(v=monitors) then monitors else [monitors] } } }, - '#withMonitorsMixin':: d.fn(help='"Required: Monitors is a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='monitors', type=d.T.array)]), + '#withMonitorsMixin':: d.fn(help='"monitors is Required: Monitors is a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='monitors', type=d.T.array)]), withMonitorsMixin(monitors): { spec+: { cephfs+: { monitors+: if std.isArray(v=monitors) then monitors else [monitors] } } }, - '#withPath':: d.fn(help='"Optional: Used as the mounted root, rather than the full Ceph tree, default is /"', args=[d.arg(name='path', type=d.T.string)]), + '#withPath':: d.fn(help='"path is Optional: Used as the mounted root, rather than the full Ceph tree, default is /"', args=[d.arg(name='path', type=d.T.string)]), withPath(path): { spec+: { cephfs+: { path: path } } }, - '#withReadOnly':: d.fn(help='"Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly is Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. 
More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { spec+: { cephfs+: { readOnly: readOnly } } }, - '#withSecretFile':: d.fn(help='"Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='secretFile', type=d.T.string)]), + '#withSecretFile':: d.fn(help='"secretFile is Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='secretFile', type=d.T.string)]), withSecretFile(secretFile): { spec+: { cephfs+: { secretFile: secretFile } } }, - '#withUser':: d.fn(help='"Optional: User is the rados user name, default is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='user', type=d.T.string)]), + '#withUser':: d.fn(help='"user is Optional: User is the rados user name, default is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='user', type=d.T.string)]), withUser(user): { spec+: { cephfs+: { user: user } } }, }, '#cinder':: d.obj(help='"Represents a cinder volume resource in Openstack. A Cinder volume must exist before mounting to a container. The volume must also be in the same region as the kubelet. Cinder volumes support ownership management and SELinux relabeling."'), cinder: { '#secretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), secretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), withName(name): { spec+: { cinder+: { secretRef+: { name: name } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { spec+: { cinder+: { secretRef+: { namespace: namespace } } } }, }, - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://examples.k8s.io/mysql-cinder-pd/README.md"', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType Filesystem type to mount. Must be a filesystem type supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://examples.k8s.io/mysql-cinder-pd/README.md"', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { spec+: { cinder+: { fsType: fsType } } }, - '#withReadOnly':: d.fn(help='"Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/mysql-cinder-pd/README.md"', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly is Optional: Defaults to false (read/write). 
ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/mysql-cinder-pd/README.md"', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { spec+: { cinder+: { readOnly: readOnly } } }, - '#withVolumeID':: d.fn(help='"volume id used to identify the volume in cinder. More info: https://examples.k8s.io/mysql-cinder-pd/README.md"', args=[d.arg(name='volumeID', type=d.T.string)]), + '#withVolumeID':: d.fn(help='"volumeID used to identify the volume in cinder. More info: https://examples.k8s.io/mysql-cinder-pd/README.md"', args=[d.arg(name='volumeID', type=d.T.string)]), withVolumeID(volumeID): { spec+: { cinder+: { volumeID: volumeID } } }, }, '#claimRef':: d.obj(help='"ObjectReference contains enough information to let you inspect or modify the referred object."'), @@ -149,164 +147,171 @@ csi: { '#controllerExpandSecretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), controllerExpandSecretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), withName(name): { spec+: { csi+: { controllerExpandSecretRef+: { name: name } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { spec+: { csi+: { controllerExpandSecretRef+: { namespace: namespace } } } }, }, '#controllerPublishSecretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), controllerPublishSecretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), withName(name): { spec+: { csi+: { controllerPublishSecretRef+: { name: name } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { spec+: { csi+: { controllerPublishSecretRef+: { namespace: namespace } } } }, }, + '#nodeExpandSecretRef':: d.obj(help='"SecretReference represents a Secret Reference. 
It has enough information to retrieve secret in any namespace"'),
+      nodeExpandSecretRef: {
+        '#withName':: d.fn(help='"name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]),
+        withName(name): { spec+: { csi+: { nodeExpandSecretRef+: { name: name } } } },
+        '#withNamespace':: d.fn(help='"namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]),
+        withNamespace(namespace): { spec+: { csi+: { nodeExpandSecretRef+: { namespace: namespace } } } },
+      },
       '#nodePublishSecretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'),
       nodePublishSecretRef: {
-        '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]),
+        '#withName':: d.fn(help='"name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]),
         withName(name): { spec+: { csi+: { nodePublishSecretRef+: { name: name } } } },
-        '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]),
+        '#withNamespace':: d.fn(help='"namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]),
         withNamespace(namespace): { spec+: { csi+: { nodePublishSecretRef+: { namespace: namespace } } } },
       },
       '#nodeStageSecretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'),
       nodeStageSecretRef: {
-        '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]),
+        '#withName':: d.fn(help='"name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]),
         withName(name): { spec+: { csi+: { nodeStageSecretRef+: { name: name } } } },
-        '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]),
+        '#withNamespace':: d.fn(help='"namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]),
         withNamespace(namespace): { spec+: { csi+: { nodeStageSecretRef+: { namespace: namespace } } } },
       },
-      '#withDriver':: d.fn(help='"Driver is the name of the driver to use for this volume. Required."', args=[d.arg(name='driver', type=d.T.string)]),
+      '#withDriver':: d.fn(help='"driver is the name of the driver to use for this volume. Required."', args=[d.arg(name='driver', type=d.T.string)]),
       withDriver(driver): { spec+: { csi+: { driver: driver } } },
-      '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\"."', args=[d.arg(name='fsType', type=d.T.string)]),
+      '#withFsType':: d.fn(help='"fsType to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\"."', args=[d.arg(name='fsType', type=d.T.string)]),
       withFsType(fsType): { spec+: { csi+: { fsType: fsType } } },
-      '#withReadOnly':: d.fn(help='"Optional: The value to pass to ControllerPublishVolumeRequest.
Defaults to false (read/write)."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly value to pass to ControllerPublishVolumeRequest. Defaults to false (read/write)."', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { spec+: { csi+: { readOnly: readOnly } } }, - '#withVolumeAttributes':: d.fn(help='"Attributes of the volume to publish."', args=[d.arg(name='volumeAttributes', type=d.T.object)]), + '#withVolumeAttributes':: d.fn(help='"volumeAttributes of the volume to publish."', args=[d.arg(name='volumeAttributes', type=d.T.object)]), withVolumeAttributes(volumeAttributes): { spec+: { csi+: { volumeAttributes: volumeAttributes } } }, - '#withVolumeAttributesMixin':: d.fn(help='"Attributes of the volume to publish."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='volumeAttributes', type=d.T.object)]), + '#withVolumeAttributesMixin':: d.fn(help='"volumeAttributes of the volume to publish."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='volumeAttributes', type=d.T.object)]), withVolumeAttributesMixin(volumeAttributes): { spec+: { csi+: { volumeAttributes+: volumeAttributes } } }, - '#withVolumeHandle':: d.fn(help='"VolumeHandle is the unique volume name returned by the CSI volume plugin’s CreateVolume to refer to the volume on all subsequent calls. Required."', args=[d.arg(name='volumeHandle', type=d.T.string)]), + '#withVolumeHandle':: d.fn(help='"volumeHandle is the unique volume name returned by the CSI volume plugin’s CreateVolume to refer to the volume on all subsequent calls. Required."', args=[d.arg(name='volumeHandle', type=d.T.string)]), withVolumeHandle(volumeHandle): { spec+: { csi+: { volumeHandle: volumeHandle } } }, }, '#fc':: d.obj(help='"Represents a Fibre Channel volume. Fibre Channel volumes can only be mounted as read/write once. Fibre Channel volumes support ownership management and SELinux relabeling."'), fc: { - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { spec+: { fc+: { fsType: fsType } } }, - '#withLun':: d.fn(help='"Optional: FC target lun number"', args=[d.arg(name='lun', type=d.T.integer)]), + '#withLun':: d.fn(help='"lun is Optional: FC target lun number"', args=[d.arg(name='lun', type=d.T.integer)]), withLun(lun): { spec+: { fc+: { lun: lun } } }, - '#withReadOnly':: d.fn(help='"Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly is Optional: Defaults to false (read/write). 
ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { spec+: { fc+: { readOnly: readOnly } } }, - '#withTargetWWNs':: d.fn(help='"Optional: FC target worldwide names (WWNs)"', args=[d.arg(name='targetWWNs', type=d.T.array)]), + '#withTargetWWNs':: d.fn(help='"targetWWNs is Optional: FC target worldwide names (WWNs)"', args=[d.arg(name='targetWWNs', type=d.T.array)]), withTargetWWNs(targetWWNs): { spec+: { fc+: { targetWWNs: if std.isArray(v=targetWWNs) then targetWWNs else [targetWWNs] } } }, - '#withTargetWWNsMixin':: d.fn(help='"Optional: FC target worldwide names (WWNs)"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='targetWWNs', type=d.T.array)]), + '#withTargetWWNsMixin':: d.fn(help='"targetWWNs is Optional: FC target worldwide names (WWNs)"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='targetWWNs', type=d.T.array)]), withTargetWWNsMixin(targetWWNs): { spec+: { fc+: { targetWWNs+: if std.isArray(v=targetWWNs) then targetWWNs else [targetWWNs] } } }, - '#withWwids':: d.fn(help='"Optional: FC volume world wide identifiers (wwids) Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously."', args=[d.arg(name='wwids', type=d.T.array)]), + '#withWwids':: d.fn(help='"wwids Optional: FC volume world wide identifiers (wwids) Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously."', args=[d.arg(name='wwids', type=d.T.array)]), withWwids(wwids): { spec+: { fc+: { wwids: if std.isArray(v=wwids) then wwids else [wwids] } } }, - '#withWwidsMixin':: d.fn(help='"Optional: FC volume world wide identifiers (wwids) Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='wwids', type=d.T.array)]), + '#withWwidsMixin':: d.fn(help='"wwids Optional: FC volume world wide identifiers (wwids) Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='wwids', type=d.T.array)]), withWwidsMixin(wwids): { spec+: { fc+: { wwids+: if std.isArray(v=wwids) then wwids else [wwids] } } }, }, '#flexVolume':: d.obj(help='"FlexPersistentVolumeSource represents a generic persistent volume resource that is provisioned/attached using an exec based plugin."'), flexVolume: { '#secretRef':: d.obj(help='"SecretReference represents a Secret Reference. 
It has enough information to retrieve secret in any namespace"'), secretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), withName(name): { spec+: { flexVolume+: { secretRef+: { name: name } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { spec+: { flexVolume+: { secretRef+: { namespace: namespace } } } }, }, - '#withDriver':: d.fn(help='"Driver is the name of the driver to use for this volume."', args=[d.arg(name='driver', type=d.T.string)]), + '#withDriver':: d.fn(help='"driver is the name of the driver to use for this volume."', args=[d.arg(name='driver', type=d.T.string)]), withDriver(driver): { spec+: { flexVolume+: { driver: driver } } }, - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". The default filesystem depends on FlexVolume script."', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType is the Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". The default filesystem depends on FlexVolume script."', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { spec+: { flexVolume+: { fsType: fsType } } }, - '#withOptions':: d.fn(help='"Optional: Extra command options if any."', args=[d.arg(name='options', type=d.T.object)]), + '#withOptions':: d.fn(help='"options is Optional: this field holds extra command options if any."', args=[d.arg(name='options', type=d.T.object)]), withOptions(options): { spec+: { flexVolume+: { options: options } } }, - '#withOptionsMixin':: d.fn(help='"Optional: Extra command options if any."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='options', type=d.T.object)]), + '#withOptionsMixin':: d.fn(help='"options is Optional: this field holds extra command options if any."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='options', type=d.T.object)]), withOptionsMixin(options): { spec+: { flexVolume+: { options+: options } } }, - '#withReadOnly':: d.fn(help='"Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly is Optional: defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { spec+: { flexVolume+: { readOnly: readOnly } } }, }, '#flocker':: d.obj(help='"Represents a Flocker volume mounted by the Flocker agent. One and only one of datasetName and datasetUUID should be set. 
Flocker volumes do not support ownership management or SELinux relabeling."'), flocker: { - '#withDatasetName':: d.fn(help='"Name of the dataset stored as metadata -> name on the dataset for Flocker should be considered as deprecated"', args=[d.arg(name='datasetName', type=d.T.string)]), + '#withDatasetName':: d.fn(help='"datasetName is Name of the dataset stored as metadata -> name on the dataset for Flocker should be considered as deprecated"', args=[d.arg(name='datasetName', type=d.T.string)]), withDatasetName(datasetName): { spec+: { flocker+: { datasetName: datasetName } } }, - '#withDatasetUUID':: d.fn(help='"UUID of the dataset. This is unique identifier of a Flocker dataset"', args=[d.arg(name='datasetUUID', type=d.T.string)]), + '#withDatasetUUID':: d.fn(help='"datasetUUID is the UUID of the dataset. This is unique identifier of a Flocker dataset"', args=[d.arg(name='datasetUUID', type=d.T.string)]), withDatasetUUID(datasetUUID): { spec+: { flocker+: { datasetUUID: datasetUUID } } }, }, '#gcePersistentDisk':: d.obj(help='"Represents a Persistent Disk resource in Google Compute Engine.\\n\\nA GCE PD must exist before mounting to a container. The disk must also be in the same GCE project and zone as the kubelet. A GCE PD can only be mounted as read/write once or read-only many times. GCE PDs support ownership management and SELinux relabeling."'), gcePersistentDisk: { - '#withFsType':: d.fn(help='"Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType is filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { spec+: { gcePersistentDisk+: { fsType: fsType } } }, - '#withPartition':: d.fn(help='"The partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \\"1\\". Similarly, the volume partition for /dev/sda is \\"0\\" (or you can leave the property empty). More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='partition', type=d.T.integer)]), + '#withPartition':: d.fn(help='"partition is the partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \\"1\\". Similarly, the volume partition for /dev/sda is \\"0\\" (or you can leave the property empty). More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='partition', type=d.T.integer)]), withPartition(partition): { spec+: { gcePersistentDisk+: { partition: partition } } }, - '#withPdName':: d.fn(help='"Unique name of the PD resource in GCE. Used to identify the disk in GCE. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='pdName', type=d.T.string)]), + '#withPdName':: d.fn(help='"pdName is unique name of the PD resource in GCE. Used to identify the disk in GCE. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='pdName', type=d.T.string)]), withPdName(pdName): { spec+: { gcePersistentDisk+: { pdName: pdName } } }, - '#withReadOnly':: d.fn(help='"ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { spec+: { gcePersistentDisk+: { readOnly: readOnly } } }, }, '#glusterfs':: d.obj(help='"Represents a Glusterfs mount that lasts the lifetime of a pod. Glusterfs volumes do not support ownership management or SELinux relabeling."'), glusterfs: { - '#withEndpoints':: d.fn(help='"EndpointsName is the endpoint name that details Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='endpoints', type=d.T.string)]), + '#withEndpoints':: d.fn(help='"endpoints is the endpoint name that details Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='endpoints', type=d.T.string)]), withEndpoints(endpoints): { spec+: { glusterfs+: { endpoints: endpoints } } }, - '#withEndpointsNamespace':: d.fn(help='"EndpointsNamespace is the namespace that contains Glusterfs endpoint. If this field is empty, the EndpointNamespace defaults to the same namespace as the bound PVC. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='endpointsNamespace', type=d.T.string)]), + '#withEndpointsNamespace':: d.fn(help='"endpointsNamespace is the namespace that contains Glusterfs endpoint. If this field is empty, the EndpointNamespace defaults to the same namespace as the bound PVC. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='endpointsNamespace', type=d.T.string)]), withEndpointsNamespace(endpointsNamespace): { spec+: { glusterfs+: { endpointsNamespace: endpointsNamespace } } }, - '#withPath':: d.fn(help='"Path is the Glusterfs volume path. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='path', type=d.T.string)]), + '#withPath':: d.fn(help='"path is the Glusterfs volume path. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='path', type=d.T.string)]), withPath(path): { spec+: { glusterfs+: { path: path } } }, - '#withReadOnly':: d.fn(help='"ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions. Defaults to false. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly here will force the Glusterfs volume to be mounted with read-only permissions. Defaults to false. 
More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { spec+: { glusterfs+: { readOnly: readOnly } } }, }, '#hostPath':: d.obj(help='"Represents a host path mapped into a pod. Host path volumes do not support ownership management or SELinux relabeling."'), hostPath: { - '#withPath':: d.fn(help='"Path of the directory on the host. If the path is a symlink, it will follow the link to the real path. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath"', args=[d.arg(name='path', type=d.T.string)]), + '#withPath':: d.fn(help='"path of the directory on the host. If the path is a symlink, it will follow the link to the real path. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath"', args=[d.arg(name='path', type=d.T.string)]), withPath(path): { spec+: { hostPath+: { path: path } } }, - '#withType':: d.fn(help='"Type for HostPath Volume Defaults to \\"\\" More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath"', args=[d.arg(name='type', type=d.T.string)]), + '#withType':: d.fn(help='"type for HostPath Volume Defaults to \\"\\" More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath"', args=[d.arg(name='type', type=d.T.string)]), withType(type): { spec+: { hostPath+: { type: type } } }, }, '#iscsi':: d.obj(help='"ISCSIPersistentVolumeSource represents an ISCSI disk. ISCSI volumes can only be mounted as read/write once. ISCSI volumes support ownership management and SELinux relabeling."'), iscsi: { '#secretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), secretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), withName(name): { spec+: { iscsi+: { secretRef+: { name: name } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { spec+: { iscsi+: { secretRef+: { namespace: namespace } } } }, }, - '#withChapAuthDiscovery':: d.fn(help='"whether support iSCSI Discovery CHAP authentication"', args=[d.arg(name='chapAuthDiscovery', type=d.T.boolean)]), + '#withChapAuthDiscovery':: d.fn(help='"chapAuthDiscovery defines whether support iSCSI Discovery CHAP authentication"', args=[d.arg(name='chapAuthDiscovery', type=d.T.boolean)]), withChapAuthDiscovery(chapAuthDiscovery): { spec+: { iscsi+: { chapAuthDiscovery: chapAuthDiscovery } } }, - '#withChapAuthSession':: d.fn(help='"whether support iSCSI Session CHAP authentication"', args=[d.arg(name='chapAuthSession', type=d.T.boolean)]), + '#withChapAuthSession':: d.fn(help='"chapAuthSession defines whether support iSCSI Session CHAP authentication"', args=[d.arg(name='chapAuthSession', type=d.T.boolean)]), withChapAuthSession(chapAuthSession): { spec+: { iscsi+: { chapAuthSession: chapAuthSession } } }, - '#withFsType':: d.fn(help='"Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. 
Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi"', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType is the filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi"', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { spec+: { iscsi+: { fsType: fsType } } }, - '#withInitiatorName':: d.fn(help='"Custom iSCSI Initiator Name. If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface : will be created for the connection."', args=[d.arg(name='initiatorName', type=d.T.string)]), + '#withInitiatorName':: d.fn(help='"initiatorName is the custom iSCSI Initiator Name. If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface : will be created for the connection."', args=[d.arg(name='initiatorName', type=d.T.string)]), withInitiatorName(initiatorName): { spec+: { iscsi+: { initiatorName: initiatorName } } }, - '#withIqn':: d.fn(help='"Target iSCSI Qualified Name."', args=[d.arg(name='iqn', type=d.T.string)]), + '#withIqn':: d.fn(help='"iqn is Target iSCSI Qualified Name."', args=[d.arg(name='iqn', type=d.T.string)]), withIqn(iqn): { spec+: { iscsi+: { iqn: iqn } } }, - '#withIscsiInterface':: d.fn(help="\"iSCSI Interface Name that uses an iSCSI transport. Defaults to 'default' (tcp).\"", args=[d.arg(name='iscsiInterface', type=d.T.string)]), + '#withIscsiInterface':: d.fn(help="\"iscsiInterface is the interface Name that uses an iSCSI transport. Defaults to 'default' (tcp).\"", args=[d.arg(name='iscsiInterface', type=d.T.string)]), withIscsiInterface(iscsiInterface): { spec+: { iscsi+: { iscsiInterface: iscsiInterface } } }, - '#withLun':: d.fn(help='"iSCSI Target Lun number."', args=[d.arg(name='lun', type=d.T.integer)]), + '#withLun':: d.fn(help='"lun is iSCSI Target Lun number."', args=[d.arg(name='lun', type=d.T.integer)]), withLun(lun): { spec+: { iscsi+: { lun: lun } } }, - '#withPortals':: d.fn(help='"iSCSI Target Portal List. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260)."', args=[d.arg(name='portals', type=d.T.array)]), + '#withPortals':: d.fn(help='"portals is the iSCSI Target Portal List. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260)."', args=[d.arg(name='portals', type=d.T.array)]), withPortals(portals): { spec+: { iscsi+: { portals: if std.isArray(v=portals) then portals else [portals] } } }, - '#withPortalsMixin':: d.fn(help='"iSCSI Target Portal List. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260)."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='portals', type=d.T.array)]), + '#withPortalsMixin':: d.fn(help='"portals is the iSCSI Target Portal List. 
The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260)."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='portals', type=d.T.array)]), withPortalsMixin(portals): { spec+: { iscsi+: { portals+: if std.isArray(v=portals) then portals else [portals] } } }, - '#withReadOnly':: d.fn(help='"ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false."', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { spec+: { iscsi+: { readOnly: readOnly } } }, - '#withTargetPortal':: d.fn(help='"iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260)."', args=[d.arg(name='targetPortal', type=d.T.string)]), + '#withTargetPortal':: d.fn(help='"targetPortal is iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260)."', args=[d.arg(name='targetPortal', type=d.T.string)]), withTargetPortal(targetPortal): { spec+: { iscsi+: { targetPortal: targetPortal } } }, }, '#local':: d.obj(help='"Local represents directly-attached storage with node affinity (Beta feature)"'), 'local': { - '#withFsType':: d.fn(help='"Filesystem type to mount. It applies only when the Path is a block device. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". The default value is to auto-select a fileystem if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType is the filesystem type to mount. It applies only when the Path is a block device. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". The default value is to auto-select a filesystem if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { spec+: { 'local'+: { fsType: fsType } } }, - '#withPath':: d.fn(help='"The full path to the volume on the node. It can be either a directory or block device (disk, partition, ...)."', args=[d.arg(name='path', type=d.T.string)]), + '#withPath':: d.fn(help='"path of the full path to the volume on the node. It can be either a directory or block device (disk, partition, ...)."', args=[d.arg(name='path', type=d.T.string)]), withPath(path): { spec+: { 'local'+: { path: path } } }, }, '#nfs':: d.obj(help='"Represents an NFS mount that lasts the lifetime of a pod. NFS volumes do not support ownership management or SELinux relabeling."'), nfs: { - '#withPath':: d.fn(help='"Path that is exported by the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs"', args=[d.arg(name='path', type=d.T.string)]), + '#withPath':: d.fn(help='"path that is exported by the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs"', args=[d.arg(name='path', type=d.T.string)]), withPath(path): { spec+: { nfs+: { path: path } } }, - '#withReadOnly':: d.fn(help='"ReadOnly here will force the NFS export to be mounted with read-only permissions. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs"', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly here will force the NFS export to be mounted with read-only permissions. 
Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs"', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { spec+: { nfs+: { readOnly: readOnly } } }, - '#withServer':: d.fn(help='"Server is the hostname or IP address of the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs"', args=[d.arg(name='server', type=d.T.string)]), + '#withServer':: d.fn(help='"server is the hostname or IP address of the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs"', args=[d.arg(name='server', type=d.T.string)]), withServer(server): { spec+: { nfs+: { server: server } } }, }, '#nodeAffinity':: d.obj(help='"VolumeNodeAffinity defines constraints that limit what nodes this volume can be accessed from."'), @@ -321,87 +326,87 @@ }, '#photonPersistentDisk':: d.obj(help='"Represents a Photon Controller persistent disk resource."'), photonPersistentDisk: { - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { spec+: { photonPersistentDisk+: { fsType: fsType } } }, - '#withPdID':: d.fn(help='"ID that identifies Photon Controller persistent disk"', args=[d.arg(name='pdID', type=d.T.string)]), + '#withPdID':: d.fn(help='"pdID is the ID that identifies Photon Controller persistent disk"', args=[d.arg(name='pdID', type=d.T.string)]), withPdID(pdID): { spec+: { photonPersistentDisk+: { pdID: pdID } } }, }, '#portworxVolume':: d.obj(help='"PortworxVolumeSource represents a Portworx volume resource."'), portworxVolume: { - '#withFsType':: d.fn(help='"FSType represents the filesystem type to mount Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fSType represents the filesystem type to mount Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { spec+: { portworxVolume+: { fsType: fsType } } }, - '#withReadOnly':: d.fn(help='"Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { spec+: { portworxVolume+: { readOnly: readOnly } } }, - '#withVolumeID':: d.fn(help='"VolumeID uniquely identifies a Portworx volume"', args=[d.arg(name='volumeID', type=d.T.string)]), + '#withVolumeID':: d.fn(help='"volumeID uniquely identifies a Portworx volume"', args=[d.arg(name='volumeID', type=d.T.string)]), withVolumeID(volumeID): { spec+: { portworxVolume+: { volumeID: volumeID } } }, }, '#quobyte':: d.obj(help='"Represents a Quobyte mount that lasts the lifetime of a pod. 
Quobyte volumes do not support ownership management or SELinux relabeling."'), quobyte: { - '#withGroup':: d.fn(help='"Group to map volume access to Default is no group"', args=[d.arg(name='group', type=d.T.string)]), + '#withGroup':: d.fn(help='"group to map volume access to Default is no group"', args=[d.arg(name='group', type=d.T.string)]), withGroup(group): { spec+: { quobyte+: { group: group } } }, - '#withReadOnly':: d.fn(help='"ReadOnly here will force the Quobyte volume to be mounted with read-only permissions. Defaults to false."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly here will force the Quobyte volume to be mounted with read-only permissions. Defaults to false."', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { spec+: { quobyte+: { readOnly: readOnly } } }, - '#withRegistry':: d.fn(help='"Registry represents a single or multiple Quobyte Registry services specified as a string as host:port pair (multiple entries are separated with commas) which acts as the central registry for volumes"', args=[d.arg(name='registry', type=d.T.string)]), + '#withRegistry':: d.fn(help='"registry represents a single or multiple Quobyte Registry services specified as a string as host:port pair (multiple entries are separated with commas) which acts as the central registry for volumes"', args=[d.arg(name='registry', type=d.T.string)]), withRegistry(registry): { spec+: { quobyte+: { registry: registry } } }, - '#withTenant':: d.fn(help='"Tenant owning the given Quobyte volume in the Backend Used with dynamically provisioned Quobyte volumes, value is set by the plugin"', args=[d.arg(name='tenant', type=d.T.string)]), + '#withTenant':: d.fn(help='"tenant owning the given Quobyte volume in the Backend Used with dynamically provisioned Quobyte volumes, value is set by the plugin"', args=[d.arg(name='tenant', type=d.T.string)]), withTenant(tenant): { spec+: { quobyte+: { tenant: tenant } } }, - '#withUser':: d.fn(help='"User to map volume access to Defaults to serivceaccount user"', args=[d.arg(name='user', type=d.T.string)]), + '#withUser':: d.fn(help='"user to map volume access to Defaults to serivceaccount user"', args=[d.arg(name='user', type=d.T.string)]), withUser(user): { spec+: { quobyte+: { user: user } } }, - '#withVolume':: d.fn(help='"Volume is a string that references an already created Quobyte volume by name."', args=[d.arg(name='volume', type=d.T.string)]), + '#withVolume':: d.fn(help='"volume is a string that references an already created Quobyte volume by name."', args=[d.arg(name='volume', type=d.T.string)]), withVolume(volume): { spec+: { quobyte+: { volume: volume } } }, }, '#rbd':: d.obj(help='"Represents a Rados Block Device mount that lasts the lifetime of a pod. RBD volumes support ownership management and SELinux relabeling."'), rbd: { '#secretRef':: d.obj(help='"SecretReference represents a Secret Reference. 
It has enough information to retrieve secret in any namespace"'), secretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), withName(name): { spec+: { rbd+: { secretRef+: { name: name } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { spec+: { rbd+: { secretRef+: { namespace: namespace } } } }, }, - '#withFsType':: d.fn(help='"Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd"', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType is the filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd"', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { spec+: { rbd+: { fsType: fsType } } }, - '#withImage':: d.fn(help='"The rados image name. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='image', type=d.T.string)]), + '#withImage':: d.fn(help='"image is the rados image name. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='image', type=d.T.string)]), withImage(image): { spec+: { rbd+: { image: image } } }, - '#withKeyring':: d.fn(help='"Keyring is the path to key ring for RBDUser. Default is /etc/ceph/keyring. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='keyring', type=d.T.string)]), + '#withKeyring':: d.fn(help='"keyring is the path to key ring for RBDUser. Default is /etc/ceph/keyring. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='keyring', type=d.T.string)]), withKeyring(keyring): { spec+: { rbd+: { keyring: keyring } } }, - '#withMonitors':: d.fn(help='"A collection of Ceph monitors. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='monitors', type=d.T.array)]), + '#withMonitors':: d.fn(help='"monitors is a collection of Ceph monitors. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='monitors', type=d.T.array)]), withMonitors(monitors): { spec+: { rbd+: { monitors: if std.isArray(v=monitors) then monitors else [monitors] } } }, - '#withMonitorsMixin':: d.fn(help='"A collection of Ceph monitors. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='monitors', type=d.T.array)]), + '#withMonitorsMixin':: d.fn(help='"monitors is a collection of Ceph monitors. 
More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='monitors', type=d.T.array)]), withMonitorsMixin(monitors): { spec+: { rbd+: { monitors+: if std.isArray(v=monitors) then monitors else [monitors] } } }, - '#withPool':: d.fn(help='"The rados pool name. Default is rbd. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='pool', type=d.T.string)]), + '#withPool':: d.fn(help='"pool is the rados pool name. Default is rbd. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='pool', type=d.T.string)]), withPool(pool): { spec+: { rbd+: { pool: pool } } }, - '#withReadOnly':: d.fn(help='"ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { spec+: { rbd+: { readOnly: readOnly } } }, - '#withUser':: d.fn(help='"The rados user name. Default is admin. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='user', type=d.T.string)]), + '#withUser':: d.fn(help='"user is the rados user name. Default is admin. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='user', type=d.T.string)]), withUser(user): { spec+: { rbd+: { user: user } } }, }, '#scaleIO':: d.obj(help='"ScaleIOPersistentVolumeSource represents a persistent ScaleIO volume"'), scaleIO: { '#secretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), secretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), withName(name): { spec+: { scaleIO+: { secretRef+: { name: name } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { spec+: { scaleIO+: { secretRef+: { namespace: namespace } } } }, }, - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Default is \\"xfs\\', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". 
Default is \\"xfs\\', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { spec+: { scaleIO+: { fsType: fsType } } }, - '#withGateway':: d.fn(help='"The host address of the ScaleIO API Gateway."', args=[d.arg(name='gateway', type=d.T.string)]), + '#withGateway':: d.fn(help='"gateway is the host address of the ScaleIO API Gateway."', args=[d.arg(name='gateway', type=d.T.string)]), withGateway(gateway): { spec+: { scaleIO+: { gateway: gateway } } }, - '#withProtectionDomain':: d.fn(help='"The name of the ScaleIO Protection Domain for the configured storage."', args=[d.arg(name='protectionDomain', type=d.T.string)]), + '#withProtectionDomain':: d.fn(help='"protectionDomain is the name of the ScaleIO Protection Domain for the configured storage."', args=[d.arg(name='protectionDomain', type=d.T.string)]), withProtectionDomain(protectionDomain): { spec+: { scaleIO+: { protectionDomain: protectionDomain } } }, - '#withReadOnly':: d.fn(help='"Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { spec+: { scaleIO+: { readOnly: readOnly } } }, - '#withSslEnabled':: d.fn(help='"Flag to enable/disable SSL communication with Gateway, default false"', args=[d.arg(name='sslEnabled', type=d.T.boolean)]), + '#withSslEnabled':: d.fn(help='"sslEnabled is the flag to enable/disable SSL communication with Gateway, default false"', args=[d.arg(name='sslEnabled', type=d.T.boolean)]), withSslEnabled(sslEnabled): { spec+: { scaleIO+: { sslEnabled: sslEnabled } } }, - '#withStorageMode':: d.fn(help='"Indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. Default is ThinProvisioned."', args=[d.arg(name='storageMode', type=d.T.string)]), + '#withStorageMode':: d.fn(help='"storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. 
Default is ThinProvisioned."', args=[d.arg(name='storageMode', type=d.T.string)]), withStorageMode(storageMode): { spec+: { scaleIO+: { storageMode: storageMode } } }, - '#withStoragePool':: d.fn(help='"The ScaleIO Storage Pool associated with the protection domain."', args=[d.arg(name='storagePool', type=d.T.string)]), + '#withStoragePool':: d.fn(help='"storagePool is the ScaleIO Storage Pool associated with the protection domain."', args=[d.arg(name='storagePool', type=d.T.string)]), withStoragePool(storagePool): { spec+: { scaleIO+: { storagePool: storagePool } } }, - '#withSystem':: d.fn(help='"The name of the storage system as configured in ScaleIO."', args=[d.arg(name='system', type=d.T.string)]), + '#withSystem':: d.fn(help='"system is the name of the storage system as configured in ScaleIO."', args=[d.arg(name='system', type=d.T.string)]), withSystem(system): { spec+: { scaleIO+: { system: system } } }, - '#withVolumeName':: d.fn(help='"The name of a volume already created in the ScaleIO system that is associated with this volume source."', args=[d.arg(name='volumeName', type=d.T.string)]), + '#withVolumeName':: d.fn(help='"volumeName is the name of a volume already created in the ScaleIO system that is associated with this volume source."', args=[d.arg(name='volumeName', type=d.T.string)]), withVolumeName(volumeName): { spec+: { scaleIO+: { volumeName: volumeName } } }, }, '#storageos':: d.obj(help='"Represents a StorageOS persistent volume resource."'), @@ -423,42 +428,44 @@ '#withUid':: d.fn(help='"UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { spec+: { storageos+: { secretRef+: { uid: uid } } } }, }, - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { spec+: { storageos+: { fsType: fsType } } }, - '#withReadOnly':: d.fn(help='"Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { spec+: { storageos+: { readOnly: readOnly } } }, - '#withVolumeName':: d.fn(help='"VolumeName is the human-readable name of the StorageOS volume. Volume names are only unique within a namespace."', args=[d.arg(name='volumeName', type=d.T.string)]), + '#withVolumeName':: d.fn(help='"volumeName is the human-readable name of the StorageOS volume. Volume names are only unique within a namespace."', args=[d.arg(name='volumeName', type=d.T.string)]), withVolumeName(volumeName): { spec+: { storageos+: { volumeName: volumeName } } }, - '#withVolumeNamespace':: d.fn(help="\"VolumeNamespace specifies the scope of the volume within StorageOS. If no namespace is specified then the Pod's namespace will be used. 
This allows the Kubernetes name scoping to be mirrored within StorageOS for tighter integration. Set VolumeName to any name to override the default behaviour. Set to \\\"default\\\" if you are not using namespaces within StorageOS. Namespaces that do not pre-exist within StorageOS will be created.\"", args=[d.arg(name='volumeNamespace', type=d.T.string)]), + '#withVolumeNamespace':: d.fn(help="\"volumeNamespace specifies the scope of the volume within StorageOS. If no namespace is specified then the Pod's namespace will be used. This allows the Kubernetes name scoping to be mirrored within StorageOS for tighter integration. Set VolumeName to any name to override the default behaviour. Set to \\\"default\\\" if you are not using namespaces within StorageOS. Namespaces that do not pre-exist within StorageOS will be created.\"", args=[d.arg(name='volumeNamespace', type=d.T.string)]), withVolumeNamespace(volumeNamespace): { spec+: { storageos+: { volumeNamespace: volumeNamespace } } }, }, '#vsphereVolume':: d.obj(help='"Represents a vSphere volume resource."'), vsphereVolume: { - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType is filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { spec+: { vsphereVolume+: { fsType: fsType } } }, - '#withStoragePolicyID':: d.fn(help='"Storage Policy Based Management (SPBM) profile ID associated with the StoragePolicyName."', args=[d.arg(name='storagePolicyID', type=d.T.string)]), + '#withStoragePolicyID':: d.fn(help='"storagePolicyID is the storage Policy Based Management (SPBM) profile ID associated with the StoragePolicyName."', args=[d.arg(name='storagePolicyID', type=d.T.string)]), withStoragePolicyID(storagePolicyID): { spec+: { vsphereVolume+: { storagePolicyID: storagePolicyID } } }, - '#withStoragePolicyName':: d.fn(help='"Storage Policy Based Management (SPBM) profile name."', args=[d.arg(name='storagePolicyName', type=d.T.string)]), + '#withStoragePolicyName':: d.fn(help='"storagePolicyName is the storage Policy Based Management (SPBM) profile name."', args=[d.arg(name='storagePolicyName', type=d.T.string)]), withStoragePolicyName(storagePolicyName): { spec+: { vsphereVolume+: { storagePolicyName: storagePolicyName } } }, - '#withVolumePath':: d.fn(help='"Path that identifies vSphere volume vmdk"', args=[d.arg(name='volumePath', type=d.T.string)]), + '#withVolumePath':: d.fn(help='"volumePath is the path that identifies vSphere volume vmdk"', args=[d.arg(name='volumePath', type=d.T.string)]), withVolumePath(volumePath): { spec+: { vsphereVolume+: { volumePath: volumePath } } }, }, - '#withAccessModes':: d.fn(help='"AccessModes contains all ways the volume can be mounted. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes"', args=[d.arg(name='accessModes', type=d.T.array)]), + '#withAccessModes':: d.fn(help='"accessModes contains all ways the volume can be mounted. 
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes"', args=[d.arg(name='accessModes', type=d.T.array)]), withAccessModes(accessModes): { spec+: { accessModes: if std.isArray(v=accessModes) then accessModes else [accessModes] } }, - '#withAccessModesMixin':: d.fn(help='"AccessModes contains all ways the volume can be mounted. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='accessModes', type=d.T.array)]), + '#withAccessModesMixin':: d.fn(help='"accessModes contains all ways the volume can be mounted. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='accessModes', type=d.T.array)]), withAccessModesMixin(accessModes): { spec+: { accessModes+: if std.isArray(v=accessModes) then accessModes else [accessModes] } }, - '#withCapacity':: d.fn(help="\"A description of the persistent volume's resources and capacity. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity\"", args=[d.arg(name='capacity', type=d.T.object)]), + '#withCapacity':: d.fn(help="\"capacity is the description of the persistent volume's resources and capacity. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity\"", args=[d.arg(name='capacity', type=d.T.object)]), withCapacity(capacity): { spec+: { capacity: capacity } }, - '#withCapacityMixin':: d.fn(help="\"A description of the persistent volume's resources and capacity. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='capacity', type=d.T.object)]), + '#withCapacityMixin':: d.fn(help="\"capacity is the description of the persistent volume's resources and capacity. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='capacity', type=d.T.object)]), withCapacityMixin(capacity): { spec+: { capacity+: capacity } }, - '#withMountOptions':: d.fn(help='"A list of mount options, e.g. [\\"ro\\", \\"soft\\"]. Not validated - mount will simply fail if one is invalid. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#mount-options"', args=[d.arg(name='mountOptions', type=d.T.array)]), + '#withMountOptions':: d.fn(help='"mountOptions is the list of mount options, e.g. [\\"ro\\", \\"soft\\"]. Not validated - mount will simply fail if one is invalid. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#mount-options"', args=[d.arg(name='mountOptions', type=d.T.array)]), withMountOptions(mountOptions): { spec+: { mountOptions: if std.isArray(v=mountOptions) then mountOptions else [mountOptions] } }, - '#withMountOptionsMixin':: d.fn(help='"A list of mount options, e.g. [\\"ro\\", \\"soft\\"]. Not validated - mount will simply fail if one is invalid. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#mount-options"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='mountOptions', type=d.T.array)]), + '#withMountOptionsMixin':: d.fn(help='"mountOptions is the list of mount options, e.g. [\\"ro\\", \\"soft\\"]. Not validated - mount will simply fail if one is invalid. 
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#mount-options"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='mountOptions', type=d.T.array)]), withMountOptionsMixin(mountOptions): { spec+: { mountOptions+: if std.isArray(v=mountOptions) then mountOptions else [mountOptions] } }, - '#withPersistentVolumeReclaimPolicy':: d.fn(help='"What happens to a persistent volume when released from its claim. Valid options are Retain (default for manually created PersistentVolumes), Delete (default for dynamically provisioned PersistentVolumes), and Recycle (deprecated). Recycle must be supported by the volume plugin underlying this PersistentVolume. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#reclaiming"', args=[d.arg(name='persistentVolumeReclaimPolicy', type=d.T.string)]), + '#withPersistentVolumeReclaimPolicy':: d.fn(help='"persistentVolumeReclaimPolicy defines what happens to a persistent volume when released from its claim. Valid options are Retain (default for manually created PersistentVolumes), Delete (default for dynamically provisioned PersistentVolumes), and Recycle (deprecated). Recycle must be supported by the volume plugin underlying this PersistentVolume. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#reclaiming"', args=[d.arg(name='persistentVolumeReclaimPolicy', type=d.T.string)]), withPersistentVolumeReclaimPolicy(persistentVolumeReclaimPolicy): { spec+: { persistentVolumeReclaimPolicy: persistentVolumeReclaimPolicy } }, - '#withStorageClassName':: d.fn(help='"Name of StorageClass to which this persistent volume belongs. Empty value means that this volume does not belong to any StorageClass."', args=[d.arg(name='storageClassName', type=d.T.string)]), + '#withStorageClassName':: d.fn(help='"storageClassName is the name of StorageClass to which this persistent volume belongs. Empty value means that this volume does not belong to any StorageClass."', args=[d.arg(name='storageClassName', type=d.T.string)]), withStorageClassName(storageClassName): { spec+: { storageClassName: storageClassName } }, + '#withVolumeAttributesClassName':: d.fn(help='"Name of VolumeAttributesClass to which this persistent volume belongs. Empty value is not allowed. When this field is not set, it indicates that this volume does not belong to any VolumeAttributesClass. This field is mutable and can be changed by the CSI driver after a volume has been updated successfully to a new class. For an unbound PersistentVolume, the volumeAttributesClassName will be matched with unbound PersistentVolumeClaims during the binding process. This is an alpha field and requires enabling VolumeAttributesClass feature."', args=[d.arg(name='volumeAttributesClassName', type=d.T.string)]), + withVolumeAttributesClassName(volumeAttributesClassName): { spec+: { volumeAttributesClassName: volumeAttributesClassName } }, '#withVolumeMode':: d.fn(help='"volumeMode defines if a volume is intended to be used with a formatted filesystem or to remain in raw block state. 
Value of Filesystem is implied when not included in spec."', args=[d.arg(name='volumeMode', type=d.T.string)]), withVolumeMode(volumeMode): { spec+: { volumeMode: volumeMode } }, }, diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/persistentVolumeClaim.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/persistentVolumeClaim.libsonnet similarity index 75% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/persistentVolumeClaim.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/persistentVolumeClaim.libsonnet index 43fb102e610..ea146d73c7e 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/persistentVolumeClaim.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/persistentVolumeClaim.libsonnet @@ -3,12 +3,10 @@ '#':: d.pkg(name='persistentVolumeClaim', url='', help="\"PersistentVolumeClaim is a user's request for and claim to a persistent volume\""), '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. 
This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -19,21 +17,21 @@ withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. 
This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { metadata+: { generateName: generateName } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { metadata+: { labels+: labels } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". 
The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { metadata+: { namespace: namespace } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, @@ -41,9 +39,9 @@ withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. 
They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { metadata+: { uid: uid } }, }, '#new':: d.fn(help='new returns an instance of PersistentVolumeClaim', args=[d.arg(name='name', type=d.T.string)]), @@ -62,15 +60,26 @@ '#withName':: d.fn(help='"Name is the name of resource being referenced"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { spec+: { dataSource+: { name: name } } }, }, - '#resources':: d.obj(help='"ResourceRequirements describes the compute resource requirements."'), + '#dataSourceRef':: d.obj(help=''), + dataSourceRef: { + '#withApiGroup':: d.fn(help='"APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required."', args=[d.arg(name='apiGroup', type=d.T.string)]), + withApiGroup(apiGroup): { spec+: { dataSourceRef+: { apiGroup: apiGroup } } }, + '#withKind':: d.fn(help='"Kind is the type of resource being referenced"', args=[d.arg(name='kind', type=d.T.string)]), + withKind(kind): { spec+: { dataSourceRef+: { kind: kind } } }, + '#withName':: d.fn(help='"Name is the name of resource being referenced"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { spec+: { dataSourceRef+: { name: name } } }, + '#withNamespace':: d.fn(help="\"Namespace is the namespace of resource being referenced Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. 
(Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled.\"", args=[d.arg(name='namespace', type=d.T.string)]), + withNamespace(namespace): { spec+: { dataSourceRef+: { namespace: namespace } } }, + }, + '#resources':: d.obj(help='"VolumeResourceRequirements describes the storage resource requirements for a volume."'), resources: { '#withLimits':: d.fn(help='"Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/"', args=[d.arg(name='limits', type=d.T.object)]), withLimits(limits): { spec+: { resources+: { limits: limits } } }, '#withLimitsMixin':: d.fn(help='"Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='limits', type=d.T.object)]), withLimitsMixin(limits): { spec+: { resources+: { limits+: limits } } }, - '#withRequests':: d.fn(help='"Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/"', args=[d.arg(name='requests', type=d.T.object)]), + '#withRequests':: d.fn(help='"Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/"', args=[d.arg(name='requests', type=d.T.object)]), withRequests(requests): { spec+: { resources+: { requests: requests } } }, - '#withRequestsMixin':: d.fn(help='"Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='requests', type=d.T.object)]), + '#withRequestsMixin':: d.fn(help='"Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='requests', type=d.T.object)]), withRequestsMixin(requests): { spec+: { resources+: { requests+: requests } } }, }, '#selector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), @@ -84,15 +93,17 @@ '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". 
The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), withMatchLabelsMixin(matchLabels): { spec+: { selector+: { matchLabels+: matchLabels } } }, }, - '#withAccessModes':: d.fn(help='"AccessModes contains the desired access modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1"', args=[d.arg(name='accessModes', type=d.T.array)]), + '#withAccessModes':: d.fn(help='"accessModes contains the desired access modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1"', args=[d.arg(name='accessModes', type=d.T.array)]), withAccessModes(accessModes): { spec+: { accessModes: if std.isArray(v=accessModes) then accessModes else [accessModes] } }, - '#withAccessModesMixin':: d.fn(help='"AccessModes contains the desired access modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='accessModes', type=d.T.array)]), + '#withAccessModesMixin':: d.fn(help='"accessModes contains the desired access modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='accessModes', type=d.T.array)]), withAccessModesMixin(accessModes): { spec+: { accessModes+: if std.isArray(v=accessModes) then accessModes else [accessModes] } }, - '#withStorageClassName':: d.fn(help='"Name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1"', args=[d.arg(name='storageClassName', type=d.T.string)]), + '#withStorageClassName':: d.fn(help='"storageClassName is the name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1"', args=[d.arg(name='storageClassName', type=d.T.string)]), withStorageClassName(storageClassName): { spec+: { storageClassName: storageClassName } }, + '#withVolumeAttributesClassName':: d.fn(help="\"volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. If specified, the CSI driver will create or update the volume with the attributes defined in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass will be applied to the claim but it's not allowed to reset this field to empty string once it is set. If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass will be set by the persistentvolume controller if it exists. If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource exists. 
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#volumeattributesclass (Alpha) Using this field requires the VolumeAttributesClass feature gate to be enabled.\"", args=[d.arg(name='volumeAttributesClassName', type=d.T.string)]), + withVolumeAttributesClassName(volumeAttributesClassName): { spec+: { volumeAttributesClassName: volumeAttributesClassName } }, '#withVolumeMode':: d.fn(help='"volumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec."', args=[d.arg(name='volumeMode', type=d.T.string)]), withVolumeMode(volumeMode): { spec+: { volumeMode: volumeMode } }, - '#withVolumeName':: d.fn(help='"VolumeName is the binding reference to the PersistentVolume backing this claim."', args=[d.arg(name='volumeName', type=d.T.string)]), + '#withVolumeName':: d.fn(help='"volumeName is the binding reference to the PersistentVolume backing this claim."', args=[d.arg(name='volumeName', type=d.T.string)]), withVolumeName(volumeName): { spec+: { volumeName: volumeName } }, }, '#mixin': 'ignore', diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/persistentVolumeClaimCondition.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/persistentVolumeClaimCondition.libsonnet similarity index 66% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/persistentVolumeClaimCondition.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/persistentVolumeClaimCondition.libsonnet index 80bd3eb5b7b..d4dd24ade92 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/persistentVolumeClaimCondition.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/persistentVolumeClaimCondition.libsonnet @@ -1,13 +1,13 @@ { local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='persistentVolumeClaimCondition', url='', help='"PersistentVolumeClaimCondition contails details about state of pvc"'), + '#':: d.pkg(name='persistentVolumeClaimCondition', url='', help='"PersistentVolumeClaimCondition contains details about state of pvc"'), '#withLastProbeTime':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='lastProbeTime', type=d.T.string)]), withLastProbeTime(lastProbeTime): { lastProbeTime: lastProbeTime }, '#withLastTransitionTime':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='lastTransitionTime', type=d.T.string)]), withLastTransitionTime(lastTransitionTime): { lastTransitionTime: lastTransitionTime }, - '#withMessage':: d.fn(help='"Human-readable message indicating details about last transition."', args=[d.arg(name='message', type=d.T.string)]), + '#withMessage':: d.fn(help='"message is the human-readable message indicating details about last transition."', args=[d.arg(name='message', type=d.T.string)]), withMessage(message): { message: message }, - '#withReason':: d.fn(help="\"Unique, this should be a short, machine understandable string that gives the reason for condition's last transition. 
If it reports \\\"ResizeStarted\\\" that means the underlying persistent volume is being resized.\"", args=[d.arg(name='reason', type=d.T.string)]), + '#withReason':: d.fn(help="\"reason is a unique, this should be a short, machine understandable string that gives the reason for condition's last transition. If it reports \\\"ResizeStarted\\\" that means the underlying persistent volume is being resized.\"", args=[d.arg(name='reason', type=d.T.string)]), withReason(reason): { reason: reason }, '#withType':: d.fn(help='', args=[d.arg(name='type', type=d.T.string)]), withType(type): { type: type }, diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/persistentVolumeClaimSpec.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/persistentVolumeClaimSpec.libsonnet similarity index 60% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/persistentVolumeClaimSpec.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/persistentVolumeClaimSpec.libsonnet index 37a653e22c1..811c0cf70a9 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/persistentVolumeClaimSpec.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/persistentVolumeClaimSpec.libsonnet @@ -10,15 +10,26 @@ '#withName':: d.fn(help='"Name is the name of resource being referenced"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { dataSource+: { name: name } }, }, - '#resources':: d.obj(help='"ResourceRequirements describes the compute resource requirements."'), + '#dataSourceRef':: d.obj(help=''), + dataSourceRef: { + '#withApiGroup':: d.fn(help='"APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required."', args=[d.arg(name='apiGroup', type=d.T.string)]), + withApiGroup(apiGroup): { dataSourceRef+: { apiGroup: apiGroup } }, + '#withKind':: d.fn(help='"Kind is the type of resource being referenced"', args=[d.arg(name='kind', type=d.T.string)]), + withKind(kind): { dataSourceRef+: { kind: kind } }, + '#withName':: d.fn(help='"Name is the name of resource being referenced"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { dataSourceRef+: { name: name } }, + '#withNamespace':: d.fn(help="\"Namespace is the namespace of resource being referenced Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled.\"", args=[d.arg(name='namespace', type=d.T.string)]), + withNamespace(namespace): { dataSourceRef+: { namespace: namespace } }, + }, + '#resources':: d.obj(help='"VolumeResourceRequirements describes the storage resource requirements for a volume."'), resources: { '#withLimits':: d.fn(help='"Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/"', args=[d.arg(name='limits', type=d.T.object)]), withLimits(limits): { resources+: { limits: limits } }, '#withLimitsMixin':: d.fn(help='"Limits describes the maximum amount of compute resources allowed. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='limits', type=d.T.object)]), withLimitsMixin(limits): { resources+: { limits+: limits } }, - '#withRequests':: d.fn(help='"Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/"', args=[d.arg(name='requests', type=d.T.object)]), + '#withRequests':: d.fn(help='"Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/"', args=[d.arg(name='requests', type=d.T.object)]), withRequests(requests): { resources+: { requests: requests } }, - '#withRequestsMixin':: d.fn(help='"Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='requests', type=d.T.object)]), + '#withRequestsMixin':: d.fn(help='"Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='requests', type=d.T.object)]), withRequestsMixin(requests): { resources+: { requests+: requests } }, }, '#selector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), @@ -32,15 +43,17 @@ '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), withMatchLabelsMixin(matchLabels): { selector+: { matchLabels+: matchLabels } }, }, - '#withAccessModes':: d.fn(help='"AccessModes contains the desired access modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1"', args=[d.arg(name='accessModes', type=d.T.array)]), + '#withAccessModes':: d.fn(help='"accessModes contains the desired access modes the volume should have. 
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1"', args=[d.arg(name='accessModes', type=d.T.array)]), withAccessModes(accessModes): { accessModes: if std.isArray(v=accessModes) then accessModes else [accessModes] }, - '#withAccessModesMixin':: d.fn(help='"AccessModes contains the desired access modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='accessModes', type=d.T.array)]), + '#withAccessModesMixin':: d.fn(help='"accessModes contains the desired access modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='accessModes', type=d.T.array)]), withAccessModesMixin(accessModes): { accessModes+: if std.isArray(v=accessModes) then accessModes else [accessModes] }, - '#withStorageClassName':: d.fn(help='"Name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1"', args=[d.arg(name='storageClassName', type=d.T.string)]), + '#withStorageClassName':: d.fn(help='"storageClassName is the name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1"', args=[d.arg(name='storageClassName', type=d.T.string)]), withStorageClassName(storageClassName): { storageClassName: storageClassName }, + '#withVolumeAttributesClassName':: d.fn(help="\"volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. If specified, the CSI driver will create or update the volume with the attributes defined in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass will be applied to the claim but it's not allowed to reset this field to empty string once it is set. If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass will be set by the persistentvolume controller if it exists. If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource exists. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#volumeattributesclass (Alpha) Using this field requires the VolumeAttributesClass feature gate to be enabled.\"", args=[d.arg(name='volumeAttributesClassName', type=d.T.string)]), + withVolumeAttributesClassName(volumeAttributesClassName): { volumeAttributesClassName: volumeAttributesClassName }, '#withVolumeMode':: d.fn(help='"volumeMode defines what type of volume is required by the claim. 
Value of Filesystem is implied when not included in claim spec."', args=[d.arg(name='volumeMode', type=d.T.string)]), withVolumeMode(volumeMode): { volumeMode: volumeMode }, - '#withVolumeName':: d.fn(help='"VolumeName is the binding reference to the PersistentVolume backing this claim."', args=[d.arg(name='volumeName', type=d.T.string)]), + '#withVolumeName':: d.fn(help='"volumeName is the binding reference to the PersistentVolume backing this claim."', args=[d.arg(name='volumeName', type=d.T.string)]), withVolumeName(volumeName): { volumeName: volumeName }, '#mixin': 'ignore', mixin: self, diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/persistentVolumeClaimStatus.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/persistentVolumeClaimStatus.libsonnet new file mode 100644 index 00000000000..371bca51a82 --- /dev/null +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/persistentVolumeClaimStatus.libsonnet @@ -0,0 +1,35 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='persistentVolumeClaimStatus', url='', help='"PersistentVolumeClaimStatus is the current status of a persistent volume claim."'), + '#modifyVolumeStatus':: d.obj(help='"ModifyVolumeStatus represents the status object of ControllerModifyVolume operation"'), + modifyVolumeStatus: { + '#withTargetVolumeAttributesClassName':: d.fn(help='"targetVolumeAttributesClassName is the name of the VolumeAttributesClass the PVC currently being reconciled"', args=[d.arg(name='targetVolumeAttributesClassName', type=d.T.string)]), + withTargetVolumeAttributesClassName(targetVolumeAttributesClassName): { modifyVolumeStatus+: { targetVolumeAttributesClassName: targetVolumeAttributesClassName } }, + }, + '#withAccessModes':: d.fn(help='"accessModes contains the actual access modes the volume backing the PVC has. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1"', args=[d.arg(name='accessModes', type=d.T.array)]), + withAccessModes(accessModes): { accessModes: if std.isArray(v=accessModes) then accessModes else [accessModes] }, + '#withAccessModesMixin':: d.fn(help='"accessModes contains the actual access modes the volume backing the PVC has. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='accessModes', type=d.T.array)]), + withAccessModesMixin(accessModes): { accessModes+: if std.isArray(v=accessModes) then accessModes else [accessModes] }, + '#withAllocatedResourceStatuses':: d.fn(help="\"allocatedResourceStatuses stores status of resource being resized for the given PVC. Key names follow standard Kubernetes label syntax. 
Valid values are either:\\n\\t* Un-prefixed keys:\\n\\t\\t- storage - the capacity of the volume.\\n\\t* Custom resources must use implementation-defined prefixed names such as \\\"example.com/my-custom-resource\\\"\\nApart from above values - keys that are unprefixed or have kubernetes.io prefix are considered reserved and hence may not be used.\\n\\nClaimResourceStatus can be in any of following states:\\n\\t- ControllerResizeInProgress:\\n\\t\\tState set when resize controller starts resizing the volume in control-plane.\\n\\t- ControllerResizeFailed:\\n\\t\\tState set when resize has failed in resize controller with a terminal error.\\n\\t- NodeResizePending:\\n\\t\\tState set when resize controller has finished resizing the volume but further resizing of\\n\\t\\tvolume is needed on the node.\\n\\t- NodeResizeInProgress:\\n\\t\\tState set when kubelet starts resizing the volume.\\n\\t- NodeResizeFailed:\\n\\t\\tState set when resizing has failed in kubelet with a terminal error. Transient errors don't set\\n\\t\\tNodeResizeFailed.\\nFor example: if expanding a PVC for more capacity - this field can be one of the following states:\\n\\t- pvc.status.allocatedResourceStatus['storage'] = \\\"ControllerResizeInProgress\\\"\\n - pvc.status.allocatedResourceStatus['storage'] = \\\"ControllerResizeFailed\\\"\\n - pvc.status.allocatedResourceStatus['storage'] = \\\"NodeResizePending\\\"\\n - pvc.status.allocatedResourceStatus['storage'] = \\\"NodeResizeInProgress\\\"\\n - pvc.status.allocatedResourceStatus['storage'] = \\\"NodeResizeFailed\\\"\\nWhen this field is not set, it means that no resize operation is in progress for the given PVC.\\n\\nA controller that receives PVC update with previously unknown resourceName or ClaimResourceStatus should ignore the update for the purpose it was designed. For example - a controller that only is responsible for resizing capacity of the volume, should ignore PVC updates that change other valid resources associated with PVC.\\n\\nThis is an alpha field and requires enabling RecoverVolumeExpansionFailure feature.\"", args=[d.arg(name='allocatedResourceStatuses', type=d.T.object)]), + withAllocatedResourceStatuses(allocatedResourceStatuses): { allocatedResourceStatuses: allocatedResourceStatuses }, + '#withAllocatedResourceStatusesMixin':: d.fn(help="\"allocatedResourceStatuses stores status of resource being resized for the given PVC. Key names follow standard Kubernetes label syntax. Valid values are either:\\n\\t* Un-prefixed keys:\\n\\t\\t- storage - the capacity of the volume.\\n\\t* Custom resources must use implementation-defined prefixed names such as \\\"example.com/my-custom-resource\\\"\\nApart from above values - keys that are unprefixed or have kubernetes.io prefix are considered reserved and hence may not be used.\\n\\nClaimResourceStatus can be in any of following states:\\n\\t- ControllerResizeInProgress:\\n\\t\\tState set when resize controller starts resizing the volume in control-plane.\\n\\t- ControllerResizeFailed:\\n\\t\\tState set when resize has failed in resize controller with a terminal error.\\n\\t- NodeResizePending:\\n\\t\\tState set when resize controller has finished resizing the volume but further resizing of\\n\\t\\tvolume is needed on the node.\\n\\t- NodeResizeInProgress:\\n\\t\\tState set when kubelet starts resizing the volume.\\n\\t- NodeResizeFailed:\\n\\t\\tState set when resizing has failed in kubelet with a terminal error. 
Transient errors don't set\\n\\t\\tNodeResizeFailed.\\nFor example: if expanding a PVC for more capacity - this field can be one of the following states:\\n\\t- pvc.status.allocatedResourceStatus['storage'] = \\\"ControllerResizeInProgress\\\"\\n - pvc.status.allocatedResourceStatus['storage'] = \\\"ControllerResizeFailed\\\"\\n - pvc.status.allocatedResourceStatus['storage'] = \\\"NodeResizePending\\\"\\n - pvc.status.allocatedResourceStatus['storage'] = \\\"NodeResizeInProgress\\\"\\n - pvc.status.allocatedResourceStatus['storage'] = \\\"NodeResizeFailed\\\"\\nWhen this field is not set, it means that no resize operation is in progress for the given PVC.\\n\\nA controller that receives PVC update with previously unknown resourceName or ClaimResourceStatus should ignore the update for the purpose it was designed. For example - a controller that only is responsible for resizing capacity of the volume, should ignore PVC updates that change other valid resources associated with PVC.\\n\\nThis is an alpha field and requires enabling RecoverVolumeExpansionFailure feature.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='allocatedResourceStatuses', type=d.T.object)]), + withAllocatedResourceStatusesMixin(allocatedResourceStatuses): { allocatedResourceStatuses+: allocatedResourceStatuses }, + '#withAllocatedResources':: d.fn(help='"allocatedResources tracks the resources allocated to a PVC including its capacity. Key names follow standard Kubernetes label syntax. Valid values are either:\\n\\t* Un-prefixed keys:\\n\\t\\t- storage - the capacity of the volume.\\n\\t* Custom resources must use implementation-defined prefixed names such as \\"example.com/my-custom-resource\\"\\nApart from above values - keys that are unprefixed or have kubernetes.io prefix are considered reserved and hence may not be used.\\n\\nCapacity reported here may be larger than the actual capacity when a volume expansion operation is requested. For storage quota, the larger value from allocatedResources and PVC.spec.resources is used. If allocatedResources is not set, PVC.spec.resources alone is used for quota calculation. If a volume expansion capacity request is lowered, allocatedResources is only lowered if there are no expansion operations in progress and if the actual volume capacity is equal or lower than the requested capacity.\\n\\nA controller that receives PVC update with previously unknown resourceName should ignore the update for the purpose it was designed. For example - a controller that only is responsible for resizing capacity of the volume, should ignore PVC updates that change other valid resources associated with PVC.\\n\\nThis is an alpha field and requires enabling RecoverVolumeExpansionFailure feature."', args=[d.arg(name='allocatedResources', type=d.T.object)]), + withAllocatedResources(allocatedResources): { allocatedResources: allocatedResources }, + '#withAllocatedResourcesMixin':: d.fn(help='"allocatedResources tracks the resources allocated to a PVC including its capacity. Key names follow standard Kubernetes label syntax. 
Valid values are either:\\n\\t* Un-prefixed keys:\\n\\t\\t- storage - the capacity of the volume.\\n\\t* Custom resources must use implementation-defined prefixed names such as \\"example.com/my-custom-resource\\"\\nApart from above values - keys that are unprefixed or have kubernetes.io prefix are considered reserved and hence may not be used.\\n\\nCapacity reported here may be larger than the actual capacity when a volume expansion operation is requested. For storage quota, the larger value from allocatedResources and PVC.spec.resources is used. If allocatedResources is not set, PVC.spec.resources alone is used for quota calculation. If a volume expansion capacity request is lowered, allocatedResources is only lowered if there are no expansion operations in progress and if the actual volume capacity is equal or lower than the requested capacity.\\n\\nA controller that receives PVC update with previously unknown resourceName should ignore the update for the purpose it was designed. For example - a controller that only is responsible for resizing capacity of the volume, should ignore PVC updates that change other valid resources associated with PVC.\\n\\nThis is an alpha field and requires enabling RecoverVolumeExpansionFailure feature."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='allocatedResources', type=d.T.object)]), + withAllocatedResourcesMixin(allocatedResources): { allocatedResources+: allocatedResources }, + '#withCapacity':: d.fn(help='"capacity represents the actual resources of the underlying volume."', args=[d.arg(name='capacity', type=d.T.object)]), + withCapacity(capacity): { capacity: capacity }, + '#withCapacityMixin':: d.fn(help='"capacity represents the actual resources of the underlying volume."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='capacity', type=d.T.object)]), + withCapacityMixin(capacity): { capacity+: capacity }, + '#withConditions':: d.fn(help="\"conditions is the current Condition of persistent volume claim. If underlying persistent volume is being resized then the Condition will be set to 'ResizeStarted'.\"", args=[d.arg(name='conditions', type=d.T.array)]), + withConditions(conditions): { conditions: if std.isArray(v=conditions) then conditions else [conditions] }, + '#withConditionsMixin':: d.fn(help="\"conditions is the current Condition of persistent volume claim. If underlying persistent volume is being resized then the Condition will be set to 'ResizeStarted'.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='conditions', type=d.T.array)]), + withConditionsMixin(conditions): { conditions+: if std.isArray(v=conditions) then conditions else [conditions] }, + '#withCurrentVolumeAttributesClassName':: d.fn(help='"currentVolumeAttributesClassName is the current name of the VolumeAttributesClass the PVC is using. 
When unset, there is no VolumeAttributeClass applied to this PersistentVolumeClaim This is an alpha field and requires enabling VolumeAttributesClass feature."', args=[d.arg(name='currentVolumeAttributesClassName', type=d.T.string)]),
+ withCurrentVolumeAttributesClassName(currentVolumeAttributesClassName): { currentVolumeAttributesClassName: currentVolumeAttributesClassName },
+ '#withPhase':: d.fn(help='"phase represents the current phase of PersistentVolumeClaim."', args=[d.arg(name='phase', type=d.T.string)]),
+ withPhase(phase): { phase: phase },
+ '#mixin': 'ignore',
+ mixin: self,
+}
diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/persistentVolumeClaimTemplate.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/persistentVolumeClaimTemplate.libsonnet
similarity index 75%
rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/persistentVolumeClaimTemplate.libsonnet
rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/persistentVolumeClaimTemplate.libsonnet
index 3e0ace78d50..92d58d84b99 100644
--- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/persistentVolumeClaimTemplate.libsonnet
+++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/persistentVolumeClaimTemplate.libsonnet
@@ -3,12 +3,10 @@
'#':: d.pkg(name='persistentVolumeClaimTemplate', url='', help='"PersistentVolumeClaimTemplate is used to produce PersistentVolumeClaim objects as part of an EphemeralVolumeSource."'),
'#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'),
metadata: {
- '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]),
+ '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]),
withAnnotations(annotations): { metadata+: { annotations: annotations } },
- '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]),
+ '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects.
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -19,21 +17,21 @@ withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. 
The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { metadata+: { generateName: generateName } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { metadata+: { labels+: labels } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. 
This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { metadata+: { namespace: namespace } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. 
There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, @@ -41,9 +39,9 @@ withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { metadata+: { uid: uid } }, }, '#spec':: d.obj(help='"PersistentVolumeClaimSpec describes the common attributes of storage devices and allows a Source for provider-specific attributes"'), @@ -57,15 +55,26 @@ '#withName':: d.fn(help='"Name is the name of resource being referenced"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { spec+: { dataSource+: { name: name } } }, }, - '#resources':: d.obj(help='"ResourceRequirements describes the compute resource requirements."'), + '#dataSourceRef':: d.obj(help=''), + dataSourceRef: { + '#withApiGroup':: d.fn(help='"APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. 
For any other third-party types, APIGroup is required."', args=[d.arg(name='apiGroup', type=d.T.string)]), + withApiGroup(apiGroup): { spec+: { dataSourceRef+: { apiGroup: apiGroup } } }, + '#withKind':: d.fn(help='"Kind is the type of resource being referenced"', args=[d.arg(name='kind', type=d.T.string)]), + withKind(kind): { spec+: { dataSourceRef+: { kind: kind } } }, + '#withName':: d.fn(help='"Name is the name of resource being referenced"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { spec+: { dataSourceRef+: { name: name } } }, + '#withNamespace':: d.fn(help="\"Namespace is the namespace of resource being referenced Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled.\"", args=[d.arg(name='namespace', type=d.T.string)]), + withNamespace(namespace): { spec+: { dataSourceRef+: { namespace: namespace } } }, + }, + '#resources':: d.obj(help='"VolumeResourceRequirements describes the storage resource requirements for a volume."'), resources: { '#withLimits':: d.fn(help='"Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/"', args=[d.arg(name='limits', type=d.T.object)]), withLimits(limits): { spec+: { resources+: { limits: limits } } }, '#withLimitsMixin':: d.fn(help='"Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='limits', type=d.T.object)]), withLimitsMixin(limits): { spec+: { resources+: { limits+: limits } } }, - '#withRequests':: d.fn(help='"Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/"', args=[d.arg(name='requests', type=d.T.object)]), + '#withRequests':: d.fn(help='"Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/"', args=[d.arg(name='requests', type=d.T.object)]), withRequests(requests): { spec+: { resources+: { requests: requests } } }, - '#withRequestsMixin':: d.fn(help='"Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='requests', type=d.T.object)]), + '#withRequestsMixin':: d.fn(help='"Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='requests', type=d.T.object)]), withRequestsMixin(requests): { spec+: { resources+: { requests+: requests } } }, }, '#selector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), @@ -79,15 +88,17 @@ '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), withMatchLabelsMixin(matchLabels): { spec+: { selector+: { matchLabels+: matchLabels } } }, }, - '#withAccessModes':: d.fn(help='"AccessModes contains the desired access modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1"', args=[d.arg(name='accessModes', type=d.T.array)]), + '#withAccessModes':: d.fn(help='"accessModes contains the desired access modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1"', args=[d.arg(name='accessModes', type=d.T.array)]), withAccessModes(accessModes): { spec+: { accessModes: if std.isArray(v=accessModes) then accessModes else [accessModes] } }, - '#withAccessModesMixin':: d.fn(help='"AccessModes contains the desired access modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='accessModes', type=d.T.array)]), + '#withAccessModesMixin':: d.fn(help='"accessModes contains the desired access modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='accessModes', type=d.T.array)]), withAccessModesMixin(accessModes): { spec+: { accessModes+: if std.isArray(v=accessModes) then accessModes else [accessModes] } }, - '#withStorageClassName':: d.fn(help='"Name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1"', args=[d.arg(name='storageClassName', type=d.T.string)]), + '#withStorageClassName':: d.fn(help='"storageClassName is the name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1"', args=[d.arg(name='storageClassName', type=d.T.string)]), withStorageClassName(storageClassName): { spec+: { storageClassName: storageClassName } }, + '#withVolumeAttributesClassName':: d.fn(help="\"volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. If specified, the CSI driver will create or update the volume with the attributes defined in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, it can be changed after the claim is created. 
An empty string value means that no VolumeAttributesClass will be applied to the claim but it's not allowed to reset this field to empty string once it is set. If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass will be set by the persistentvolume controller if it exists. If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource exists. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#volumeattributesclass (Alpha) Using this field requires the VolumeAttributesClass feature gate to be enabled.\"", args=[d.arg(name='volumeAttributesClassName', type=d.T.string)]),
+ withVolumeAttributesClassName(volumeAttributesClassName): { spec+: { volumeAttributesClassName: volumeAttributesClassName } },
'#withVolumeMode':: d.fn(help='"volumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec."', args=[d.arg(name='volumeMode', type=d.T.string)]),
withVolumeMode(volumeMode): { spec+: { volumeMode: volumeMode } },
- '#withVolumeName':: d.fn(help='"VolumeName is the binding reference to the PersistentVolume backing this claim."', args=[d.arg(name='volumeName', type=d.T.string)]),
+ '#withVolumeName':: d.fn(help='"volumeName is the binding reference to the PersistentVolume backing this claim."', args=[d.arg(name='volumeName', type=d.T.string)]),
withVolumeName(volumeName): { spec+: { volumeName: volumeName } },
},
'#mixin': 'ignore',
diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/persistentVolumeClaimVolumeSource.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/persistentVolumeClaimVolumeSource.libsonnet
similarity index 77%
rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/persistentVolumeClaimVolumeSource.libsonnet
rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/persistentVolumeClaimVolumeSource.libsonnet
index 5f434704f60..41a0af8d37d 100644
--- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/persistentVolumeClaimVolumeSource.libsonnet
+++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/persistentVolumeClaimVolumeSource.libsonnet
@@ -1,9 +1,9 @@
{
local d = (import 'doc-util/main.libsonnet'),
'#':: d.pkg(name='persistentVolumeClaimVolumeSource', url='', help="\"PersistentVolumeClaimVolumeSource references the user's PVC in the same namespace. This volume finds the bound PV and mounts that volume for the pod. A PersistentVolumeClaimVolumeSource is, essentially, a wrapper around another type of volume that is owned by someone else (the system).\""),
- '#withClaimName':: d.fn(help='"ClaimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims"', args=[d.arg(name='claimName', type=d.T.string)]),
+ '#withClaimName':: d.fn(help='"claimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims"', args=[d.arg(name='claimName', type=d.T.string)]),
withClaimName(claimName): { claimName: claimName },
- '#withReadOnly':: d.fn(help='"Will force the ReadOnly setting in VolumeMounts.
Default false."', args=[d.arg(name='readOnly', type=d.T.boolean)]),
+ '#withReadOnly':: d.fn(help='"readOnly Will force the ReadOnly setting in VolumeMounts. Default false."', args=[d.arg(name='readOnly', type=d.T.boolean)]),
withReadOnly(readOnly): { readOnly: readOnly },
'#mixin': 'ignore',
mixin: self,
diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/persistentVolumeSpec.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/persistentVolumeSpec.libsonnet
similarity index 54%
rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/persistentVolumeSpec.libsonnet
rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/persistentVolumeSpec.libsonnet
index 129be6a596e..e20d809ce6b 100644
--- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/persistentVolumeSpec.libsonnet
+++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/persistentVolumeSpec.libsonnet
@@ -3,77 +3,77 @@
'#':: d.pkg(name='persistentVolumeSpec', url='', help='"PersistentVolumeSpec is the specification of a persistent volume."'),
'#awsElasticBlockStore':: d.obj(help='"Represents a Persistent Disk resource in AWS.\\n\\nAn AWS EBS disk must exist before mounting to a container. The disk must also be in the same AWS zone as the kubelet. An AWS EBS disk can only be mounted as read/write once. AWS EBS volumes support ownership management and SELinux relabeling."'),
awsElasticBlockStore: {
- '#withFsType':: d.fn(help='"Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore"', args=[d.arg(name='fsType', type=d.T.string)]),
+ '#withFsType':: d.fn(help='"fsType is the filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore"', args=[d.arg(name='fsType', type=d.T.string)]),
withFsType(fsType): { awsElasticBlockStore+: { fsType: fsType } },
- '#withPartition':: d.fn(help='"The partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \\"1\\". Similarly, the volume partition for /dev/sda is \\"0\\" (or you can leave the property empty)."', args=[d.arg(name='partition', type=d.T.integer)]),
+ '#withPartition':: d.fn(help='"partition is the partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \\"1\\". Similarly, the volume partition for /dev/sda is \\"0\\" (or you can leave the property empty)."', args=[d.arg(name='partition', type=d.T.integer)]),
withPartition(partition): { awsElasticBlockStore+: { partition: partition } },
- '#withReadOnly':: d.fn(help='"Specify \\"true\\" to force and set the ReadOnly property in VolumeMounts to \\"true\\". If omitted, the default is \\"false\\".
More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore"', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly value true will force the readOnly setting in VolumeMounts. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore"', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { awsElasticBlockStore+: { readOnly: readOnly } }, - '#withVolumeID':: d.fn(help='"Unique ID of the persistent disk resource in AWS (Amazon EBS volume). More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore"', args=[d.arg(name='volumeID', type=d.T.string)]), + '#withVolumeID':: d.fn(help='"volumeID is unique ID of the persistent disk resource in AWS (Amazon EBS volume). More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore"', args=[d.arg(name='volumeID', type=d.T.string)]), withVolumeID(volumeID): { awsElasticBlockStore+: { volumeID: volumeID } }, }, '#azureDisk':: d.obj(help='"AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod."'), azureDisk: { - '#withCachingMode':: d.fn(help='"Host Caching mode: None, Read Only, Read Write."', args=[d.arg(name='cachingMode', type=d.T.string)]), + '#withCachingMode':: d.fn(help='"cachingMode is the Host Caching mode: None, Read Only, Read Write."', args=[d.arg(name='cachingMode', type=d.T.string)]), withCachingMode(cachingMode): { azureDisk+: { cachingMode: cachingMode } }, - '#withDiskName':: d.fn(help='"The Name of the data disk in the blob storage"', args=[d.arg(name='diskName', type=d.T.string)]), + '#withDiskName':: d.fn(help='"diskName is the Name of the data disk in the blob storage"', args=[d.arg(name='diskName', type=d.T.string)]), withDiskName(diskName): { azureDisk+: { diskName: diskName } }, - '#withDiskURI':: d.fn(help='"The URI the data disk in the blob storage"', args=[d.arg(name='diskURI', type=d.T.string)]), + '#withDiskURI':: d.fn(help='"diskURI is the URI of data disk in the blob storage"', args=[d.arg(name='diskURI', type=d.T.string)]), withDiskURI(diskURI): { azureDisk+: { diskURI: diskURI } }, - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType is Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { azureDisk+: { fsType: fsType } }, - '#withKind':: d.fn(help='"Expected values Shared: multiple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared"', args=[d.arg(name='kind', type=d.T.string)]), + '#withKind':: d.fn(help='"kind expected values are Shared: multiple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared"', args=[d.arg(name='kind', type=d.T.string)]), withKind(kind): { azureDisk+: { kind: kind } }, - '#withReadOnly':: d.fn(help='"Defaults to false (read/write). 
ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { azureDisk+: { readOnly: readOnly } }, }, '#azureFile':: d.obj(help='"AzureFile represents an Azure File Service mount on the host and bind mount to the pod."'), azureFile: { - '#withReadOnly':: d.fn(help='"Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { azureFile+: { readOnly: readOnly } }, - '#withSecretName':: d.fn(help='"the name of secret that contains Azure Storage Account Name and Key"', args=[d.arg(name='secretName', type=d.T.string)]), + '#withSecretName':: d.fn(help='"secretName is the name of secret that contains Azure Storage Account Name and Key"', args=[d.arg(name='secretName', type=d.T.string)]), withSecretName(secretName): { azureFile+: { secretName: secretName } }, - '#withSecretNamespace':: d.fn(help='"the namespace of the secret that contains Azure Storage Account Name and Key default is the same as the Pod"', args=[d.arg(name='secretNamespace', type=d.T.string)]), + '#withSecretNamespace':: d.fn(help='"secretNamespace is the namespace of the secret that contains Azure Storage Account Name and Key default is the same as the Pod"', args=[d.arg(name='secretNamespace', type=d.T.string)]), withSecretNamespace(secretNamespace): { azureFile+: { secretNamespace: secretNamespace } }, - '#withShareName':: d.fn(help='"Share Name"', args=[d.arg(name='shareName', type=d.T.string)]), + '#withShareName':: d.fn(help='"shareName is the azure Share Name"', args=[d.arg(name='shareName', type=d.T.string)]), withShareName(shareName): { azureFile+: { shareName: shareName } }, }, '#cephfs':: d.obj(help='"Represents a Ceph Filesystem mount that lasts the lifetime of a pod Cephfs volumes do not support ownership management or SELinux relabeling."'), cephfs: { '#secretRef':: d.obj(help='"SecretReference represents a Secret Reference. 
It has enough information to retrieve secret in any namespace"'), secretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), withName(name): { cephfs+: { secretRef+: { name: name } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { cephfs+: { secretRef+: { namespace: namespace } } }, }, - '#withMonitors':: d.fn(help='"Required: Monitors is a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='monitors', type=d.T.array)]), + '#withMonitors':: d.fn(help='"monitors is Required: Monitors is a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='monitors', type=d.T.array)]), withMonitors(monitors): { cephfs+: { monitors: if std.isArray(v=monitors) then monitors else [monitors] } }, - '#withMonitorsMixin':: d.fn(help='"Required: Monitors is a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='monitors', type=d.T.array)]), + '#withMonitorsMixin':: d.fn(help='"monitors is Required: Monitors is a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='monitors', type=d.T.array)]), withMonitorsMixin(monitors): { cephfs+: { monitors+: if std.isArray(v=monitors) then monitors else [monitors] } }, - '#withPath':: d.fn(help='"Optional: Used as the mounted root, rather than the full Ceph tree, default is /"', args=[d.arg(name='path', type=d.T.string)]), + '#withPath':: d.fn(help='"path is Optional: Used as the mounted root, rather than the full Ceph tree, default is /"', args=[d.arg(name='path', type=d.T.string)]), withPath(path): { cephfs+: { path: path } }, - '#withReadOnly':: d.fn(help='"Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly is Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. 
More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { cephfs+: { readOnly: readOnly } }, - '#withSecretFile':: d.fn(help='"Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='secretFile', type=d.T.string)]), + '#withSecretFile':: d.fn(help='"secretFile is Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='secretFile', type=d.T.string)]), withSecretFile(secretFile): { cephfs+: { secretFile: secretFile } }, - '#withUser':: d.fn(help='"Optional: User is the rados user name, default is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='user', type=d.T.string)]), + '#withUser':: d.fn(help='"user is Optional: User is the rados user name, default is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='user', type=d.T.string)]), withUser(user): { cephfs+: { user: user } }, }, '#cinder':: d.obj(help='"Represents a cinder volume resource in Openstack. A Cinder volume must exist before mounting to a container. The volume must also be in the same region as the kubelet. Cinder volumes support ownership management and SELinux relabeling."'), cinder: { '#secretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), secretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), withName(name): { cinder+: { secretRef+: { name: name } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { cinder+: { secretRef+: { namespace: namespace } } }, }, - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://examples.k8s.io/mysql-cinder-pd/README.md"', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType Filesystem type to mount. Must be a filesystem type supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://examples.k8s.io/mysql-cinder-pd/README.md"', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { cinder+: { fsType: fsType } }, - '#withReadOnly':: d.fn(help='"Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/mysql-cinder-pd/README.md"', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly is Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. 
More info: https://examples.k8s.io/mysql-cinder-pd/README.md"', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { cinder+: { readOnly: readOnly } }, - '#withVolumeID':: d.fn(help='"volume id used to identify the volume in cinder. More info: https://examples.k8s.io/mysql-cinder-pd/README.md"', args=[d.arg(name='volumeID', type=d.T.string)]), + '#withVolumeID':: d.fn(help='"volumeID used to identify the volume in cinder. More info: https://examples.k8s.io/mysql-cinder-pd/README.md"', args=[d.arg(name='volumeID', type=d.T.string)]), withVolumeID(volumeID): { cinder+: { volumeID: volumeID } }, }, '#claimRef':: d.obj(help='"ObjectReference contains enough information to let you inspect or modify the referred object."'), @@ -97,164 +97,171 @@ csi: { '#controllerExpandSecretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), controllerExpandSecretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), withName(name): { csi+: { controllerExpandSecretRef+: { name: name } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { csi+: { controllerExpandSecretRef+: { namespace: namespace } } }, }, '#controllerPublishSecretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), controllerPublishSecretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), withName(name): { csi+: { controllerPublishSecretRef+: { name: name } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { csi+: { controllerPublishSecretRef+: { namespace: namespace } } }, }, + '#nodeExpandSecretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), + nodeExpandSecretRef: { + '#withName':: d.fn(help='"name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { csi+: { nodeExpandSecretRef+: { name: name } } }, + '#withNamespace':: d.fn(help='"namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), + withNamespace(namespace): { csi+: { nodeExpandSecretRef+: { namespace: namespace } } }, + }, '#nodePublishSecretRef':: d.obj(help='"SecretReference represents a Secret Reference. 
It has enough information to retrieve secret in any namespace"'), nodePublishSecretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), withName(name): { csi+: { nodePublishSecretRef+: { name: name } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { csi+: { nodePublishSecretRef+: { namespace: namespace } } }, }, '#nodeStageSecretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), nodeStageSecretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), withName(name): { csi+: { nodeStageSecretRef+: { name: name } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { csi+: { nodeStageSecretRef+: { namespace: namespace } } }, }, - '#withDriver':: d.fn(help='"Driver is the name of the driver to use for this volume. Required."', args=[d.arg(name='driver', type=d.T.string)]), + '#withDriver':: d.fn(help='"driver is the name of the driver to use for this volume. Required."', args=[d.arg(name='driver', type=d.T.string)]), withDriver(driver): { csi+: { driver: driver } }, - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\"."', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\"."', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { csi+: { fsType: fsType } }, - '#withReadOnly':: d.fn(help='"Optional: The value to pass to ControllerPublishVolumeRequest. Defaults to false (read/write)."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly value to pass to ControllerPublishVolumeRequest. 
Defaults to false (read/write)."', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { csi+: { readOnly: readOnly } }, - '#withVolumeAttributes':: d.fn(help='"Attributes of the volume to publish."', args=[d.arg(name='volumeAttributes', type=d.T.object)]), + '#withVolumeAttributes':: d.fn(help='"volumeAttributes of the volume to publish."', args=[d.arg(name='volumeAttributes', type=d.T.object)]), withVolumeAttributes(volumeAttributes): { csi+: { volumeAttributes: volumeAttributes } }, - '#withVolumeAttributesMixin':: d.fn(help='"Attributes of the volume to publish."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='volumeAttributes', type=d.T.object)]), + '#withVolumeAttributesMixin':: d.fn(help='"volumeAttributes of the volume to publish."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='volumeAttributes', type=d.T.object)]), withVolumeAttributesMixin(volumeAttributes): { csi+: { volumeAttributes+: volumeAttributes } }, - '#withVolumeHandle':: d.fn(help='"VolumeHandle is the unique volume name returned by the CSI volume plugin’s CreateVolume to refer to the volume on all subsequent calls. Required."', args=[d.arg(name='volumeHandle', type=d.T.string)]), + '#withVolumeHandle':: d.fn(help='"volumeHandle is the unique volume name returned by the CSI volume plugin’s CreateVolume to refer to the volume on all subsequent calls. Required."', args=[d.arg(name='volumeHandle', type=d.T.string)]), withVolumeHandle(volumeHandle): { csi+: { volumeHandle: volumeHandle } }, }, '#fc':: d.obj(help='"Represents a Fibre Channel volume. Fibre Channel volumes can only be mounted as read/write once. Fibre Channel volumes support ownership management and SELinux relabeling."'), fc: { - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { fc+: { fsType: fsType } }, - '#withLun':: d.fn(help='"Optional: FC target lun number"', args=[d.arg(name='lun', type=d.T.integer)]), + '#withLun':: d.fn(help='"lun is Optional: FC target lun number"', args=[d.arg(name='lun', type=d.T.integer)]), withLun(lun): { fc+: { lun: lun } }, - '#withReadOnly':: d.fn(help='"Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly is Optional: Defaults to false (read/write). 
ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { fc+: { readOnly: readOnly } }, - '#withTargetWWNs':: d.fn(help='"Optional: FC target worldwide names (WWNs)"', args=[d.arg(name='targetWWNs', type=d.T.array)]), + '#withTargetWWNs':: d.fn(help='"targetWWNs is Optional: FC target worldwide names (WWNs)"', args=[d.arg(name='targetWWNs', type=d.T.array)]), withTargetWWNs(targetWWNs): { fc+: { targetWWNs: if std.isArray(v=targetWWNs) then targetWWNs else [targetWWNs] } }, - '#withTargetWWNsMixin':: d.fn(help='"Optional: FC target worldwide names (WWNs)"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='targetWWNs', type=d.T.array)]), + '#withTargetWWNsMixin':: d.fn(help='"targetWWNs is Optional: FC target worldwide names (WWNs)"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='targetWWNs', type=d.T.array)]), withTargetWWNsMixin(targetWWNs): { fc+: { targetWWNs+: if std.isArray(v=targetWWNs) then targetWWNs else [targetWWNs] } }, - '#withWwids':: d.fn(help='"Optional: FC volume world wide identifiers (wwids) Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously."', args=[d.arg(name='wwids', type=d.T.array)]), + '#withWwids':: d.fn(help='"wwids Optional: FC volume world wide identifiers (wwids) Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously."', args=[d.arg(name='wwids', type=d.T.array)]), withWwids(wwids): { fc+: { wwids: if std.isArray(v=wwids) then wwids else [wwids] } }, - '#withWwidsMixin':: d.fn(help='"Optional: FC volume world wide identifiers (wwids) Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='wwids', type=d.T.array)]), + '#withWwidsMixin':: d.fn(help='"wwids Optional: FC volume world wide identifiers (wwids) Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='wwids', type=d.T.array)]), withWwidsMixin(wwids): { fc+: { wwids+: if std.isArray(v=wwids) then wwids else [wwids] } }, }, '#flexVolume':: d.obj(help='"FlexPersistentVolumeSource represents a generic persistent volume resource that is provisioned/attached using an exec based plugin."'), flexVolume: { '#secretRef':: d.obj(help='"SecretReference represents a Secret Reference. 
It has enough information to retrieve secret in any namespace"'), secretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), withName(name): { flexVolume+: { secretRef+: { name: name } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { flexVolume+: { secretRef+: { namespace: namespace } } }, }, - '#withDriver':: d.fn(help='"Driver is the name of the driver to use for this volume."', args=[d.arg(name='driver', type=d.T.string)]), + '#withDriver':: d.fn(help='"driver is the name of the driver to use for this volume."', args=[d.arg(name='driver', type=d.T.string)]), withDriver(driver): { flexVolume+: { driver: driver } }, - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". The default filesystem depends on FlexVolume script."', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType is the Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". The default filesystem depends on FlexVolume script."', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { flexVolume+: { fsType: fsType } }, - '#withOptions':: d.fn(help='"Optional: Extra command options if any."', args=[d.arg(name='options', type=d.T.object)]), + '#withOptions':: d.fn(help='"options is Optional: this field holds extra command options if any."', args=[d.arg(name='options', type=d.T.object)]), withOptions(options): { flexVolume+: { options: options } }, - '#withOptionsMixin':: d.fn(help='"Optional: Extra command options if any."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='options', type=d.T.object)]), + '#withOptionsMixin':: d.fn(help='"options is Optional: this field holds extra command options if any."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='options', type=d.T.object)]), withOptionsMixin(options): { flexVolume+: { options+: options } }, - '#withReadOnly':: d.fn(help='"Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly is Optional: defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { flexVolume+: { readOnly: readOnly } }, }, '#flocker':: d.obj(help='"Represents a Flocker volume mounted by the Flocker agent. One and only one of datasetName and datasetUUID should be set. 
Flocker volumes do not support ownership management or SELinux relabeling."'), flocker: { - '#withDatasetName':: d.fn(help='"Name of the dataset stored as metadata -> name on the dataset for Flocker should be considered as deprecated"', args=[d.arg(name='datasetName', type=d.T.string)]), + '#withDatasetName':: d.fn(help='"datasetName is Name of the dataset stored as metadata -> name on the dataset for Flocker should be considered as deprecated"', args=[d.arg(name='datasetName', type=d.T.string)]), withDatasetName(datasetName): { flocker+: { datasetName: datasetName } }, - '#withDatasetUUID':: d.fn(help='"UUID of the dataset. This is unique identifier of a Flocker dataset"', args=[d.arg(name='datasetUUID', type=d.T.string)]), + '#withDatasetUUID':: d.fn(help='"datasetUUID is the UUID of the dataset. This is unique identifier of a Flocker dataset"', args=[d.arg(name='datasetUUID', type=d.T.string)]), withDatasetUUID(datasetUUID): { flocker+: { datasetUUID: datasetUUID } }, }, '#gcePersistentDisk':: d.obj(help='"Represents a Persistent Disk resource in Google Compute Engine.\\n\\nA GCE PD must exist before mounting to a container. The disk must also be in the same GCE project and zone as the kubelet. A GCE PD can only be mounted as read/write once or read-only many times. GCE PDs support ownership management and SELinux relabeling."'), gcePersistentDisk: { - '#withFsType':: d.fn(help='"Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType is filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { gcePersistentDisk+: { fsType: fsType } }, - '#withPartition':: d.fn(help='"The partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \\"1\\". Similarly, the volume partition for /dev/sda is \\"0\\" (or you can leave the property empty). More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='partition', type=d.T.integer)]), + '#withPartition':: d.fn(help='"partition is the partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \\"1\\". Similarly, the volume partition for /dev/sda is \\"0\\" (or you can leave the property empty). More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='partition', type=d.T.integer)]), withPartition(partition): { gcePersistentDisk+: { partition: partition } }, - '#withPdName':: d.fn(help='"Unique name of the PD resource in GCE. Used to identify the disk in GCE. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='pdName', type=d.T.string)]), + '#withPdName':: d.fn(help='"pdName is unique name of the PD resource in GCE. Used to identify the disk in GCE. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='pdName', type=d.T.string)]), withPdName(pdName): { gcePersistentDisk+: { pdName: pdName } }, - '#withReadOnly':: d.fn(help='"ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { gcePersistentDisk+: { readOnly: readOnly } }, }, '#glusterfs':: d.obj(help='"Represents a Glusterfs mount that lasts the lifetime of a pod. Glusterfs volumes do not support ownership management or SELinux relabeling."'), glusterfs: { - '#withEndpoints':: d.fn(help='"EndpointsName is the endpoint name that details Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='endpoints', type=d.T.string)]), + '#withEndpoints':: d.fn(help='"endpoints is the endpoint name that details Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='endpoints', type=d.T.string)]), withEndpoints(endpoints): { glusterfs+: { endpoints: endpoints } }, - '#withEndpointsNamespace':: d.fn(help='"EndpointsNamespace is the namespace that contains Glusterfs endpoint. If this field is empty, the EndpointNamespace defaults to the same namespace as the bound PVC. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='endpointsNamespace', type=d.T.string)]), + '#withEndpointsNamespace':: d.fn(help='"endpointsNamespace is the namespace that contains Glusterfs endpoint. If this field is empty, the EndpointNamespace defaults to the same namespace as the bound PVC. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='endpointsNamespace', type=d.T.string)]), withEndpointsNamespace(endpointsNamespace): { glusterfs+: { endpointsNamespace: endpointsNamespace } }, - '#withPath':: d.fn(help='"Path is the Glusterfs volume path. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='path', type=d.T.string)]), + '#withPath':: d.fn(help='"path is the Glusterfs volume path. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='path', type=d.T.string)]), withPath(path): { glusterfs+: { path: path } }, - '#withReadOnly':: d.fn(help='"ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions. Defaults to false. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly here will force the Glusterfs volume to be mounted with read-only permissions. Defaults to false. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { glusterfs+: { readOnly: readOnly } }, }, '#hostPath':: d.obj(help='"Represents a host path mapped into a pod. Host path volumes do not support ownership management or SELinux relabeling."'), hostPath: { - '#withPath':: d.fn(help='"Path of the directory on the host. 
If the path is a symlink, it will follow the link to the real path. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath"', args=[d.arg(name='path', type=d.T.string)]), + '#withPath':: d.fn(help='"path of the directory on the host. If the path is a symlink, it will follow the link to the real path. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath"', args=[d.arg(name='path', type=d.T.string)]), withPath(path): { hostPath+: { path: path } }, - '#withType':: d.fn(help='"Type for HostPath Volume Defaults to \\"\\" More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath"', args=[d.arg(name='type', type=d.T.string)]), + '#withType':: d.fn(help='"type for HostPath Volume Defaults to \\"\\" More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath"', args=[d.arg(name='type', type=d.T.string)]), withType(type): { hostPath+: { type: type } }, }, '#iscsi':: d.obj(help='"ISCSIPersistentVolumeSource represents an ISCSI disk. ISCSI volumes can only be mounted as read/write once. ISCSI volumes support ownership management and SELinux relabeling."'), iscsi: { '#secretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), secretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), withName(name): { iscsi+: { secretRef+: { name: name } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { iscsi+: { secretRef+: { namespace: namespace } } }, }, - '#withChapAuthDiscovery':: d.fn(help='"whether support iSCSI Discovery CHAP authentication"', args=[d.arg(name='chapAuthDiscovery', type=d.T.boolean)]), + '#withChapAuthDiscovery':: d.fn(help='"chapAuthDiscovery defines whether support iSCSI Discovery CHAP authentication"', args=[d.arg(name='chapAuthDiscovery', type=d.T.boolean)]), withChapAuthDiscovery(chapAuthDiscovery): { iscsi+: { chapAuthDiscovery: chapAuthDiscovery } }, - '#withChapAuthSession':: d.fn(help='"whether support iSCSI Session CHAP authentication"', args=[d.arg(name='chapAuthSession', type=d.T.boolean)]), + '#withChapAuthSession':: d.fn(help='"chapAuthSession defines whether support iSCSI Session CHAP authentication"', args=[d.arg(name='chapAuthSession', type=d.T.boolean)]), withChapAuthSession(chapAuthSession): { iscsi+: { chapAuthSession: chapAuthSession } }, - '#withFsType':: d.fn(help='"Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi"', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType is the filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi"', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { iscsi+: { fsType: fsType } }, - '#withInitiatorName':: d.fn(help='"Custom iSCSI Initiator Name. If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface : will be created for the connection."', args=[d.arg(name='initiatorName', type=d.T.string)]), + '#withInitiatorName':: d.fn(help='"initiatorName is the custom iSCSI Initiator Name. If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface : will be created for the connection."', args=[d.arg(name='initiatorName', type=d.T.string)]), withInitiatorName(initiatorName): { iscsi+: { initiatorName: initiatorName } }, - '#withIqn':: d.fn(help='"Target iSCSI Qualified Name."', args=[d.arg(name='iqn', type=d.T.string)]), + '#withIqn':: d.fn(help='"iqn is Target iSCSI Qualified Name."', args=[d.arg(name='iqn', type=d.T.string)]), withIqn(iqn): { iscsi+: { iqn: iqn } }, - '#withIscsiInterface':: d.fn(help="\"iSCSI Interface Name that uses an iSCSI transport. Defaults to 'default' (tcp).\"", args=[d.arg(name='iscsiInterface', type=d.T.string)]), + '#withIscsiInterface':: d.fn(help="\"iscsiInterface is the interface Name that uses an iSCSI transport. Defaults to 'default' (tcp).\"", args=[d.arg(name='iscsiInterface', type=d.T.string)]), withIscsiInterface(iscsiInterface): { iscsi+: { iscsiInterface: iscsiInterface } }, - '#withLun':: d.fn(help='"iSCSI Target Lun number."', args=[d.arg(name='lun', type=d.T.integer)]), + '#withLun':: d.fn(help='"lun is iSCSI Target Lun number."', args=[d.arg(name='lun', type=d.T.integer)]), withLun(lun): { iscsi+: { lun: lun } }, - '#withPortals':: d.fn(help='"iSCSI Target Portal List. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260)."', args=[d.arg(name='portals', type=d.T.array)]), + '#withPortals':: d.fn(help='"portals is the iSCSI Target Portal List. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260)."', args=[d.arg(name='portals', type=d.T.array)]), withPortals(portals): { iscsi+: { portals: if std.isArray(v=portals) then portals else [portals] } }, - '#withPortalsMixin':: d.fn(help='"iSCSI Target Portal List. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260)."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='portals', type=d.T.array)]), + '#withPortalsMixin':: d.fn(help='"portals is the iSCSI Target Portal List. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260)."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='portals', type=d.T.array)]), withPortalsMixin(portals): { iscsi+: { portals+: if std.isArray(v=portals) then portals else [portals] } }, - '#withReadOnly':: d.fn(help='"ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false."', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { iscsi+: { readOnly: readOnly } }, - '#withTargetPortal':: d.fn(help='"iSCSI Target Portal. 
The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260)."', args=[d.arg(name='targetPortal', type=d.T.string)]), + '#withTargetPortal':: d.fn(help='"targetPortal is iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260)."', args=[d.arg(name='targetPortal', type=d.T.string)]), withTargetPortal(targetPortal): { iscsi+: { targetPortal: targetPortal } }, }, '#local':: d.obj(help='"Local represents directly-attached storage with node affinity (Beta feature)"'), 'local': { - '#withFsType':: d.fn(help='"Filesystem type to mount. It applies only when the Path is a block device. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". The default value is to auto-select a fileystem if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType is the filesystem type to mount. It applies only when the Path is a block device. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". The default value is to auto-select a filesystem if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { 'local'+: { fsType: fsType } }, - '#withPath':: d.fn(help='"The full path to the volume on the node. It can be either a directory or block device (disk, partition, ...)."', args=[d.arg(name='path', type=d.T.string)]), + '#withPath':: d.fn(help='"path of the full path to the volume on the node. It can be either a directory or block device (disk, partition, ...)."', args=[d.arg(name='path', type=d.T.string)]), withPath(path): { 'local'+: { path: path } }, }, '#nfs':: d.obj(help='"Represents an NFS mount that lasts the lifetime of a pod. NFS volumes do not support ownership management or SELinux relabeling."'), nfs: { - '#withPath':: d.fn(help='"Path that is exported by the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs"', args=[d.arg(name='path', type=d.T.string)]), + '#withPath':: d.fn(help='"path that is exported by the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs"', args=[d.arg(name='path', type=d.T.string)]), withPath(path): { nfs+: { path: path } }, - '#withReadOnly':: d.fn(help='"ReadOnly here will force the NFS export to be mounted with read-only permissions. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs"', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly here will force the NFS export to be mounted with read-only permissions. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs"', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { nfs+: { readOnly: readOnly } }, - '#withServer':: d.fn(help='"Server is the hostname or IP address of the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs"', args=[d.arg(name='server', type=d.T.string)]), + '#withServer':: d.fn(help='"server is the hostname or IP address of the NFS server. 
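[Editor's sketch, not part of the patch: the hunks above only reword the upstream field descriptions; the setter names and the objects they build are unchanged. A rough, assumption-laden example of how these generated setters typically compose, assuming the 1.29 bundle is imported as `k` (the import path and all names/values below are placeholders and depend on the project's jsonnetfile):

  local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';
  local pv = k.core.v1.persistentVolume;

  pv.new('example-pv')
  + pv.spec.withCapacity({ storage: '10Gi' })        // volume capacity
  + pv.spec.withAccessModes(['ReadWriteOnce'])       // how the volume may be mounted
  + pv.spec.nfs.withServer('nfs.example.internal')   // hostname or IP of the NFS server (placeholder)
  + pv.spec.nfs.withPath('/exports/data')            // path exported by that server (placeholder)
  + pv.spec.nfs.withReadOnly(false)
]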
More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs"', args=[d.arg(name='server', type=d.T.string)]), withServer(server): { nfs+: { server: server } }, }, '#nodeAffinity':: d.obj(help='"VolumeNodeAffinity defines constraints that limit what nodes this volume can be accessed from."'), @@ -269,87 +276,87 @@ }, '#photonPersistentDisk':: d.obj(help='"Represents a Photon Controller persistent disk resource."'), photonPersistentDisk: { - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { photonPersistentDisk+: { fsType: fsType } }, - '#withPdID':: d.fn(help='"ID that identifies Photon Controller persistent disk"', args=[d.arg(name='pdID', type=d.T.string)]), + '#withPdID':: d.fn(help='"pdID is the ID that identifies Photon Controller persistent disk"', args=[d.arg(name='pdID', type=d.T.string)]), withPdID(pdID): { photonPersistentDisk+: { pdID: pdID } }, }, '#portworxVolume':: d.obj(help='"PortworxVolumeSource represents a Portworx volume resource."'), portworxVolume: { - '#withFsType':: d.fn(help='"FSType represents the filesystem type to mount Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fSType represents the filesystem type to mount Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { portworxVolume+: { fsType: fsType } }, - '#withReadOnly':: d.fn(help='"Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { portworxVolume+: { readOnly: readOnly } }, - '#withVolumeID':: d.fn(help='"VolumeID uniquely identifies a Portworx volume"', args=[d.arg(name='volumeID', type=d.T.string)]), + '#withVolumeID':: d.fn(help='"volumeID uniquely identifies a Portworx volume"', args=[d.arg(name='volumeID', type=d.T.string)]), withVolumeID(volumeID): { portworxVolume+: { volumeID: volumeID } }, }, '#quobyte':: d.obj(help='"Represents a Quobyte mount that lasts the lifetime of a pod. Quobyte volumes do not support ownership management or SELinux relabeling."'), quobyte: { - '#withGroup':: d.fn(help='"Group to map volume access to Default is no group"', args=[d.arg(name='group', type=d.T.string)]), + '#withGroup':: d.fn(help='"group to map volume access to Default is no group"', args=[d.arg(name='group', type=d.T.string)]), withGroup(group): { quobyte+: { group: group } }, - '#withReadOnly':: d.fn(help='"ReadOnly here will force the Quobyte volume to be mounted with read-only permissions. 
Defaults to false."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly here will force the Quobyte volume to be mounted with read-only permissions. Defaults to false."', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { quobyte+: { readOnly: readOnly } }, - '#withRegistry':: d.fn(help='"Registry represents a single or multiple Quobyte Registry services specified as a string as host:port pair (multiple entries are separated with commas) which acts as the central registry for volumes"', args=[d.arg(name='registry', type=d.T.string)]), + '#withRegistry':: d.fn(help='"registry represents a single or multiple Quobyte Registry services specified as a string as host:port pair (multiple entries are separated with commas) which acts as the central registry for volumes"', args=[d.arg(name='registry', type=d.T.string)]), withRegistry(registry): { quobyte+: { registry: registry } }, - '#withTenant':: d.fn(help='"Tenant owning the given Quobyte volume in the Backend Used with dynamically provisioned Quobyte volumes, value is set by the plugin"', args=[d.arg(name='tenant', type=d.T.string)]), + '#withTenant':: d.fn(help='"tenant owning the given Quobyte volume in the Backend Used with dynamically provisioned Quobyte volumes, value is set by the plugin"', args=[d.arg(name='tenant', type=d.T.string)]), withTenant(tenant): { quobyte+: { tenant: tenant } }, - '#withUser':: d.fn(help='"User to map volume access to Defaults to serivceaccount user"', args=[d.arg(name='user', type=d.T.string)]), + '#withUser':: d.fn(help='"user to map volume access to Defaults to serivceaccount user"', args=[d.arg(name='user', type=d.T.string)]), withUser(user): { quobyte+: { user: user } }, - '#withVolume':: d.fn(help='"Volume is a string that references an already created Quobyte volume by name."', args=[d.arg(name='volume', type=d.T.string)]), + '#withVolume':: d.fn(help='"volume is a string that references an already created Quobyte volume by name."', args=[d.arg(name='volume', type=d.T.string)]), withVolume(volume): { quobyte+: { volume: volume } }, }, '#rbd':: d.obj(help='"Represents a Rados Block Device mount that lasts the lifetime of a pod. RBD volumes support ownership management and SELinux relabeling."'), rbd: { '#secretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), secretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), withName(name): { rbd+: { secretRef+: { name: name } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { rbd+: { secretRef+: { namespace: namespace } } }, }, - '#withFsType':: d.fn(help='"Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd"', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType is the filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd"', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { rbd+: { fsType: fsType } }, - '#withImage':: d.fn(help='"The rados image name. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='image', type=d.T.string)]), + '#withImage':: d.fn(help='"image is the rados image name. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='image', type=d.T.string)]), withImage(image): { rbd+: { image: image } }, - '#withKeyring':: d.fn(help='"Keyring is the path to key ring for RBDUser. Default is /etc/ceph/keyring. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='keyring', type=d.T.string)]), + '#withKeyring':: d.fn(help='"keyring is the path to key ring for RBDUser. Default is /etc/ceph/keyring. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='keyring', type=d.T.string)]), withKeyring(keyring): { rbd+: { keyring: keyring } }, - '#withMonitors':: d.fn(help='"A collection of Ceph monitors. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='monitors', type=d.T.array)]), + '#withMonitors':: d.fn(help='"monitors is a collection of Ceph monitors. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='monitors', type=d.T.array)]), withMonitors(monitors): { rbd+: { monitors: if std.isArray(v=monitors) then monitors else [monitors] } }, - '#withMonitorsMixin':: d.fn(help='"A collection of Ceph monitors. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='monitors', type=d.T.array)]), + '#withMonitorsMixin':: d.fn(help='"monitors is a collection of Ceph monitors. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='monitors', type=d.T.array)]), withMonitorsMixin(monitors): { rbd+: { monitors+: if std.isArray(v=monitors) then monitors else [monitors] } }, - '#withPool':: d.fn(help='"The rados pool name. Default is rbd. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='pool', type=d.T.string)]), + '#withPool':: d.fn(help='"pool is the rados pool name. Default is rbd. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='pool', type=d.T.string)]), withPool(pool): { rbd+: { pool: pool } }, - '#withReadOnly':: d.fn(help='"ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. 
More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { rbd+: { readOnly: readOnly } }, - '#withUser':: d.fn(help='"The rados user name. Default is admin. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='user', type=d.T.string)]), + '#withUser':: d.fn(help='"user is the rados user name. Default is admin. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='user', type=d.T.string)]), withUser(user): { rbd+: { user: user } }, }, '#scaleIO':: d.obj(help='"ScaleIOPersistentVolumeSource represents a persistent ScaleIO volume"'), scaleIO: { '#secretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), secretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), withName(name): { scaleIO+: { secretRef+: { name: name } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { scaleIO+: { secretRef+: { namespace: namespace } } }, }, - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Default is \\"xfs\\', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Default is \\"xfs\\', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { scaleIO+: { fsType: fsType } }, - '#withGateway':: d.fn(help='"The host address of the ScaleIO API Gateway."', args=[d.arg(name='gateway', type=d.T.string)]), + '#withGateway':: d.fn(help='"gateway is the host address of the ScaleIO API Gateway."', args=[d.arg(name='gateway', type=d.T.string)]), withGateway(gateway): { scaleIO+: { gateway: gateway } }, - '#withProtectionDomain':: d.fn(help='"The name of the ScaleIO Protection Domain for the configured storage."', args=[d.arg(name='protectionDomain', type=d.T.string)]), + '#withProtectionDomain':: d.fn(help='"protectionDomain is the name of the ScaleIO Protection Domain for the configured storage."', args=[d.arg(name='protectionDomain', type=d.T.string)]), withProtectionDomain(protectionDomain): { scaleIO+: { protectionDomain: protectionDomain } }, - '#withReadOnly':: d.fn(help='"Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly defaults to false (read/write). 
ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { scaleIO+: { readOnly: readOnly } }, - '#withSslEnabled':: d.fn(help='"Flag to enable/disable SSL communication with Gateway, default false"', args=[d.arg(name='sslEnabled', type=d.T.boolean)]), + '#withSslEnabled':: d.fn(help='"sslEnabled is the flag to enable/disable SSL communication with Gateway, default false"', args=[d.arg(name='sslEnabled', type=d.T.boolean)]), withSslEnabled(sslEnabled): { scaleIO+: { sslEnabled: sslEnabled } }, - '#withStorageMode':: d.fn(help='"Indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. Default is ThinProvisioned."', args=[d.arg(name='storageMode', type=d.T.string)]), + '#withStorageMode':: d.fn(help='"storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. Default is ThinProvisioned."', args=[d.arg(name='storageMode', type=d.T.string)]), withStorageMode(storageMode): { scaleIO+: { storageMode: storageMode } }, - '#withStoragePool':: d.fn(help='"The ScaleIO Storage Pool associated with the protection domain."', args=[d.arg(name='storagePool', type=d.T.string)]), + '#withStoragePool':: d.fn(help='"storagePool is the ScaleIO Storage Pool associated with the protection domain."', args=[d.arg(name='storagePool', type=d.T.string)]), withStoragePool(storagePool): { scaleIO+: { storagePool: storagePool } }, - '#withSystem':: d.fn(help='"The name of the storage system as configured in ScaleIO."', args=[d.arg(name='system', type=d.T.string)]), + '#withSystem':: d.fn(help='"system is the name of the storage system as configured in ScaleIO."', args=[d.arg(name='system', type=d.T.string)]), withSystem(system): { scaleIO+: { system: system } }, - '#withVolumeName':: d.fn(help='"The name of a volume already created in the ScaleIO system that is associated with this volume source."', args=[d.arg(name='volumeName', type=d.T.string)]), + '#withVolumeName':: d.fn(help='"volumeName is the name of a volume already created in the ScaleIO system that is associated with this volume source."', args=[d.arg(name='volumeName', type=d.T.string)]), withVolumeName(volumeName): { scaleIO+: { volumeName: volumeName } }, }, '#storageos':: d.obj(help='"Represents a StorageOS persistent volume resource."'), @@ -371,42 +378,44 @@ '#withUid':: d.fn(help='"UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { storageos+: { secretRef+: { uid: uid } } }, }, - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { storageos+: { fsType: fsType } }, - '#withReadOnly':: d.fn(help='"Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly defaults to false (read/write). 
ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { storageos+: { readOnly: readOnly } }, - '#withVolumeName':: d.fn(help='"VolumeName is the human-readable name of the StorageOS volume. Volume names are only unique within a namespace."', args=[d.arg(name='volumeName', type=d.T.string)]), + '#withVolumeName':: d.fn(help='"volumeName is the human-readable name of the StorageOS volume. Volume names are only unique within a namespace."', args=[d.arg(name='volumeName', type=d.T.string)]), withVolumeName(volumeName): { storageos+: { volumeName: volumeName } }, - '#withVolumeNamespace':: d.fn(help="\"VolumeNamespace specifies the scope of the volume within StorageOS. If no namespace is specified then the Pod's namespace will be used. This allows the Kubernetes name scoping to be mirrored within StorageOS for tighter integration. Set VolumeName to any name to override the default behaviour. Set to \\\"default\\\" if you are not using namespaces within StorageOS. Namespaces that do not pre-exist within StorageOS will be created.\"", args=[d.arg(name='volumeNamespace', type=d.T.string)]), + '#withVolumeNamespace':: d.fn(help="\"volumeNamespace specifies the scope of the volume within StorageOS. If no namespace is specified then the Pod's namespace will be used. This allows the Kubernetes name scoping to be mirrored within StorageOS for tighter integration. Set VolumeName to any name to override the default behaviour. Set to \\\"default\\\" if you are not using namespaces within StorageOS. Namespaces that do not pre-exist within StorageOS will be created.\"", args=[d.arg(name='volumeNamespace', type=d.T.string)]), withVolumeNamespace(volumeNamespace): { storageos+: { volumeNamespace: volumeNamespace } }, }, '#vsphereVolume':: d.obj(help='"Represents a vSphere volume resource."'), vsphereVolume: { - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType is filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". 
Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { vsphereVolume+: { fsType: fsType } }, - '#withStoragePolicyID':: d.fn(help='"Storage Policy Based Management (SPBM) profile ID associated with the StoragePolicyName."', args=[d.arg(name='storagePolicyID', type=d.T.string)]), + '#withStoragePolicyID':: d.fn(help='"storagePolicyID is the storage Policy Based Management (SPBM) profile ID associated with the StoragePolicyName."', args=[d.arg(name='storagePolicyID', type=d.T.string)]), withStoragePolicyID(storagePolicyID): { vsphereVolume+: { storagePolicyID: storagePolicyID } }, - '#withStoragePolicyName':: d.fn(help='"Storage Policy Based Management (SPBM) profile name."', args=[d.arg(name='storagePolicyName', type=d.T.string)]), + '#withStoragePolicyName':: d.fn(help='"storagePolicyName is the storage Policy Based Management (SPBM) profile name."', args=[d.arg(name='storagePolicyName', type=d.T.string)]), withStoragePolicyName(storagePolicyName): { vsphereVolume+: { storagePolicyName: storagePolicyName } }, - '#withVolumePath':: d.fn(help='"Path that identifies vSphere volume vmdk"', args=[d.arg(name='volumePath', type=d.T.string)]), + '#withVolumePath':: d.fn(help='"volumePath is the path that identifies vSphere volume vmdk"', args=[d.arg(name='volumePath', type=d.T.string)]), withVolumePath(volumePath): { vsphereVolume+: { volumePath: volumePath } }, }, - '#withAccessModes':: d.fn(help='"AccessModes contains all ways the volume can be mounted. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes"', args=[d.arg(name='accessModes', type=d.T.array)]), + '#withAccessModes':: d.fn(help='"accessModes contains all ways the volume can be mounted. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes"', args=[d.arg(name='accessModes', type=d.T.array)]), withAccessModes(accessModes): { accessModes: if std.isArray(v=accessModes) then accessModes else [accessModes] }, - '#withAccessModesMixin':: d.fn(help='"AccessModes contains all ways the volume can be mounted. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='accessModes', type=d.T.array)]), + '#withAccessModesMixin':: d.fn(help='"accessModes contains all ways the volume can be mounted. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='accessModes', type=d.T.array)]), withAccessModesMixin(accessModes): { accessModes+: if std.isArray(v=accessModes) then accessModes else [accessModes] }, - '#withCapacity':: d.fn(help="\"A description of the persistent volume's resources and capacity. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity\"", args=[d.arg(name='capacity', type=d.T.object)]), + '#withCapacity':: d.fn(help="\"capacity is the description of the persistent volume's resources and capacity. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity\"", args=[d.arg(name='capacity', type=d.T.object)]), withCapacity(capacity): { capacity: capacity }, - '#withCapacityMixin':: d.fn(help="\"A description of the persistent volume's resources and capacity. 
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='capacity', type=d.T.object)]), + '#withCapacityMixin':: d.fn(help="\"capacity is the description of the persistent volume's resources and capacity. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='capacity', type=d.T.object)]), withCapacityMixin(capacity): { capacity+: capacity }, - '#withMountOptions':: d.fn(help='"A list of mount options, e.g. [\\"ro\\", \\"soft\\"]. Not validated - mount will simply fail if one is invalid. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#mount-options"', args=[d.arg(name='mountOptions', type=d.T.array)]), + '#withMountOptions':: d.fn(help='"mountOptions is the list of mount options, e.g. [\\"ro\\", \\"soft\\"]. Not validated - mount will simply fail if one is invalid. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#mount-options"', args=[d.arg(name='mountOptions', type=d.T.array)]), withMountOptions(mountOptions): { mountOptions: if std.isArray(v=mountOptions) then mountOptions else [mountOptions] }, - '#withMountOptionsMixin':: d.fn(help='"A list of mount options, e.g. [\\"ro\\", \\"soft\\"]. Not validated - mount will simply fail if one is invalid. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#mount-options"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='mountOptions', type=d.T.array)]), + '#withMountOptionsMixin':: d.fn(help='"mountOptions is the list of mount options, e.g. [\\"ro\\", \\"soft\\"]. Not validated - mount will simply fail if one is invalid. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#mount-options"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='mountOptions', type=d.T.array)]), withMountOptionsMixin(mountOptions): { mountOptions+: if std.isArray(v=mountOptions) then mountOptions else [mountOptions] }, - '#withPersistentVolumeReclaimPolicy':: d.fn(help='"What happens to a persistent volume when released from its claim. Valid options are Retain (default for manually created PersistentVolumes), Delete (default for dynamically provisioned PersistentVolumes), and Recycle (deprecated). Recycle must be supported by the volume plugin underlying this PersistentVolume. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#reclaiming"', args=[d.arg(name='persistentVolumeReclaimPolicy', type=d.T.string)]), + '#withPersistentVolumeReclaimPolicy':: d.fn(help='"persistentVolumeReclaimPolicy defines what happens to a persistent volume when released from its claim. Valid options are Retain (default for manually created PersistentVolumes), Delete (default for dynamically provisioned PersistentVolumes), and Recycle (deprecated). Recycle must be supported by the volume plugin underlying this PersistentVolume. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#reclaiming"', args=[d.arg(name='persistentVolumeReclaimPolicy', type=d.T.string)]), withPersistentVolumeReclaimPolicy(persistentVolumeReclaimPolicy): { persistentVolumeReclaimPolicy: persistentVolumeReclaimPolicy }, - '#withStorageClassName':: d.fn(help='"Name of StorageClass to which this persistent volume belongs. 
Empty value means that this volume does not belong to any StorageClass."', args=[d.arg(name='storageClassName', type=d.T.string)]), + '#withStorageClassName':: d.fn(help='"storageClassName is the name of StorageClass to which this persistent volume belongs. Empty value means that this volume does not belong to any StorageClass."', args=[d.arg(name='storageClassName', type=d.T.string)]), withStorageClassName(storageClassName): { storageClassName: storageClassName }, + '#withVolumeAttributesClassName':: d.fn(help='"Name of VolumeAttributesClass to which this persistent volume belongs. Empty value is not allowed. When this field is not set, it indicates that this volume does not belong to any VolumeAttributesClass. This field is mutable and can be changed by the CSI driver after a volume has been updated successfully to a new class. For an unbound PersistentVolume, the volumeAttributesClassName will be matched with unbound PersistentVolumeClaims during the binding process. This is an alpha field and requires enabling VolumeAttributesClass feature."', args=[d.arg(name='volumeAttributesClassName', type=d.T.string)]), + withVolumeAttributesClassName(volumeAttributesClassName): { volumeAttributesClassName: volumeAttributesClassName }, '#withVolumeMode':: d.fn(help='"volumeMode defines if a volume is intended to be used with a formatted filesystem or to remain in raw block state. Value of Filesystem is implied when not included in spec."', args=[d.arg(name='volumeMode', type=d.T.string)]), withVolumeMode(volumeMode): { volumeMode: volumeMode }, '#mixin': 'ignore', diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/persistentVolumeStatus.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/persistentVolumeStatus.libsonnet new file mode 100644 index 00000000000..e855006c2ae --- /dev/null +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/persistentVolumeStatus.libsonnet @@ -0,0 +1,14 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='persistentVolumeStatus', url='', help='"PersistentVolumeStatus is the current status of a persistent volume."'), + '#withLastPhaseTransitionTime':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='lastPhaseTransitionTime', type=d.T.string)]), + withLastPhaseTransitionTime(lastPhaseTransitionTime): { lastPhaseTransitionTime: lastPhaseTransitionTime }, + '#withMessage':: d.fn(help='"message is a human-readable message indicating details about why the volume is in this state."', args=[d.arg(name='message', type=d.T.string)]), + withMessage(message): { message: message }, + '#withPhase':: d.fn(help='"phase indicates if a volume is available, bound to a claim, or released by a claim. 
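[Editor's sketch, not part of the patch: the newly added volumeAttributesClassName setter (an alpha field in Kubernetes 1.29, gated by the VolumeAttributesClass feature, per the help text above) chains like any other spec field. A hypothetical example, again assuming the library is imported as `k` and using placeholder names:

  local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';
  local pv = k.core.v1.persistentVolume;

  pv.new('example-pv')
  + pv.spec.withStorageClassName('fast')            // StorageClass the volume belongs to (placeholder)
  + pv.spec.withVolumeAttributesClassName('gold')   // new in 1.29; requires the VolumeAttributesClass feature gate
  + pv.spec.withVolumeMode('Filesystem')            // default when unset
]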
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#phase"', args=[d.arg(name='phase', type=d.T.string)]), + withPhase(phase): { phase: phase }, + '#withReason':: d.fn(help='"reason is a brief CamelCase string that describes any failure and is meant for machine parsing and tidy display in the CLI."', args=[d.arg(name='reason', type=d.T.string)]), + withReason(reason): { reason: reason }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/photonPersistentDiskVolumeSource.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/photonPersistentDiskVolumeSource.libsonnet new file mode 100644 index 00000000000..15b92e04340 --- /dev/null +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/photonPersistentDiskVolumeSource.libsonnet @@ -0,0 +1,10 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='photonPersistentDiskVolumeSource', url='', help='"Represents a Photon Controller persistent disk resource."'), + '#withFsType':: d.fn(help='"fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), + withFsType(fsType): { fsType: fsType }, + '#withPdID':: d.fn(help='"pdID is the ID that identifies Photon Controller persistent disk"', args=[d.arg(name='pdID', type=d.T.string)]), + withPdID(pdID): { pdID: pdID }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/pod.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/pod.libsonnet similarity index 84% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/pod.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/pod.libsonnet index abfdcb6e4df..b663c0d888e 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/pod.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/pod.libsonnet @@ -3,12 +3,10 @@ '#':: d.pkg(name='pod', url='', help='"Pod is a collection of containers that can run on a host. This resource is created by clients and scheduled onto hosts."'), '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -19,21 +17,21 @@ withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. 
Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { metadata+: { generateName: generateName } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. 
More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { metadata+: { labels+: labels } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. 
An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { metadata+: { namespace: namespace } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, @@ -41,9 +39,9 @@ withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { metadata+: { uid: uid } }, }, '#new':: d.fn(help='new returns an instance of Pod', args=[d.arg(name='name', type=d.T.string)]), @@ -107,6 +105,11 @@ '#withSearchesMixin':: d.fn(help='"A list of DNS search domains for host-name lookup. This will be appended to the base search paths generated from DNSPolicy. Duplicated search paths will be removed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='searches', type=d.T.array)]), withSearchesMixin(searches): { spec+: { dnsConfig+: { searches+: if std.isArray(v=searches) then searches else [searches] } } }, }, + '#os':: d.obj(help='"PodOS defines the OS parameters of a pod."'), + os: { + '#withName':: d.fn(help='"Name is the name of the operating system. The currently supported values are linux and windows. Additional value may be defined in future and can be one of: https://github.com/opencontainers/runtime-spec/blob/master/config.md#platform-specific-configuration Clients should expect to handle additional values and treat unrecognized values in this field as os: null"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { spec+: { os+: { name: name } } }, + }, '#securityContext':: d.obj(help='"PodSecurityContext holds pod-level security attributes and common container settings. Some fields are also present in container.securityContext. Field values of container.securityContext take precedence over field values of PodSecurityContext."'), securityContext: { '#seLinuxOptions':: d.obj(help='"SELinuxOptions are the labels to be applied to the container"'), @@ -122,7 +125,7 @@ }, '#seccompProfile':: d.obj(help="\"SeccompProfile defines a pod/container's seccomp profile settings. Only one profile source may be set.\""), seccompProfile: { - '#withLocalhostProfile':: d.fn(help="\"localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must only be set if type is \\\"Localhost\\\".\"", args=[d.arg(name='localhostProfile', type=d.T.string)]), + '#withLocalhostProfile':: d.fn(help="\"localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must be set if type is \\\"Localhost\\\". Must NOT be set for any other type.\"", args=[d.arg(name='localhostProfile', type=d.T.string)]), withLocalhostProfile(localhostProfile): { spec+: { securityContext+: { seccompProfile+: { localhostProfile: localhostProfile } } } }, '#withType':: d.fn(help='"type indicates which kind of seccomp profile will be applied. Valid options are:\\n\\nLocalhost - a profile defined in a file on the node should be used. RuntimeDefault - the container runtime default profile should be used. 
Unconfined - no profile should be applied."', args=[d.arg(name='type', type=d.T.string)]), withType(type): { spec+: { securityContext+: { seccompProfile+: { type: type } } } }, @@ -133,26 +136,28 @@ withGmsaCredentialSpec(gmsaCredentialSpec): { spec+: { securityContext+: { windowsOptions+: { gmsaCredentialSpec: gmsaCredentialSpec } } } }, '#withGmsaCredentialSpecName':: d.fn(help='"GMSACredentialSpecName is the name of the GMSA credential spec to use."', args=[d.arg(name='gmsaCredentialSpecName', type=d.T.string)]), withGmsaCredentialSpecName(gmsaCredentialSpecName): { spec+: { securityContext+: { windowsOptions+: { gmsaCredentialSpecName: gmsaCredentialSpecName } } } }, + '#withHostProcess':: d.fn(help="\"HostProcess determines if a container should be run as a 'Host Process' container. All of a Pod's containers must have the same effective HostProcess value (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). In addition, if HostProcess is true then HostNetwork must also be set to true.\"", args=[d.arg(name='hostProcess', type=d.T.boolean)]), + withHostProcess(hostProcess): { spec+: { securityContext+: { windowsOptions+: { hostProcess: hostProcess } } } }, '#withRunAsUserName':: d.fn(help='"The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence."', args=[d.arg(name='runAsUserName', type=d.T.string)]), withRunAsUserName(runAsUserName): { spec+: { securityContext+: { windowsOptions+: { runAsUserName: runAsUserName } } } }, }, - '#withFsGroup':: d.fn(help="\"A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod:\\n\\n1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw----\\n\\nIf unset, the Kubelet will not modify the ownership and permissions of any volume.\"", args=[d.arg(name='fsGroup', type=d.T.integer)]), + '#withFsGroup':: d.fn(help="\"A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod:\\n\\n1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw----\\n\\nIf unset, the Kubelet will not modify the ownership and permissions of any volume. Note that this field cannot be set when spec.os.name is windows.\"", args=[d.arg(name='fsGroup', type=d.T.integer)]), withFsGroup(fsGroup): { spec+: { securityContext+: { fsGroup: fsGroup } } }, - '#withFsGroupChangePolicy':: d.fn(help='"fsGroupChangePolicy defines behavior of changing ownership and permission of the volume before being exposed inside Pod. This field will only apply to volume types which support fsGroup based ownership(and permissions). It will have no effect on ephemeral volume types such as: secret, configmaps and emptydir. Valid values are \\"OnRootMismatch\\" and \\"Always\\". 
If not specified, \\"Always\\" is used."', args=[d.arg(name='fsGroupChangePolicy', type=d.T.string)]), + '#withFsGroupChangePolicy':: d.fn(help='"fsGroupChangePolicy defines behavior of changing ownership and permission of the volume before being exposed inside Pod. This field will only apply to volume types which support fsGroup based ownership(and permissions). It will have no effect on ephemeral volume types such as: secret, configmaps and emptydir. Valid values are \\"OnRootMismatch\\" and \\"Always\\". If not specified, \\"Always\\" is used. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='fsGroupChangePolicy', type=d.T.string)]), withFsGroupChangePolicy(fsGroupChangePolicy): { spec+: { securityContext+: { fsGroupChangePolicy: fsGroupChangePolicy } } }, - '#withRunAsGroup':: d.fn(help='"The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container."', args=[d.arg(name='runAsGroup', type=d.T.integer)]), + '#withRunAsGroup':: d.fn(help='"The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='runAsGroup', type=d.T.integer)]), withRunAsGroup(runAsGroup): { spec+: { securityContext+: { runAsGroup: runAsGroup } } }, '#withRunAsNonRoot':: d.fn(help='"Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence."', args=[d.arg(name='runAsNonRoot', type=d.T.boolean)]), withRunAsNonRoot(runAsNonRoot): { spec+: { securityContext+: { runAsNonRoot: runAsNonRoot } } }, - '#withRunAsUser':: d.fn(help='"The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container."', args=[d.arg(name='runAsUser', type=d.T.integer)]), + '#withRunAsUser':: d.fn(help='"The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='runAsUser', type=d.T.integer)]), withRunAsUser(runAsUser): { spec+: { securityContext+: { runAsUser: runAsUser } } }, - '#withSupplementalGroups':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID. 
If unspecified, no groups will be added to any container.\"", args=[d.arg(name='supplementalGroups', type=d.T.array)]), + '#withSupplementalGroups':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID, the fsGroup (if specified), and group memberships defined in the container image for the uid of the container process. If unspecified, no additional groups are added to any container. Note that group memberships defined in the container image for the uid of the container process are still effective, even if they are not included in this list. Note that this field cannot be set when spec.os.name is windows.\"", args=[d.arg(name='supplementalGroups', type=d.T.array)]), withSupplementalGroups(supplementalGroups): { spec+: { securityContext+: { supplementalGroups: if std.isArray(v=supplementalGroups) then supplementalGroups else [supplementalGroups] } } }, - '#withSupplementalGroupsMixin':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID. If unspecified, no groups will be added to any container.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='supplementalGroups', type=d.T.array)]), + '#withSupplementalGroupsMixin':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID, the fsGroup (if specified), and group memberships defined in the container image for the uid of the container process. If unspecified, no additional groups are added to any container. Note that group memberships defined in the container image for the uid of the container process are still effective, even if they are not included in this list. Note that this field cannot be set when spec.os.name is windows.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='supplementalGroups', type=d.T.array)]), withSupplementalGroupsMixin(supplementalGroups): { spec+: { securityContext+: { supplementalGroups+: if std.isArray(v=supplementalGroups) then supplementalGroups else [supplementalGroups] } } }, - '#withSysctls':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch."', args=[d.arg(name='sysctls', type=d.T.array)]), + '#withSysctls':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='sysctls', type=d.T.array)]), withSysctls(sysctls): { spec+: { securityContext+: { sysctls: if std.isArray(v=sysctls) then sysctls else [sysctls] } } }, - '#withSysctlsMixin':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='sysctls', type=d.T.array)]), + '#withSysctlsMixin':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch. 
Note that this field cannot be set when spec.os.name is windows."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='sysctls', type=d.T.array)]), withSysctlsMixin(sysctls): { spec+: { securityContext+: { sysctls+: if std.isArray(v=sysctls) then sysctls else [sysctls] } } }, }, '#withActiveDeadlineSeconds':: d.fn(help='"Optional duration in seconds the pod may be active on the node relative to StartTime before the system will actively try to mark it failed and kill associated containers. Value must be a positive integer."', args=[d.arg(name='activeDeadlineSeconds', type=d.T.integer)]), @@ -167,9 +172,9 @@ withDnsPolicy(dnsPolicy): { spec+: { dnsPolicy: dnsPolicy } }, '#withEnableServiceLinks':: d.fn(help="\"EnableServiceLinks indicates whether information about services should be injected into pod's environment variables, matching the syntax of Docker links. Optional: Defaults to true.\"", args=[d.arg(name='enableServiceLinks', type=d.T.boolean)]), withEnableServiceLinks(enableServiceLinks): { spec+: { enableServiceLinks: enableServiceLinks } }, - '#withEphemeralContainers':: d.fn(help="\"List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. This field is alpha-level and is only honored by servers that enable the EphemeralContainers feature.\"", args=[d.arg(name='ephemeralContainers', type=d.T.array)]), + '#withEphemeralContainers':: d.fn(help="\"List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource.\"", args=[d.arg(name='ephemeralContainers', type=d.T.array)]), withEphemeralContainers(ephemeralContainers): { spec+: { ephemeralContainers: if std.isArray(v=ephemeralContainers) then ephemeralContainers else [ephemeralContainers] } }, - '#withEphemeralContainersMixin':: d.fn(help="\"List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. This field is alpha-level and is only honored by servers that enable the EphemeralContainers feature.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='ephemeralContainers', type=d.T.array)]), + '#withEphemeralContainersMixin':: d.fn(help="\"List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. 
In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='ephemeralContainers', type=d.T.array)]), withEphemeralContainersMixin(ephemeralContainers): { spec+: { ephemeralContainers+: if std.isArray(v=ephemeralContainers) then ephemeralContainers else [ephemeralContainers] } }, '#withHostAliases':: d.fn(help="\"HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts file if specified. This is only valid for non-hostNetwork pods.\"", args=[d.arg(name='hostAliases', type=d.T.array)]), withHostAliases(hostAliases): { spec+: { hostAliases: if std.isArray(v=hostAliases) then hostAliases else [hostAliases] } }, @@ -181,11 +186,13 @@ withHostNetwork(hostNetwork): { spec+: { hostNetwork: hostNetwork } }, '#withHostPID':: d.fn(help="\"Use the host's pid namespace. Optional: Default to false.\"", args=[d.arg(name='hostPID', type=d.T.boolean)]), withHostPID(hostPID): { spec+: { hostPID: hostPID } }, + '#withHostUsers':: d.fn(help="\"Use the host's user namespace. Optional: Default to true. If set to true or not present, the pod will be run in the host user namespace, useful for when the pod needs a feature only available to the host user namespace, such as loading a kernel module with CAP_SYS_MODULE. When set to false, a new userns is created for the pod. Setting false is useful for mitigating container breakout vulnerabilities even allowing users to run their containers as root without actually having root privileges on the host. This field is alpha-level and is only honored by servers that enable the UserNamespacesSupport feature.\"", args=[d.arg(name='hostUsers', type=d.T.boolean)]), + withHostUsers(hostUsers): { spec+: { hostUsers: hostUsers } }, '#withHostname':: d.fn(help="\"Specifies the hostname of the Pod If not specified, the pod's hostname will be set to a system-defined value.\"", args=[d.arg(name='hostname', type=d.T.string)]), withHostname(hostname): { spec+: { hostname: hostname } }, - '#withImagePullSecrets':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), + '#withImagePullSecrets':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), withImagePullSecrets(imagePullSecrets): { spec+: { imagePullSecrets: if std.isArray(v=imagePullSecrets) then imagePullSecrets else [imagePullSecrets] } }, - '#withImagePullSecretsMixin':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. 
For example, in the case of docker, only DockerConfig type secrets are honored. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), + '#withImagePullSecretsMixin':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), withImagePullSecretsMixin(imagePullSecrets): { spec+: { imagePullSecrets+: if std.isArray(v=imagePullSecrets) then imagePullSecrets else [imagePullSecrets] } }, '#withInitContainers':: d.fn(help='"List of initialization containers belonging to the pod. Init containers are executed in order prior to containers being started. If any init container fails, the pod is considered to have failed and is handled according to its restartPolicy. The name for an init container or normal container must be unique among all containers. Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes. The resourceRequirements of an init container are taken into account during scheduling by finding the highest request/limit for each resource type, and then using the max of of that value or the sum of the normal containers. Limits are applied to init containers in a similar fashion. Init containers cannot currently be added or removed. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/"', args=[d.arg(name='initContainers', type=d.T.array)]), withInitContainers(initContainers): { spec+: { initContainers: if std.isArray(v=initContainers) then initContainers else [initContainers] } }, @@ -197,26 +204,34 @@ withNodeSelector(nodeSelector): { spec+: { nodeSelector: nodeSelector } }, '#withNodeSelectorMixin':: d.fn(help="\"NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='nodeSelector', type=d.T.object)]), withNodeSelectorMixin(nodeSelector): { spec+: { nodeSelector+: nodeSelector } }, - '#withOverhead':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. 
More info: https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md This field is alpha-level as of Kubernetes v1.16, and is only honored by servers that enable the PodOverhead feature."', args=[d.arg(name='overhead', type=d.T.object)]), + '#withOverhead':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md"', args=[d.arg(name='overhead', type=d.T.object)]), withOverhead(overhead): { spec+: { overhead: overhead } }, - '#withOverheadMixin':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md This field is alpha-level as of Kubernetes v1.16, and is only honored by servers that enable the PodOverhead feature."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='overhead', type=d.T.object)]), + '#withOverheadMixin':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='overhead', type=d.T.object)]), withOverheadMixin(overhead): { spec+: { overhead+: overhead } }, - '#withPreemptionPolicy':: d.fn(help='"PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset. This field is beta-level, gated by the NonPreemptingPriority feature-gate."', args=[d.arg(name='preemptionPolicy', type=d.T.string)]), + '#withPreemptionPolicy':: d.fn(help='"PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. 
Defaults to PreemptLowerPriority if unset."', args=[d.arg(name='preemptionPolicy', type=d.T.string)]), withPreemptionPolicy(preemptionPolicy): { spec+: { preemptionPolicy: preemptionPolicy } }, '#withPriority':: d.fn(help='"The priority value. Various system components use this field to find the priority of the pod. When Priority Admission Controller is enabled, it prevents users from setting this field. The admission controller populates this field from PriorityClassName. The higher the value, the higher the priority."', args=[d.arg(name='priority', type=d.T.integer)]), withPriority(priority): { spec+: { priority: priority } }, '#withPriorityClassName':: d.fn(help="\"If specified, indicates the pod's priority. \\\"system-node-critical\\\" and \\\"system-cluster-critical\\\" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default.\"", args=[d.arg(name='priorityClassName', type=d.T.string)]), withPriorityClassName(priorityClassName): { spec+: { priorityClassName: priorityClassName } }, - '#withReadinessGates':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/0007-pod-ready%2B%2B.md"', args=[d.arg(name='readinessGates', type=d.T.array)]), + '#withReadinessGates':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates"', args=[d.arg(name='readinessGates', type=d.T.array)]), withReadinessGates(readinessGates): { spec+: { readinessGates: if std.isArray(v=readinessGates) then readinessGates else [readinessGates] } }, - '#withReadinessGatesMixin':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/0007-pod-ready%2B%2B.md"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='readinessGates', type=d.T.array)]), + '#withReadinessGatesMixin':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='readinessGates', type=d.T.array)]), withReadinessGatesMixin(readinessGates): { spec+: { readinessGates+: if std.isArray(v=readinessGates) then readinessGates else [readinessGates] } }, - '#withRestartPolicy':: d.fn(help='"Restart policy for all containers within the pod. One of Always, OnFailure, Never. Default to Always. 
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy"', args=[d.arg(name='restartPolicy', type=d.T.string)]), + '#withResourceClaims':: d.fn(help='"ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. The resources will be made available to those containers which consume them by name.\\n\\nThis is an alpha field and requires enabling the DynamicResourceAllocation feature gate.\\n\\nThis field is immutable."', args=[d.arg(name='resourceClaims', type=d.T.array)]), + withResourceClaims(resourceClaims): { spec+: { resourceClaims: if std.isArray(v=resourceClaims) then resourceClaims else [resourceClaims] } }, + '#withResourceClaimsMixin':: d.fn(help='"ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. The resources will be made available to those containers which consume them by name.\\n\\nThis is an alpha field and requires enabling the DynamicResourceAllocation feature gate.\\n\\nThis field is immutable."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='resourceClaims', type=d.T.array)]), + withResourceClaimsMixin(resourceClaims): { spec+: { resourceClaims+: if std.isArray(v=resourceClaims) then resourceClaims else [resourceClaims] } }, + '#withRestartPolicy':: d.fn(help='"Restart policy for all containers within the pod. One of Always, OnFailure, Never. In some contexts, only a subset of those values may be permitted. Default to Always. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy"', args=[d.arg(name='restartPolicy', type=d.T.string)]), withRestartPolicy(restartPolicy): { spec+: { restartPolicy: restartPolicy } }, - '#withRuntimeClassName':: d.fn(help='"RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \\"legacy\\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md This is a beta feature as of Kubernetes v1.14."', args=[d.arg(name='runtimeClassName', type=d.T.string)]), + '#withRuntimeClassName':: d.fn(help='"RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \\"legacy\\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class"', args=[d.arg(name='runtimeClassName', type=d.T.string)]), withRuntimeClassName(runtimeClassName): { spec+: { runtimeClassName: runtimeClassName } }, '#withSchedulerName':: d.fn(help='"If specified, the pod will be dispatched by specified scheduler. If not specified, the pod will be dispatched by default scheduler."', args=[d.arg(name='schedulerName', type=d.T.string)]), withSchedulerName(schedulerName): { spec+: { schedulerName: schedulerName } }, + '#withSchedulingGates':: d.fn(help='"SchedulingGates is an opaque list of values that if specified will block scheduling the pod. 
If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the scheduler will not attempt to schedule the pod.\\n\\nSchedulingGates can only be set at pod creation time, and be removed only afterwards.\\n\\nThis is a beta feature enabled by the PodSchedulingReadiness feature gate."', args=[d.arg(name='schedulingGates', type=d.T.array)]), + withSchedulingGates(schedulingGates): { spec+: { schedulingGates: if std.isArray(v=schedulingGates) then schedulingGates else [schedulingGates] } }, + '#withSchedulingGatesMixin':: d.fn(help='"SchedulingGates is an opaque list of values that if specified will block scheduling the pod. If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the scheduler will not attempt to schedule the pod.\\n\\nSchedulingGates can only be set at pod creation time, and be removed only afterwards.\\n\\nThis is a beta feature enabled by the PodSchedulingReadiness feature gate."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='schedulingGates', type=d.T.array)]), + withSchedulingGatesMixin(schedulingGates): { spec+: { schedulingGates+: if std.isArray(v=schedulingGates) then schedulingGates else [schedulingGates] } }, '#withServiceAccount':: d.fn(help='"DeprecatedServiceAccount is a depreciated alias for ServiceAccountName. Deprecated: Use serviceAccountName instead."', args=[d.arg(name='serviceAccount', type=d.T.string)]), withServiceAccount(serviceAccount): { spec+: { serviceAccount: serviceAccount } }, '#withServiceAccountName':: d.fn(help='"ServiceAccountName is the name of the ServiceAccount to use to run this pod. More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/"', args=[d.arg(name='serviceAccountName', type=d.T.string)]), diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/podAffinity.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/podAffinity.libsonnet similarity index 100% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/podAffinity.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/podAffinity.libsonnet diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/podAffinityTerm.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/podAffinityTerm.libsonnet similarity index 59% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/podAffinityTerm.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/podAffinityTerm.libsonnet index e3dd627ab9e..453a9af8e1f 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/podAffinityTerm.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/podAffinityTerm.libsonnet @@ -23,9 +23,17 @@ '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". 
The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), withMatchLabelsMixin(matchLabels): { namespaceSelector+: { matchLabels+: matchLabels } }, }, - '#withNamespaces':: d.fn(help="\"namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means \\\"this pod's namespace\\", args=[d.arg(name='namespaces', type=d.T.array)]), + '#withMatchLabelKeys':: d.fn(help="\"MatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. Also, MatchLabelKeys cannot be set when LabelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.\"", args=[d.arg(name='matchLabelKeys', type=d.T.array)]), + withMatchLabelKeys(matchLabelKeys): { matchLabelKeys: if std.isArray(v=matchLabelKeys) then matchLabelKeys else [matchLabelKeys] }, + '#withMatchLabelKeysMixin':: d.fn(help="\"MatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. Also, MatchLabelKeys cannot be set when LabelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='matchLabelKeys', type=d.T.array)]), + withMatchLabelKeysMixin(matchLabelKeys): { matchLabelKeys+: if std.isArray(v=matchLabelKeys) then matchLabelKeys else [matchLabelKeys] }, + '#withMismatchLabelKeys':: d.fn(help="\"MismatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. 
This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.\"", args=[d.arg(name='mismatchLabelKeys', type=d.T.array)]), + withMismatchLabelKeys(mismatchLabelKeys): { mismatchLabelKeys: if std.isArray(v=mismatchLabelKeys) then mismatchLabelKeys else [mismatchLabelKeys] }, + '#withMismatchLabelKeysMixin':: d.fn(help="\"MismatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='mismatchLabelKeys', type=d.T.array)]), + withMismatchLabelKeysMixin(mismatchLabelKeys): { mismatchLabelKeys+: if std.isArray(v=mismatchLabelKeys) then mismatchLabelKeys else [mismatchLabelKeys] }, + '#withNamespaces':: d.fn(help="\"namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means \\\"this pod's namespace\\\".\"", args=[d.arg(name='namespaces', type=d.T.array)]), withNamespaces(namespaces): { namespaces: if std.isArray(v=namespaces) then namespaces else [namespaces] }, - '#withNamespacesMixin':: d.fn(help="\"namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means \\\"this pod's namespace\\\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='namespaces', type=d.T.array)]), + '#withNamespacesMixin':: d.fn(help="\"namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means \\\"this pod's namespace\\\".\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='namespaces', type=d.T.array)]), withNamespacesMixin(namespaces): { namespaces+: if std.isArray(v=namespaces) then namespaces else [namespaces] }, '#withTopologyKey':: d.fn(help='"This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. 
Empty topologyKey is not allowed."', args=[d.arg(name='topologyKey', type=d.T.string)]),
   withTopologyKey(topologyKey): { topologyKey: topologyKey },
diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/podAntiAffinity.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/podAntiAffinity.libsonnet
similarity index 100%
rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/podAntiAffinity.libsonnet
rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/podAntiAffinity.libsonnet
diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/podCondition.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/podCondition.libsonnet
similarity index 100%
rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/podCondition.libsonnet
rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/podCondition.libsonnet
diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/podDNSConfig.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/podDNSConfig.libsonnet
similarity index 100%
rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/podDNSConfig.libsonnet
rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/podDNSConfig.libsonnet
diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/podDNSConfigOption.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/podDNSConfigOption.libsonnet
similarity index 100%
rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/podDNSConfigOption.libsonnet
rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/podDNSConfigOption.libsonnet
diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/podIP.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/podIP.libsonnet
new file mode 100644
index 00000000000..08951dc0c7d
--- /dev/null
+++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/podIP.libsonnet
@@ -0,0 +1,8 @@
+{
+  local d = (import 'doc-util/main.libsonnet'),
+  '#':: d.pkg(name='podIP', url='', help='"PodIP represents a single IP address allocated to the pod."'),
+  '#withIp':: d.fn(help='"IP is the IP address assigned to the pod"', args=[d.arg(name='ip', type=d.T.string)]),
+  withIp(ip): { ip: ip },
+  '#mixin': 'ignore',
+  mixin: self,
+}
diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/podOS.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/podOS.libsonnet
new file mode 100644
index 00000000000..fd6b94965f2
--- /dev/null
+++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/podOS.libsonnet
@@ -0,0 +1,8 @@
+{
+  local d = (import 'doc-util/main.libsonnet'),
+  '#':: d.pkg(name='podOS', url='', help='"PodOS defines the OS parameters of a pod."'),
+  '#withName':: d.fn(help='"Name is the name of the operating system. The currently supported values are linux and windows. Additional value may be defined in future and can be one of: https://github.com/opencontainers/runtime-spec/blob/master/config.md#platform-specific-configuration Clients should expect to handle additional values and treat unrecognized values in this field as os: null"', args=[d.arg(name='name', type=d.T.string)]),
+  withName(name): { name: name },
+  '#mixin': 'ignore',
+  mixin: self,
+}
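As an illustrative aside only (not part of the vendored patch): a minimal sketch of how the regenerated 1.29 pod helpers documented above might be composed in a Tanka environment. The import path, field names of the composed object, and label values below are assumptions for the example, not taken from this patch.

local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';  // hypothetical wiring; Tanka setups usually alias this via a local k.libsonnet
local pod = k.core.v1.pod;

{
  // Each generated with* helper returns a small mixin object; compose them with '+'.
  example_pod:
    pod.new('example')
    + pod.metadata.withLabels({ app: 'example' })      // metadata.withLabels, documented earlier in this file
    + pod.spec.os.withName('linux')                    // spec.os helper added in the 1.29 library
    + pod.spec.securityContext.withRunAsNonRoot(true),
}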
diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/podReadinessGate.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/podReadinessGate.libsonnet
similarity index 100%
rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/podReadinessGate.libsonnet
rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/podReadinessGate.libsonnet
diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/podResourceClaim.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/podResourceClaim.libsonnet
new file mode 100644
index 00000000000..bbe0a5b228c
--- /dev/null
+++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/podResourceClaim.libsonnet
@@ -0,0 +1,15 @@
+{
+  local d = (import 'doc-util/main.libsonnet'),
+  '#':: d.pkg(name='podResourceClaim', url='', help='"PodResourceClaim references exactly one ResourceClaim through a ClaimSource. It adds a name to it that uniquely identifies the ResourceClaim inside the Pod. Containers that need access to the ResourceClaim reference it with this name."'),
+  '#source':: d.obj(help='"ClaimSource describes a reference to a ResourceClaim.\\n\\nExactly one of these fields should be set. Consumers of this type must treat an empty object as if it has an unknown value."'),
+  source: {
+    '#withResourceClaimName':: d.fn(help='"ResourceClaimName is the name of a ResourceClaim object in the same namespace as this pod."', args=[d.arg(name='resourceClaimName', type=d.T.string)]),
+    withResourceClaimName(resourceClaimName): { source+: { resourceClaimName: resourceClaimName } },
+    '#withResourceClaimTemplateName':: d.fn(help='"ResourceClaimTemplateName is the name of a ResourceClaimTemplate object in the same namespace as this pod.\\n\\nThe template will be used to create a new ResourceClaim, which will be bound to this pod. When this pod is deleted, the ResourceClaim will also be deleted. The pod name and resource name, along with a generated component, will be used to form a unique name for the ResourceClaim, which will be recorded in pod.status.resourceClaimStatuses.\\n\\nThis field is immutable and no changes will be made to the corresponding ResourceClaim by the control plane after creating the ResourceClaim."', args=[d.arg(name='resourceClaimTemplateName', type=d.T.string)]),
+    withResourceClaimTemplateName(resourceClaimTemplateName): { source+: { resourceClaimTemplateName: resourceClaimTemplateName } },
+  },
+  '#withName':: d.fn(help='"Name uniquely identifies this resource claim inside the pod. This must be a DNS_LABEL."', args=[d.arg(name='name', type=d.T.string)]),
+  withName(name): { name: name },
+  '#mixin': 'ignore',
+  mixin: self,
+}
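Again purely as an illustrative aside, under the same assumptions as the sketch above: the new resource-claim and scheduling-gate helpers might be wired together roughly like this (the claim name, external ResourceClaim name, and gate name are placeholders).

local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';  // hypothetical wiring
local pod = k.core.v1.pod;
local claim = k.core.v1.podResourceClaim;
local gate = k.core.v1.podSchedulingGate;

{
  // A pod that references a pre-created ResourceClaim and starts out scheduling-gated.
  dra_pod:
    pod.new('dra-example')
    + pod.spec.withResourceClaims([
      claim.withName('gpu')                                  // must be a DNS_LABEL, per the help text above
      + claim.source.withResourceClaimName('shared-gpu-claim'),
    ])
    + pod.spec.withSchedulingGates([gate.withName('example.com/provisioning')]),
}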
diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/podResourceClaimStatus.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/podResourceClaimStatus.libsonnet
new file mode 100644
index 00000000000..11fa0c7833b
--- /dev/null
+++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/podResourceClaimStatus.libsonnet
@@ -0,0 +1,10 @@
+{
+  local d = (import 'doc-util/main.libsonnet'),
+  '#':: d.pkg(name='podResourceClaimStatus', url='', help='"PodResourceClaimStatus is stored in the PodStatus for each PodResourceClaim which references a ResourceClaimTemplate. It stores the generated name for the corresponding ResourceClaim."'),
+  '#withName':: d.fn(help='"Name uniquely identifies this resource claim inside the pod. This must match the name of an entry in pod.spec.resourceClaims, which implies that the string must be a DNS_LABEL."', args=[d.arg(name='name', type=d.T.string)]),
+  withName(name): { name: name },
+  '#withResourceClaimName':: d.fn(help='"ResourceClaimName is the name of the ResourceClaim that was generated for the Pod in the namespace of the Pod. It this is unset, then generating a ResourceClaim was not necessary. The pod.spec.resourceClaims entry can be ignored in this case."', args=[d.arg(name='resourceClaimName', type=d.T.string)]),
+  withResourceClaimName(resourceClaimName): { resourceClaimName: resourceClaimName },
+  '#mixin': 'ignore',
+  mixin: self,
+}
diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/podSchedulingGate.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/podSchedulingGate.libsonnet
new file mode 100644
index 00000000000..072a4eb6562
--- /dev/null
+++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/podSchedulingGate.libsonnet
@@ -0,0 +1,8 @@
+{
+  local d = (import 'doc-util/main.libsonnet'),
+  '#':: d.pkg(name='podSchedulingGate', url='', help='"PodSchedulingGate is associated to a Pod to guard its scheduling."'),
+  '#withName':: d.fn(help='"Name of the scheduling gate. Each scheduling gate must have a unique name field."', args=[d.arg(name='name', type=d.T.string)]),
+  withName(name): { name: name },
+  '#mixin': 'ignore',
+  mixin: self,
+}
diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/podSecurityContext.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/podSecurityContext.libsonnet
similarity index 71%
rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/podSecurityContext.libsonnet
rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/podSecurityContext.libsonnet
index a918355cad0..3515afc7ec1 100644
--- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/podSecurityContext.libsonnet
+++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/podSecurityContext.libsonnet
@@ -14,7 +14,7 @@
   },
   '#seccompProfile':: d.obj(help="\"SeccompProfile defines a pod/container's seccomp profile settings. Only one profile source may be set.\""),
   seccompProfile: {
-    '#withLocalhostProfile':: d.fn(help="\"localhostProfile indicates a profile defined in a file on the node should be used. 
The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must only be set if type is \\\"Localhost\\\".\"", args=[d.arg(name='localhostProfile', type=d.T.string)]), + '#withLocalhostProfile':: d.fn(help="\"localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must be set if type is \\\"Localhost\\\". Must NOT be set for any other type.\"", args=[d.arg(name='localhostProfile', type=d.T.string)]), withLocalhostProfile(localhostProfile): { seccompProfile+: { localhostProfile: localhostProfile } }, '#withType':: d.fn(help='"type indicates which kind of seccomp profile will be applied. Valid options are:\\n\\nLocalhost - a profile defined in a file on the node should be used. RuntimeDefault - the container runtime default profile should be used. Unconfined - no profile should be applied."', args=[d.arg(name='type', type=d.T.string)]), withType(type): { seccompProfile+: { type: type } }, @@ -25,26 +25,28 @@ withGmsaCredentialSpec(gmsaCredentialSpec): { windowsOptions+: { gmsaCredentialSpec: gmsaCredentialSpec } }, '#withGmsaCredentialSpecName':: d.fn(help='"GMSACredentialSpecName is the name of the GMSA credential spec to use."', args=[d.arg(name='gmsaCredentialSpecName', type=d.T.string)]), withGmsaCredentialSpecName(gmsaCredentialSpecName): { windowsOptions+: { gmsaCredentialSpecName: gmsaCredentialSpecName } }, + '#withHostProcess':: d.fn(help="\"HostProcess determines if a container should be run as a 'Host Process' container. All of a Pod's containers must have the same effective HostProcess value (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). In addition, if HostProcess is true then HostNetwork must also be set to true.\"", args=[d.arg(name='hostProcess', type=d.T.boolean)]), + withHostProcess(hostProcess): { windowsOptions+: { hostProcess: hostProcess } }, '#withRunAsUserName':: d.fn(help='"The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence."', args=[d.arg(name='runAsUserName', type=d.T.string)]), withRunAsUserName(runAsUserName): { windowsOptions+: { runAsUserName: runAsUserName } }, }, - '#withFsGroup':: d.fn(help="\"A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod:\\n\\n1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw----\\n\\nIf unset, the Kubelet will not modify the ownership and permissions of any volume.\"", args=[d.arg(name='fsGroup', type=d.T.integer)]), + '#withFsGroup':: d.fn(help="\"A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod:\\n\\n1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw----\\n\\nIf unset, the Kubelet will not modify the ownership and permissions of any volume. 
Note that this field cannot be set when spec.os.name is windows.\"", args=[d.arg(name='fsGroup', type=d.T.integer)]), withFsGroup(fsGroup): { fsGroup: fsGroup }, - '#withFsGroupChangePolicy':: d.fn(help='"fsGroupChangePolicy defines behavior of changing ownership and permission of the volume before being exposed inside Pod. This field will only apply to volume types which support fsGroup based ownership(and permissions). It will have no effect on ephemeral volume types such as: secret, configmaps and emptydir. Valid values are \\"OnRootMismatch\\" and \\"Always\\". If not specified, \\"Always\\" is used."', args=[d.arg(name='fsGroupChangePolicy', type=d.T.string)]), + '#withFsGroupChangePolicy':: d.fn(help='"fsGroupChangePolicy defines behavior of changing ownership and permission of the volume before being exposed inside Pod. This field will only apply to volume types which support fsGroup based ownership(and permissions). It will have no effect on ephemeral volume types such as: secret, configmaps and emptydir. Valid values are \\"OnRootMismatch\\" and \\"Always\\". If not specified, \\"Always\\" is used. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='fsGroupChangePolicy', type=d.T.string)]), withFsGroupChangePolicy(fsGroupChangePolicy): { fsGroupChangePolicy: fsGroupChangePolicy }, - '#withRunAsGroup':: d.fn(help='"The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container."', args=[d.arg(name='runAsGroup', type=d.T.integer)]), + '#withRunAsGroup':: d.fn(help='"The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='runAsGroup', type=d.T.integer)]), withRunAsGroup(runAsGroup): { runAsGroup: runAsGroup }, '#withRunAsNonRoot':: d.fn(help='"Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence."', args=[d.arg(name='runAsNonRoot', type=d.T.boolean)]), withRunAsNonRoot(runAsNonRoot): { runAsNonRoot: runAsNonRoot }, - '#withRunAsUser':: d.fn(help='"The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container."', args=[d.arg(name='runAsUser', type=d.T.integer)]), + '#withRunAsUser':: d.fn(help='"The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. 
Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='runAsUser', type=d.T.integer)]), withRunAsUser(runAsUser): { runAsUser: runAsUser }, - '#withSupplementalGroups':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID. If unspecified, no groups will be added to any container.\"", args=[d.arg(name='supplementalGroups', type=d.T.array)]), + '#withSupplementalGroups':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID, the fsGroup (if specified), and group memberships defined in the container image for the uid of the container process. If unspecified, no additional groups are added to any container. Note that group memberships defined in the container image for the uid of the container process are still effective, even if they are not included in this list. Note that this field cannot be set when spec.os.name is windows.\"", args=[d.arg(name='supplementalGroups', type=d.T.array)]), withSupplementalGroups(supplementalGroups): { supplementalGroups: if std.isArray(v=supplementalGroups) then supplementalGroups else [supplementalGroups] }, - '#withSupplementalGroupsMixin':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID. If unspecified, no groups will be added to any container.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='supplementalGroups', type=d.T.array)]), + '#withSupplementalGroupsMixin':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID, the fsGroup (if specified), and group memberships defined in the container image for the uid of the container process. If unspecified, no additional groups are added to any container. Note that group memberships defined in the container image for the uid of the container process are still effective, even if they are not included in this list. Note that this field cannot be set when spec.os.name is windows.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='supplementalGroups', type=d.T.array)]), withSupplementalGroupsMixin(supplementalGroups): { supplementalGroups+: if std.isArray(v=supplementalGroups) then supplementalGroups else [supplementalGroups] }, - '#withSysctls':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch."', args=[d.arg(name='sysctls', type=d.T.array)]), + '#withSysctls':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='sysctls', type=d.T.array)]), withSysctls(sysctls): { sysctls: if std.isArray(v=sysctls) then sysctls else [sysctls] }, - '#withSysctlsMixin':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='sysctls', type=d.T.array)]), + '#withSysctlsMixin':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch. 
Note that this field cannot be set when spec.os.name is windows."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='sysctls', type=d.T.array)]), withSysctlsMixin(sysctls): { sysctls+: if std.isArray(v=sysctls) then sysctls else [sysctls] }, '#mixin': 'ignore', mixin: self, diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/podSpec.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/podSpec.libsonnet similarity index 83% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/podSpec.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/podSpec.libsonnet index fb01a200601..6312789a7c3 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/podSpec.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/podSpec.libsonnet @@ -55,6 +55,11 @@ '#withSearchesMixin':: d.fn(help='"A list of DNS search domains for host-name lookup. This will be appended to the base search paths generated from DNSPolicy. Duplicated search paths will be removed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='searches', type=d.T.array)]), withSearchesMixin(searches): { dnsConfig+: { searches+: if std.isArray(v=searches) then searches else [searches] } }, }, + '#os':: d.obj(help='"PodOS defines the OS parameters of a pod."'), + os: { + '#withName':: d.fn(help='"Name is the name of the operating system. The currently supported values are linux and windows. Additional value may be defined in future and can be one of: https://github.com/opencontainers/runtime-spec/blob/master/config.md#platform-specific-configuration Clients should expect to handle additional values and treat unrecognized values in this field as os: null"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { os+: { name: name } }, + }, '#securityContext':: d.obj(help='"PodSecurityContext holds pod-level security attributes and common container settings. Some fields are also present in container.securityContext. Field values of container.securityContext take precedence over field values of PodSecurityContext."'), securityContext: { '#seLinuxOptions':: d.obj(help='"SELinuxOptions are the labels to be applied to the container"'), @@ -70,7 +75,7 @@ }, '#seccompProfile':: d.obj(help="\"SeccompProfile defines a pod/container's seccomp profile settings. Only one profile source may be set.\""), seccompProfile: { - '#withLocalhostProfile':: d.fn(help="\"localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must only be set if type is \\\"Localhost\\\".\"", args=[d.arg(name='localhostProfile', type=d.T.string)]), + '#withLocalhostProfile':: d.fn(help="\"localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must be set if type is \\\"Localhost\\\". 
Must NOT be set for any other type.\"", args=[d.arg(name='localhostProfile', type=d.T.string)]), withLocalhostProfile(localhostProfile): { securityContext+: { seccompProfile+: { localhostProfile: localhostProfile } } }, '#withType':: d.fn(help='"type indicates which kind of seccomp profile will be applied. Valid options are:\\n\\nLocalhost - a profile defined in a file on the node should be used. RuntimeDefault - the container runtime default profile should be used. Unconfined - no profile should be applied."', args=[d.arg(name='type', type=d.T.string)]), withType(type): { securityContext+: { seccompProfile+: { type: type } } }, @@ -81,26 +86,28 @@ withGmsaCredentialSpec(gmsaCredentialSpec): { securityContext+: { windowsOptions+: { gmsaCredentialSpec: gmsaCredentialSpec } } }, '#withGmsaCredentialSpecName':: d.fn(help='"GMSACredentialSpecName is the name of the GMSA credential spec to use."', args=[d.arg(name='gmsaCredentialSpecName', type=d.T.string)]), withGmsaCredentialSpecName(gmsaCredentialSpecName): { securityContext+: { windowsOptions+: { gmsaCredentialSpecName: gmsaCredentialSpecName } } }, + '#withHostProcess':: d.fn(help="\"HostProcess determines if a container should be run as a 'Host Process' container. All of a Pod's containers must have the same effective HostProcess value (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). In addition, if HostProcess is true then HostNetwork must also be set to true.\"", args=[d.arg(name='hostProcess', type=d.T.boolean)]), + withHostProcess(hostProcess): { securityContext+: { windowsOptions+: { hostProcess: hostProcess } } }, '#withRunAsUserName':: d.fn(help='"The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence."', args=[d.arg(name='runAsUserName', type=d.T.string)]), withRunAsUserName(runAsUserName): { securityContext+: { windowsOptions+: { runAsUserName: runAsUserName } } }, }, - '#withFsGroup':: d.fn(help="\"A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod:\\n\\n1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw----\\n\\nIf unset, the Kubelet will not modify the ownership and permissions of any volume.\"", args=[d.arg(name='fsGroup', type=d.T.integer)]), + '#withFsGroup':: d.fn(help="\"A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod:\\n\\n1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw----\\n\\nIf unset, the Kubelet will not modify the ownership and permissions of any volume. Note that this field cannot be set when spec.os.name is windows.\"", args=[d.arg(name='fsGroup', type=d.T.integer)]), withFsGroup(fsGroup): { securityContext+: { fsGroup: fsGroup } }, - '#withFsGroupChangePolicy':: d.fn(help='"fsGroupChangePolicy defines behavior of changing ownership and permission of the volume before being exposed inside Pod. 
This field will only apply to volume types which support fsGroup based ownership(and permissions). It will have no effect on ephemeral volume types such as: secret, configmaps and emptydir. Valid values are \\"OnRootMismatch\\" and \\"Always\\". If not specified, \\"Always\\" is used."', args=[d.arg(name='fsGroupChangePolicy', type=d.T.string)]), + '#withFsGroupChangePolicy':: d.fn(help='"fsGroupChangePolicy defines behavior of changing ownership and permission of the volume before being exposed inside Pod. This field will only apply to volume types which support fsGroup based ownership(and permissions). It will have no effect on ephemeral volume types such as: secret, configmaps and emptydir. Valid values are \\"OnRootMismatch\\" and \\"Always\\". If not specified, \\"Always\\" is used. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='fsGroupChangePolicy', type=d.T.string)]), withFsGroupChangePolicy(fsGroupChangePolicy): { securityContext+: { fsGroupChangePolicy: fsGroupChangePolicy } }, - '#withRunAsGroup':: d.fn(help='"The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container."', args=[d.arg(name='runAsGroup', type=d.T.integer)]), + '#withRunAsGroup':: d.fn(help='"The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='runAsGroup', type=d.T.integer)]), withRunAsGroup(runAsGroup): { securityContext+: { runAsGroup: runAsGroup } }, '#withRunAsNonRoot':: d.fn(help='"Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence."', args=[d.arg(name='runAsNonRoot', type=d.T.boolean)]), withRunAsNonRoot(runAsNonRoot): { securityContext+: { runAsNonRoot: runAsNonRoot } }, - '#withRunAsUser':: d.fn(help='"The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container."', args=[d.arg(name='runAsUser', type=d.T.integer)]), + '#withRunAsUser':: d.fn(help='"The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='runAsUser', type=d.T.integer)]), withRunAsUser(runAsUser): { securityContext+: { runAsUser: runAsUser } }, - '#withSupplementalGroups':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID. 
If unspecified, no groups will be added to any container.\"", args=[d.arg(name='supplementalGroups', type=d.T.array)]), + '#withSupplementalGroups':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID, the fsGroup (if specified), and group memberships defined in the container image for the uid of the container process. If unspecified, no additional groups are added to any container. Note that group memberships defined in the container image for the uid of the container process are still effective, even if they are not included in this list. Note that this field cannot be set when spec.os.name is windows.\"", args=[d.arg(name='supplementalGroups', type=d.T.array)]), withSupplementalGroups(supplementalGroups): { securityContext+: { supplementalGroups: if std.isArray(v=supplementalGroups) then supplementalGroups else [supplementalGroups] } }, - '#withSupplementalGroupsMixin':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID. If unspecified, no groups will be added to any container.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='supplementalGroups', type=d.T.array)]), + '#withSupplementalGroupsMixin':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID, the fsGroup (if specified), and group memberships defined in the container image for the uid of the container process. If unspecified, no additional groups are added to any container. Note that group memberships defined in the container image for the uid of the container process are still effective, even if they are not included in this list. Note that this field cannot be set when spec.os.name is windows.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='supplementalGroups', type=d.T.array)]), withSupplementalGroupsMixin(supplementalGroups): { securityContext+: { supplementalGroups+: if std.isArray(v=supplementalGroups) then supplementalGroups else [supplementalGroups] } }, - '#withSysctls':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch."', args=[d.arg(name='sysctls', type=d.T.array)]), + '#withSysctls':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='sysctls', type=d.T.array)]), withSysctls(sysctls): { securityContext+: { sysctls: if std.isArray(v=sysctls) then sysctls else [sysctls] } }, - '#withSysctlsMixin':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='sysctls', type=d.T.array)]), + '#withSysctlsMixin':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch. 
Note that this field cannot be set when spec.os.name is windows."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='sysctls', type=d.T.array)]), withSysctlsMixin(sysctls): { securityContext+: { sysctls+: if std.isArray(v=sysctls) then sysctls else [sysctls] } }, }, '#withActiveDeadlineSeconds':: d.fn(help='"Optional duration in seconds the pod may be active on the node relative to StartTime before the system will actively try to mark it failed and kill associated containers. Value must be a positive integer."', args=[d.arg(name='activeDeadlineSeconds', type=d.T.integer)]), @@ -115,9 +122,9 @@ withDnsPolicy(dnsPolicy): { dnsPolicy: dnsPolicy }, '#withEnableServiceLinks':: d.fn(help="\"EnableServiceLinks indicates whether information about services should be injected into pod's environment variables, matching the syntax of Docker links. Optional: Defaults to true.\"", args=[d.arg(name='enableServiceLinks', type=d.T.boolean)]), withEnableServiceLinks(enableServiceLinks): { enableServiceLinks: enableServiceLinks }, - '#withEphemeralContainers':: d.fn(help="\"List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. This field is alpha-level and is only honored by servers that enable the EphemeralContainers feature.\"", args=[d.arg(name='ephemeralContainers', type=d.T.array)]), + '#withEphemeralContainers':: d.fn(help="\"List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource.\"", args=[d.arg(name='ephemeralContainers', type=d.T.array)]), withEphemeralContainers(ephemeralContainers): { ephemeralContainers: if std.isArray(v=ephemeralContainers) then ephemeralContainers else [ephemeralContainers] }, - '#withEphemeralContainersMixin':: d.fn(help="\"List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. This field is alpha-level and is only honored by servers that enable the EphemeralContainers feature.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='ephemeralContainers', type=d.T.array)]), + '#withEphemeralContainersMixin':: d.fn(help="\"List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. 
In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='ephemeralContainers', type=d.T.array)]), withEphemeralContainersMixin(ephemeralContainers): { ephemeralContainers+: if std.isArray(v=ephemeralContainers) then ephemeralContainers else [ephemeralContainers] }, '#withHostAliases':: d.fn(help="\"HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts file if specified. This is only valid for non-hostNetwork pods.\"", args=[d.arg(name='hostAliases', type=d.T.array)]), withHostAliases(hostAliases): { hostAliases: if std.isArray(v=hostAliases) then hostAliases else [hostAliases] }, @@ -129,11 +136,13 @@ withHostNetwork(hostNetwork): { hostNetwork: hostNetwork }, '#withHostPID':: d.fn(help="\"Use the host's pid namespace. Optional: Default to false.\"", args=[d.arg(name='hostPID', type=d.T.boolean)]), withHostPID(hostPID): { hostPID: hostPID }, + '#withHostUsers':: d.fn(help="\"Use the host's user namespace. Optional: Default to true. If set to true or not present, the pod will be run in the host user namespace, useful for when the pod needs a feature only available to the host user namespace, such as loading a kernel module with CAP_SYS_MODULE. When set to false, a new userns is created for the pod. Setting false is useful for mitigating container breakout vulnerabilities even allowing users to run their containers as root without actually having root privileges on the host. This field is alpha-level and is only honored by servers that enable the UserNamespacesSupport feature.\"", args=[d.arg(name='hostUsers', type=d.T.boolean)]), + withHostUsers(hostUsers): { hostUsers: hostUsers }, '#withHostname':: d.fn(help="\"Specifies the hostname of the Pod If not specified, the pod's hostname will be set to a system-defined value.\"", args=[d.arg(name='hostname', type=d.T.string)]), withHostname(hostname): { hostname: hostname }, - '#withImagePullSecrets':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), + '#withImagePullSecrets':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), withImagePullSecrets(imagePullSecrets): { imagePullSecrets: if std.isArray(v=imagePullSecrets) then imagePullSecrets else [imagePullSecrets] }, - '#withImagePullSecretsMixin':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. 
More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), + '#withImagePullSecretsMixin':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), withImagePullSecretsMixin(imagePullSecrets): { imagePullSecrets+: if std.isArray(v=imagePullSecrets) then imagePullSecrets else [imagePullSecrets] }, '#withInitContainers':: d.fn(help='"List of initialization containers belonging to the pod. Init containers are executed in order prior to containers being started. If any init container fails, the pod is considered to have failed and is handled according to its restartPolicy. The name for an init container or normal container must be unique among all containers. Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes. The resourceRequirements of an init container are taken into account during scheduling by finding the highest request/limit for each resource type, and then using the max of of that value or the sum of the normal containers. Limits are applied to init containers in a similar fashion. Init containers cannot currently be added or removed. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/"', args=[d.arg(name='initContainers', type=d.T.array)]), withInitContainers(initContainers): { initContainers: if std.isArray(v=initContainers) then initContainers else [initContainers] }, @@ -145,26 +154,34 @@ withNodeSelector(nodeSelector): { nodeSelector: nodeSelector }, '#withNodeSelectorMixin':: d.fn(help="\"NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='nodeSelector', type=d.T.object)]), withNodeSelectorMixin(nodeSelector): { nodeSelector+: nodeSelector }, - '#withOverhead':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md This field is alpha-level as of Kubernetes v1.16, and is only honored by servers that enable the PodOverhead feature."', args=[d.arg(name='overhead', type=d.T.object)]), + '#withOverhead':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. 
This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md"', args=[d.arg(name='overhead', type=d.T.object)]), withOverhead(overhead): { overhead: overhead }, - '#withOverheadMixin':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md This field is alpha-level as of Kubernetes v1.16, and is only honored by servers that enable the PodOverhead feature."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='overhead', type=d.T.object)]), + '#withOverheadMixin':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='overhead', type=d.T.object)]), withOverheadMixin(overhead): { overhead+: overhead }, - '#withPreemptionPolicy':: d.fn(help='"PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset. This field is beta-level, gated by the NonPreemptingPriority feature-gate."', args=[d.arg(name='preemptionPolicy', type=d.T.string)]), + '#withPreemptionPolicy':: d.fn(help='"PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset."', args=[d.arg(name='preemptionPolicy', type=d.T.string)]), withPreemptionPolicy(preemptionPolicy): { preemptionPolicy: preemptionPolicy }, '#withPriority':: d.fn(help='"The priority value. Various system components use this field to find the priority of the pod. When Priority Admission Controller is enabled, it prevents users from setting this field. The admission controller populates this field from PriorityClassName. 
The higher the value, the higher the priority."', args=[d.arg(name='priority', type=d.T.integer)]), withPriority(priority): { priority: priority }, '#withPriorityClassName':: d.fn(help="\"If specified, indicates the pod's priority. \\\"system-node-critical\\\" and \\\"system-cluster-critical\\\" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default.\"", args=[d.arg(name='priorityClassName', type=d.T.string)]), withPriorityClassName(priorityClassName): { priorityClassName: priorityClassName }, - '#withReadinessGates':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/0007-pod-ready%2B%2B.md"', args=[d.arg(name='readinessGates', type=d.T.array)]), + '#withReadinessGates':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates"', args=[d.arg(name='readinessGates', type=d.T.array)]), withReadinessGates(readinessGates): { readinessGates: if std.isArray(v=readinessGates) then readinessGates else [readinessGates] }, - '#withReadinessGatesMixin':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/0007-pod-ready%2B%2B.md"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='readinessGates', type=d.T.array)]), + '#withReadinessGatesMixin':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='readinessGates', type=d.T.array)]), withReadinessGatesMixin(readinessGates): { readinessGates+: if std.isArray(v=readinessGates) then readinessGates else [readinessGates] }, - '#withRestartPolicy':: d.fn(help='"Restart policy for all containers within the pod. One of Always, OnFailure, Never. Default to Always. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy"', args=[d.arg(name='restartPolicy', type=d.T.string)]), + '#withResourceClaims':: d.fn(help='"ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. 
The resources will be made available to those containers which consume them by name.\\n\\nThis is an alpha field and requires enabling the DynamicResourceAllocation feature gate.\\n\\nThis field is immutable."', args=[d.arg(name='resourceClaims', type=d.T.array)]), + withResourceClaims(resourceClaims): { resourceClaims: if std.isArray(v=resourceClaims) then resourceClaims else [resourceClaims] }, + '#withResourceClaimsMixin':: d.fn(help='"ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. The resources will be made available to those containers which consume them by name.\\n\\nThis is an alpha field and requires enabling the DynamicResourceAllocation feature gate.\\n\\nThis field is immutable."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='resourceClaims', type=d.T.array)]), + withResourceClaimsMixin(resourceClaims): { resourceClaims+: if std.isArray(v=resourceClaims) then resourceClaims else [resourceClaims] }, + '#withRestartPolicy':: d.fn(help='"Restart policy for all containers within the pod. One of Always, OnFailure, Never. In some contexts, only a subset of those values may be permitted. Default to Always. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy"', args=[d.arg(name='restartPolicy', type=d.T.string)]), withRestartPolicy(restartPolicy): { restartPolicy: restartPolicy }, - '#withRuntimeClassName':: d.fn(help='"RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \\"legacy\\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md This is a beta feature as of Kubernetes v1.14."', args=[d.arg(name='runtimeClassName', type=d.T.string)]), + '#withRuntimeClassName':: d.fn(help='"RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \\"legacy\\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class"', args=[d.arg(name='runtimeClassName', type=d.T.string)]), withRuntimeClassName(runtimeClassName): { runtimeClassName: runtimeClassName }, '#withSchedulerName':: d.fn(help='"If specified, the pod will be dispatched by specified scheduler. If not specified, the pod will be dispatched by default scheduler."', args=[d.arg(name='schedulerName', type=d.T.string)]), withSchedulerName(schedulerName): { schedulerName: schedulerName }, + '#withSchedulingGates':: d.fn(help='"SchedulingGates is an opaque list of values that if specified will block scheduling the pod. 
If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the scheduler will not attempt to schedule the pod.\\n\\nSchedulingGates can only be set at pod creation time, and be removed only afterwards.\\n\\nThis is a beta feature enabled by the PodSchedulingReadiness feature gate."', args=[d.arg(name='schedulingGates', type=d.T.array)]), + withSchedulingGates(schedulingGates): { schedulingGates: if std.isArray(v=schedulingGates) then schedulingGates else [schedulingGates] }, + '#withSchedulingGatesMixin':: d.fn(help='"SchedulingGates is an opaque list of values that if specified will block scheduling the pod. If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the scheduler will not attempt to schedule the pod.\\n\\nSchedulingGates can only be set at pod creation time, and be removed only afterwards.\\n\\nThis is a beta feature enabled by the PodSchedulingReadiness feature gate."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='schedulingGates', type=d.T.array)]), + withSchedulingGatesMixin(schedulingGates): { schedulingGates+: if std.isArray(v=schedulingGates) then schedulingGates else [schedulingGates] }, '#withServiceAccount':: d.fn(help='"DeprecatedServiceAccount is a depreciated alias for ServiceAccountName. Deprecated: Use serviceAccountName instead."', args=[d.arg(name='serviceAccount', type=d.T.string)]), withServiceAccount(serviceAccount): { serviceAccount: serviceAccount }, '#withServiceAccountName':: d.fn(help='"ServiceAccountName is the name of the ServiceAccount to use to run this pod. More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/"', args=[d.arg(name='serviceAccountName', type=d.T.string)]), diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/podStatus.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/podStatus.libsonnet similarity index 67% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/podStatus.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/podStatus.libsonnet index 9601a60537a..e76c07d1286 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/podStatus.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/podStatus.libsonnet @@ -5,16 +5,20 @@ withConditions(conditions): { conditions: if std.isArray(v=conditions) then conditions else [conditions] }, '#withConditionsMixin':: d.fn(help='"Current service state of pod. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='conditions', type=d.T.array)]), withConditionsMixin(conditions): { conditions+: if std.isArray(v=conditions) then conditions else [conditions] }, - '#withContainerStatuses':: d.fn(help='"The list has one entry per container in the manifest. Each entry is currently the output of `docker inspect`. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status"', args=[d.arg(name='containerStatuses', type=d.T.array)]), + '#withContainerStatuses':: d.fn(help='"The list has one entry per container in the manifest. 
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status"', args=[d.arg(name='containerStatuses', type=d.T.array)]), withContainerStatuses(containerStatuses): { containerStatuses: if std.isArray(v=containerStatuses) then containerStatuses else [containerStatuses] }, - '#withContainerStatusesMixin':: d.fn(help='"The list has one entry per container in the manifest. Each entry is currently the output of `docker inspect`. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='containerStatuses', type=d.T.array)]), + '#withContainerStatusesMixin':: d.fn(help='"The list has one entry per container in the manifest. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='containerStatuses', type=d.T.array)]), withContainerStatusesMixin(containerStatuses): { containerStatuses+: if std.isArray(v=containerStatuses) then containerStatuses else [containerStatuses] }, - '#withEphemeralContainerStatuses':: d.fn(help='"Status for any ephemeral containers that have run in this pod. This field is alpha-level and is only populated by servers that enable the EphemeralContainers feature."', args=[d.arg(name='ephemeralContainerStatuses', type=d.T.array)]), + '#withEphemeralContainerStatuses':: d.fn(help='"Status for any ephemeral containers that have run in this pod."', args=[d.arg(name='ephemeralContainerStatuses', type=d.T.array)]), withEphemeralContainerStatuses(ephemeralContainerStatuses): { ephemeralContainerStatuses: if std.isArray(v=ephemeralContainerStatuses) then ephemeralContainerStatuses else [ephemeralContainerStatuses] }, - '#withEphemeralContainerStatusesMixin':: d.fn(help='"Status for any ephemeral containers that have run in this pod. This field is alpha-level and is only populated by servers that enable the EphemeralContainers feature."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ephemeralContainerStatuses', type=d.T.array)]), + '#withEphemeralContainerStatusesMixin':: d.fn(help='"Status for any ephemeral containers that have run in this pod."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ephemeralContainerStatuses', type=d.T.array)]), withEphemeralContainerStatusesMixin(ephemeralContainerStatuses): { ephemeralContainerStatuses+: if std.isArray(v=ephemeralContainerStatuses) then ephemeralContainerStatuses else [ephemeralContainerStatuses] }, - '#withHostIP':: d.fn(help='"IP address of the host to which the pod is assigned. Empty if not yet scheduled."', args=[d.arg(name='hostIP', type=d.T.string)]), + '#withHostIP':: d.fn(help='"hostIP holds the IP address of the host to which the pod is assigned. Empty if the pod has not started yet. A pod can be assigned to a node that has a problem in kubelet which in turns mean that HostIP will not be updated even if there is a node is assigned to pod"', args=[d.arg(name='hostIP', type=d.T.string)]), withHostIP(hostIP): { hostIP: hostIP }, + '#withHostIPs':: d.fn(help='"hostIPs holds the IP addresses allocated to the host. If this field is specified, the first entry must match the hostIP field. This list is empty if the pod has not started yet. 
A pod can be assigned to a node that has a problem in kubelet which in turns means that HostIPs will not be updated even if there is a node is assigned to this pod."', args=[d.arg(name='hostIPs', type=d.T.array)]), + withHostIPs(hostIPs): { hostIPs: if std.isArray(v=hostIPs) then hostIPs else [hostIPs] }, + '#withHostIPsMixin':: d.fn(help='"hostIPs holds the IP addresses allocated to the host. If this field is specified, the first entry must match the hostIP field. This list is empty if the pod has not started yet. A pod can be assigned to a node that has a problem in kubelet which in turns means that HostIPs will not be updated even if there is a node is assigned to this pod."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='hostIPs', type=d.T.array)]), + withHostIPsMixin(hostIPs): { hostIPs+: if std.isArray(v=hostIPs) then hostIPs else [hostIPs] }, '#withInitContainerStatuses':: d.fn(help='"The list has one entry per init container in the manifest. The most recent successful init container will have ready = true, the most recently started container will have startTime set. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status"', args=[d.arg(name='initContainerStatuses', type=d.T.array)]), withInitContainerStatuses(initContainerStatuses): { initContainerStatuses: if std.isArray(v=initContainerStatuses) then initContainerStatuses else [initContainerStatuses] }, '#withInitContainerStatusesMixin':: d.fn(help='"The list has one entry per init container in the manifest. The most recent successful init container will have ready = true, the most recently started container will have startTime set. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='initContainerStatuses', type=d.T.array)]), @@ -25,16 +29,22 @@ withNominatedNodeName(nominatedNodeName): { nominatedNodeName: nominatedNodeName }, '#withPhase':: d.fn(help="\"The phase of a Pod is a simple, high-level summary of where the Pod is in its lifecycle. The conditions array, the reason and message fields, and the individual container status arrays contain more detail about the pod's status. There are five possible phase values:\\n\\nPending: The pod has been accepted by the Kubernetes system, but one or more of the container images has not been created. This includes time before being scheduled as well as time spent downloading images over the network, which could take a while. Running: The pod has been bound to a node, and all of the containers have been created. At least one container is still running, or is in the process of starting or restarting. Succeeded: All containers in the pod have terminated in success, and will not be restarted. Failed: All containers in the pod have terminated, and at least one container has terminated in failure. The container either exited with non-zero status or was terminated by the system. Unknown: For some reason the state of the pod could not be obtained, typically due to an error in communicating with the host of the pod.\\n\\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-phase\"", args=[d.arg(name='phase', type=d.T.string)]), withPhase(phase): { phase: phase }, - '#withPodIP':: d.fn(help='"IP address allocated to the pod. Routable at least within the cluster. 
Empty if not yet allocated."', args=[d.arg(name='podIP', type=d.T.string)]), + '#withPodIP':: d.fn(help='"podIP address allocated to the pod. Routable at least within the cluster. Empty if not yet allocated."', args=[d.arg(name='podIP', type=d.T.string)]), withPodIP(podIP): { podIP: podIP }, '#withPodIPs':: d.fn(help='"podIPs holds the IP addresses allocated to the pod. If this field is specified, the 0th entry must match the podIP field. Pods may be allocated at most 1 value for each of IPv4 and IPv6. This list is empty if no IPs have been allocated yet."', args=[d.arg(name='podIPs', type=d.T.array)]), withPodIPs(podIPs): { podIPs: if std.isArray(v=podIPs) then podIPs else [podIPs] }, '#withPodIPsMixin':: d.fn(help='"podIPs holds the IP addresses allocated to the pod. If this field is specified, the 0th entry must match the podIP field. Pods may be allocated at most 1 value for each of IPv4 and IPv6. This list is empty if no IPs have been allocated yet."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='podIPs', type=d.T.array)]), withPodIPsMixin(podIPs): { podIPs+: if std.isArray(v=podIPs) then podIPs else [podIPs] }, - '#withQosClass':: d.fn(help='"The Quality of Service (QOS) classification assigned to the pod based on resource requirements See PodQOSClass type for available QOS classes More info: https://git.k8s.io/community/contributors/design-proposals/node/resource-qos.md"', args=[d.arg(name='qosClass', type=d.T.string)]), + '#withQosClass':: d.fn(help='"The Quality of Service (QOS) classification assigned to the pod based on resource requirements See PodQOSClass type for available QOS classes More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-qos/#quality-of-service-classes"', args=[d.arg(name='qosClass', type=d.T.string)]), withQosClass(qosClass): { qosClass: qosClass }, '#withReason':: d.fn(help="\"A brief CamelCase message indicating details about why the pod is in this state. e.g. 'Evicted'\"", args=[d.arg(name='reason', type=d.T.string)]), withReason(reason): { reason: reason }, + '#withResize':: d.fn(help="\"Status of resources resize desired for pod's containers. It is empty if no resources resize is pending. Any changes to container resources will automatically set this to \\\"Proposed\\", args=[d.arg(name='resize', type=d.T.string)]), + withResize(resize): { resize: resize }, + '#withResourceClaimStatuses':: d.fn(help='"Status of resource claims."', args=[d.arg(name='resourceClaimStatuses', type=d.T.array)]), + withResourceClaimStatuses(resourceClaimStatuses): { resourceClaimStatuses: if std.isArray(v=resourceClaimStatuses) then resourceClaimStatuses else [resourceClaimStatuses] }, + '#withResourceClaimStatusesMixin':: d.fn(help='"Status of resource claims."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='resourceClaimStatuses', type=d.T.array)]), + withResourceClaimStatusesMixin(resourceClaimStatuses): { resourceClaimStatuses+: if std.isArray(v=resourceClaimStatuses) then resourceClaimStatuses else [resourceClaimStatuses] }, '#withStartTime':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. 
Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='startTime', type=d.T.string)]), withStartTime(startTime): { startTime: startTime }, '#mixin': 'ignore', diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/podTemplate.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/podTemplate.libsonnet similarity index 85% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/podTemplate.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/podTemplate.libsonnet index f24d76b16cf..4c12ae4ad6e 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/podTemplate.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/podTemplate.libsonnet @@ -3,12 +3,10 @@ '#':: d.pkg(name='podTemplate', url='', help='"PodTemplate describes a template for creating copies of a predefined pod."'), '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. 
Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -19,21 +17,21 @@ withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { metadata+: { generateName: generateName } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { metadata+: { labels+: labels } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. 
Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { metadata+: { namespace: namespace } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, @@ -41,9 +39,9 @@ withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { metadata+: { uid: uid } }, }, '#new':: d.fn(help='new returns an instance of PodTemplate', args=[d.arg(name='name', type=d.T.string)]), @@ -55,12 +53,10 @@ template: { '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { template+: { metadata+: { annotations: annotations } } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { template+: { metadata+: { annotations+: annotations } } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { template+: { metadata+: { clusterName: clusterName } } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { template+: { metadata+: { creationTimestamp: creationTimestamp } } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -71,21 +67,21 @@ withFinalizers(finalizers): { template+: { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { template+: { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. 
The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { template+: { metadata+: { generateName: generateName } } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { template+: { metadata+: { generation: generation } } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { template+: { metadata+: { labels: labels } } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { template+: { metadata+: { labels+: labels } } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. 
This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { template+: { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { template+: { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { template+: { metadata+: { name: name } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { template+: { metadata+: { namespace: namespace } } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. 
If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { template+: { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } } }, @@ -93,9 +89,9 @@ withOwnerReferencesMixin(ownerReferences): { template+: { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { template+: { metadata+: { resourceVersion: resourceVersion } } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { template+: { metadata+: { selfLink: selfLink } } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { template+: { metadata+: { uid: uid } } }, }, '#spec':: d.obj(help='"PodSpec is a description of a pod."'), @@ -154,6 +150,11 @@ '#withSearchesMixin':: d.fn(help='"A list of DNS search domains for host-name lookup. This will be appended to the base search paths generated from DNSPolicy. Duplicated search paths will be removed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='searches', type=d.T.array)]), withSearchesMixin(searches): { template+: { spec+: { dnsConfig+: { searches+: if std.isArray(v=searches) then searches else [searches] } } } }, }, + '#os':: d.obj(help='"PodOS defines the OS parameters of a pod."'), + os: { + '#withName':: d.fn(help='"Name is the name of the operating system. The currently supported values are linux and windows. 
Additional value may be defined in future and can be one of: https://github.com/opencontainers/runtime-spec/blob/master/config.md#platform-specific-configuration Clients should expect to handle additional values and treat unrecognized values in this field as os: null"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { template+: { spec+: { os+: { name: name } } } }, + }, '#securityContext':: d.obj(help='"PodSecurityContext holds pod-level security attributes and common container settings. Some fields are also present in container.securityContext. Field values of container.securityContext take precedence over field values of PodSecurityContext."'), securityContext: { '#seLinuxOptions':: d.obj(help='"SELinuxOptions are the labels to be applied to the container"'), @@ -169,7 +170,7 @@ }, '#seccompProfile':: d.obj(help="\"SeccompProfile defines a pod/container's seccomp profile settings. Only one profile source may be set.\""), seccompProfile: { - '#withLocalhostProfile':: d.fn(help="\"localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must only be set if type is \\\"Localhost\\\".\"", args=[d.arg(name='localhostProfile', type=d.T.string)]), + '#withLocalhostProfile':: d.fn(help="\"localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must be set if type is \\\"Localhost\\\". Must NOT be set for any other type.\"", args=[d.arg(name='localhostProfile', type=d.T.string)]), withLocalhostProfile(localhostProfile): { template+: { spec+: { securityContext+: { seccompProfile+: { localhostProfile: localhostProfile } } } } }, '#withType':: d.fn(help='"type indicates which kind of seccomp profile will be applied. Valid options are:\\n\\nLocalhost - a profile defined in a file on the node should be used. RuntimeDefault - the container runtime default profile should be used. Unconfined - no profile should be applied."', args=[d.arg(name='type', type=d.T.string)]), withType(type): { template+: { spec+: { securityContext+: { seccompProfile+: { type: type } } } } }, @@ -180,26 +181,28 @@ withGmsaCredentialSpec(gmsaCredentialSpec): { template+: { spec+: { securityContext+: { windowsOptions+: { gmsaCredentialSpec: gmsaCredentialSpec } } } } }, '#withGmsaCredentialSpecName':: d.fn(help='"GMSACredentialSpecName is the name of the GMSA credential spec to use."', args=[d.arg(name='gmsaCredentialSpecName', type=d.T.string)]), withGmsaCredentialSpecName(gmsaCredentialSpecName): { template+: { spec+: { securityContext+: { windowsOptions+: { gmsaCredentialSpecName: gmsaCredentialSpecName } } } } }, + '#withHostProcess':: d.fn(help="\"HostProcess determines if a container should be run as a 'Host Process' container. All of a Pod's containers must have the same effective HostProcess value (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). In addition, if HostProcess is true then HostNetwork must also be set to true.\"", args=[d.arg(name='hostProcess', type=d.T.boolean)]), + withHostProcess(hostProcess): { template+: { spec+: { securityContext+: { windowsOptions+: { hostProcess: hostProcess } } } } }, '#withRunAsUserName':: d.fn(help='"The UserName in Windows to run the entrypoint of the container process. 
Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence."', args=[d.arg(name='runAsUserName', type=d.T.string)]), withRunAsUserName(runAsUserName): { template+: { spec+: { securityContext+: { windowsOptions+: { runAsUserName: runAsUserName } } } } }, }, - '#withFsGroup':: d.fn(help="\"A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod:\\n\\n1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw----\\n\\nIf unset, the Kubelet will not modify the ownership and permissions of any volume.\"", args=[d.arg(name='fsGroup', type=d.T.integer)]), + '#withFsGroup':: d.fn(help="\"A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod:\\n\\n1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw----\\n\\nIf unset, the Kubelet will not modify the ownership and permissions of any volume. Note that this field cannot be set when spec.os.name is windows.\"", args=[d.arg(name='fsGroup', type=d.T.integer)]), withFsGroup(fsGroup): { template+: { spec+: { securityContext+: { fsGroup: fsGroup } } } }, - '#withFsGroupChangePolicy':: d.fn(help='"fsGroupChangePolicy defines behavior of changing ownership and permission of the volume before being exposed inside Pod. This field will only apply to volume types which support fsGroup based ownership(and permissions). It will have no effect on ephemeral volume types such as: secret, configmaps and emptydir. Valid values are \\"OnRootMismatch\\" and \\"Always\\". If not specified, \\"Always\\" is used."', args=[d.arg(name='fsGroupChangePolicy', type=d.T.string)]), + '#withFsGroupChangePolicy':: d.fn(help='"fsGroupChangePolicy defines behavior of changing ownership and permission of the volume before being exposed inside Pod. This field will only apply to volume types which support fsGroup based ownership(and permissions). It will have no effect on ephemeral volume types such as: secret, configmaps and emptydir. Valid values are \\"OnRootMismatch\\" and \\"Always\\". If not specified, \\"Always\\" is used. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='fsGroupChangePolicy', type=d.T.string)]), withFsGroupChangePolicy(fsGroupChangePolicy): { template+: { spec+: { securityContext+: { fsGroupChangePolicy: fsGroupChangePolicy } } } }, - '#withRunAsGroup':: d.fn(help='"The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container."', args=[d.arg(name='runAsGroup', type=d.T.integer)]), + '#withRunAsGroup':: d.fn(help='"The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. 
Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='runAsGroup', type=d.T.integer)]), withRunAsGroup(runAsGroup): { template+: { spec+: { securityContext+: { runAsGroup: runAsGroup } } } }, '#withRunAsNonRoot':: d.fn(help='"Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence."', args=[d.arg(name='runAsNonRoot', type=d.T.boolean)]), withRunAsNonRoot(runAsNonRoot): { template+: { spec+: { securityContext+: { runAsNonRoot: runAsNonRoot } } } }, - '#withRunAsUser':: d.fn(help='"The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container."', args=[d.arg(name='runAsUser', type=d.T.integer)]), + '#withRunAsUser':: d.fn(help='"The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='runAsUser', type=d.T.integer)]), withRunAsUser(runAsUser): { template+: { spec+: { securityContext+: { runAsUser: runAsUser } } } }, - '#withSupplementalGroups':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID. If unspecified, no groups will be added to any container.\"", args=[d.arg(name='supplementalGroups', type=d.T.array)]), + '#withSupplementalGroups':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID, the fsGroup (if specified), and group memberships defined in the container image for the uid of the container process. If unspecified, no additional groups are added to any container. Note that group memberships defined in the container image for the uid of the container process are still effective, even if they are not included in this list. Note that this field cannot be set when spec.os.name is windows.\"", args=[d.arg(name='supplementalGroups', type=d.T.array)]), withSupplementalGroups(supplementalGroups): { template+: { spec+: { securityContext+: { supplementalGroups: if std.isArray(v=supplementalGroups) then supplementalGroups else [supplementalGroups] } } } }, - '#withSupplementalGroupsMixin':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID. If unspecified, no groups will be added to any container.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='supplementalGroups', type=d.T.array)]), + '#withSupplementalGroupsMixin':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID, the fsGroup (if specified), and group memberships defined in the container image for the uid of the container process. If unspecified, no additional groups are added to any container. 
Note that group memberships defined in the container image for the uid of the container process are still effective, even if they are not included in this list. Note that this field cannot be set when spec.os.name is windows.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='supplementalGroups', type=d.T.array)]), withSupplementalGroupsMixin(supplementalGroups): { template+: { spec+: { securityContext+: { supplementalGroups+: if std.isArray(v=supplementalGroups) then supplementalGroups else [supplementalGroups] } } } }, - '#withSysctls':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch."', args=[d.arg(name='sysctls', type=d.T.array)]), + '#withSysctls':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='sysctls', type=d.T.array)]), withSysctls(sysctls): { template+: { spec+: { securityContext+: { sysctls: if std.isArray(v=sysctls) then sysctls else [sysctls] } } } }, - '#withSysctlsMixin':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='sysctls', type=d.T.array)]), + '#withSysctlsMixin':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch. Note that this field cannot be set when spec.os.name is windows."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='sysctls', type=d.T.array)]), withSysctlsMixin(sysctls): { template+: { spec+: { securityContext+: { sysctls+: if std.isArray(v=sysctls) then sysctls else [sysctls] } } } }, }, '#withActiveDeadlineSeconds':: d.fn(help='"Optional duration in seconds the pod may be active on the node relative to StartTime before the system will actively try to mark it failed and kill associated containers. Value must be a positive integer."', args=[d.arg(name='activeDeadlineSeconds', type=d.T.integer)]), @@ -214,9 +217,9 @@ withDnsPolicy(dnsPolicy): { template+: { spec+: { dnsPolicy: dnsPolicy } } }, '#withEnableServiceLinks':: d.fn(help="\"EnableServiceLinks indicates whether information about services should be injected into pod's environment variables, matching the syntax of Docker links. Optional: Defaults to true.\"", args=[d.arg(name='enableServiceLinks', type=d.T.boolean)]), withEnableServiceLinks(enableServiceLinks): { template+: { spec+: { enableServiceLinks: enableServiceLinks } } }, - '#withEphemeralContainers':: d.fn(help="\"List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. This field is alpha-level and is only honored by servers that enable the EphemeralContainers feature.\"", args=[d.arg(name='ephemeralContainers', type=d.T.array)]), + '#withEphemeralContainers':: d.fn(help="\"List of ephemeral containers run in this pod. 
Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource.\"", args=[d.arg(name='ephemeralContainers', type=d.T.array)]), withEphemeralContainers(ephemeralContainers): { template+: { spec+: { ephemeralContainers: if std.isArray(v=ephemeralContainers) then ephemeralContainers else [ephemeralContainers] } } }, - '#withEphemeralContainersMixin':: d.fn(help="\"List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. This field is alpha-level and is only honored by servers that enable the EphemeralContainers feature.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='ephemeralContainers', type=d.T.array)]), + '#withEphemeralContainersMixin':: d.fn(help="\"List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='ephemeralContainers', type=d.T.array)]), withEphemeralContainersMixin(ephemeralContainers): { template+: { spec+: { ephemeralContainers+: if std.isArray(v=ephemeralContainers) then ephemeralContainers else [ephemeralContainers] } } }, '#withHostAliases':: d.fn(help="\"HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts file if specified. This is only valid for non-hostNetwork pods.\"", args=[d.arg(name='hostAliases', type=d.T.array)]), withHostAliases(hostAliases): { template+: { spec+: { hostAliases: if std.isArray(v=hostAliases) then hostAliases else [hostAliases] } } }, @@ -228,11 +231,13 @@ withHostNetwork(hostNetwork): { template+: { spec+: { hostNetwork: hostNetwork } } }, '#withHostPID':: d.fn(help="\"Use the host's pid namespace. Optional: Default to false.\"", args=[d.arg(name='hostPID', type=d.T.boolean)]), withHostPID(hostPID): { template+: { spec+: { hostPID: hostPID } } }, + '#withHostUsers':: d.fn(help="\"Use the host's user namespace. Optional: Default to true. If set to true or not present, the pod will be run in the host user namespace, useful for when the pod needs a feature only available to the host user namespace, such as loading a kernel module with CAP_SYS_MODULE. When set to false, a new userns is created for the pod. Setting false is useful for mitigating container breakout vulnerabilities even allowing users to run their containers as root without actually having root privileges on the host. 
This field is alpha-level and is only honored by servers that enable the UserNamespacesSupport feature.\"", args=[d.arg(name='hostUsers', type=d.T.boolean)]), + withHostUsers(hostUsers): { template+: { spec+: { hostUsers: hostUsers } } }, '#withHostname':: d.fn(help="\"Specifies the hostname of the Pod If not specified, the pod's hostname will be set to a system-defined value.\"", args=[d.arg(name='hostname', type=d.T.string)]), withHostname(hostname): { template+: { spec+: { hostname: hostname } } }, - '#withImagePullSecrets':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), + '#withImagePullSecrets':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), withImagePullSecrets(imagePullSecrets): { template+: { spec+: { imagePullSecrets: if std.isArray(v=imagePullSecrets) then imagePullSecrets else [imagePullSecrets] } } }, - '#withImagePullSecretsMixin':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), + '#withImagePullSecretsMixin':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), withImagePullSecretsMixin(imagePullSecrets): { template+: { spec+: { imagePullSecrets+: if std.isArray(v=imagePullSecrets) then imagePullSecrets else [imagePullSecrets] } } }, '#withInitContainers':: d.fn(help='"List of initialization containers belonging to the pod. Init containers are executed in order prior to containers being started. If any init container fails, the pod is considered to have failed and is handled according to its restartPolicy. The name for an init container or normal container must be unique among all containers. Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes. 
The resourceRequirements of an init container are taken into account during scheduling by finding the highest request/limit for each resource type, and then using the max of of that value or the sum of the normal containers. Limits are applied to init containers in a similar fashion. Init containers cannot currently be added or removed. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/"', args=[d.arg(name='initContainers', type=d.T.array)]), withInitContainers(initContainers): { template+: { spec+: { initContainers: if std.isArray(v=initContainers) then initContainers else [initContainers] } } }, @@ -244,26 +249,34 @@ withNodeSelector(nodeSelector): { template+: { spec+: { nodeSelector: nodeSelector } } }, '#withNodeSelectorMixin':: d.fn(help="\"NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='nodeSelector', type=d.T.object)]), withNodeSelectorMixin(nodeSelector): { template+: { spec+: { nodeSelector+: nodeSelector } } }, - '#withOverhead':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md This field is alpha-level as of Kubernetes v1.16, and is only honored by servers that enable the PodOverhead feature."', args=[d.arg(name='overhead', type=d.T.object)]), + '#withOverhead':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md"', args=[d.arg(name='overhead', type=d.T.object)]), withOverhead(overhead): { template+: { spec+: { overhead: overhead } } }, - '#withOverheadMixin':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. 
If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md This field is alpha-level as of Kubernetes v1.16, and is only honored by servers that enable the PodOverhead feature."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='overhead', type=d.T.object)]), + '#withOverheadMixin':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='overhead', type=d.T.object)]), withOverheadMixin(overhead): { template+: { spec+: { overhead+: overhead } } }, - '#withPreemptionPolicy':: d.fn(help='"PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset. This field is beta-level, gated by the NonPreemptingPriority feature-gate."', args=[d.arg(name='preemptionPolicy', type=d.T.string)]), + '#withPreemptionPolicy':: d.fn(help='"PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset."', args=[d.arg(name='preemptionPolicy', type=d.T.string)]), withPreemptionPolicy(preemptionPolicy): { template+: { spec+: { preemptionPolicy: preemptionPolicy } } }, '#withPriority':: d.fn(help='"The priority value. Various system components use this field to find the priority of the pod. When Priority Admission Controller is enabled, it prevents users from setting this field. The admission controller populates this field from PriorityClassName. The higher the value, the higher the priority."', args=[d.arg(name='priority', type=d.T.integer)]), withPriority(priority): { template+: { spec+: { priority: priority } } }, '#withPriorityClassName':: d.fn(help="\"If specified, indicates the pod's priority. \\\"system-node-critical\\\" and \\\"system-cluster-critical\\\" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default.\"", args=[d.arg(name='priorityClassName', type=d.T.string)]), withPriorityClassName(priorityClassName): { template+: { spec+: { priorityClassName: priorityClassName } } }, - '#withReadinessGates':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. 
A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/0007-pod-ready%2B%2B.md"', args=[d.arg(name='readinessGates', type=d.T.array)]), + '#withReadinessGates':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates"', args=[d.arg(name='readinessGates', type=d.T.array)]), withReadinessGates(readinessGates): { template+: { spec+: { readinessGates: if std.isArray(v=readinessGates) then readinessGates else [readinessGates] } } }, - '#withReadinessGatesMixin':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/0007-pod-ready%2B%2B.md"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='readinessGates', type=d.T.array)]), + '#withReadinessGatesMixin':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='readinessGates', type=d.T.array)]), withReadinessGatesMixin(readinessGates): { template+: { spec+: { readinessGates+: if std.isArray(v=readinessGates) then readinessGates else [readinessGates] } } }, - '#withRestartPolicy':: d.fn(help='"Restart policy for all containers within the pod. One of Always, OnFailure, Never. Default to Always. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy"', args=[d.arg(name='restartPolicy', type=d.T.string)]), + '#withResourceClaims':: d.fn(help='"ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. The resources will be made available to those containers which consume them by name.\\n\\nThis is an alpha field and requires enabling the DynamicResourceAllocation feature gate.\\n\\nThis field is immutable."', args=[d.arg(name='resourceClaims', type=d.T.array)]), + withResourceClaims(resourceClaims): { template+: { spec+: { resourceClaims: if std.isArray(v=resourceClaims) then resourceClaims else [resourceClaims] } } }, + '#withResourceClaimsMixin':: d.fn(help='"ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. The resources will be made available to those containers which consume them by name.\\n\\nThis is an alpha field and requires enabling the DynamicResourceAllocation feature gate.\\n\\nThis field is immutable."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='resourceClaims', type=d.T.array)]), + withResourceClaimsMixin(resourceClaims): { template+: { spec+: { resourceClaims+: if std.isArray(v=resourceClaims) then resourceClaims else [resourceClaims] } } }, + '#withRestartPolicy':: d.fn(help='"Restart policy for all containers within the pod. One of Always, OnFailure, Never. 
In some contexts, only a subset of those values may be permitted. Default to Always. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy"', args=[d.arg(name='restartPolicy', type=d.T.string)]), withRestartPolicy(restartPolicy): { template+: { spec+: { restartPolicy: restartPolicy } } }, - '#withRuntimeClassName':: d.fn(help='"RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \\"legacy\\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md This is a beta feature as of Kubernetes v1.14."', args=[d.arg(name='runtimeClassName', type=d.T.string)]), + '#withRuntimeClassName':: d.fn(help='"RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \\"legacy\\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class"', args=[d.arg(name='runtimeClassName', type=d.T.string)]), withRuntimeClassName(runtimeClassName): { template+: { spec+: { runtimeClassName: runtimeClassName } } }, '#withSchedulerName':: d.fn(help='"If specified, the pod will be dispatched by specified scheduler. If not specified, the pod will be dispatched by default scheduler."', args=[d.arg(name='schedulerName', type=d.T.string)]), withSchedulerName(schedulerName): { template+: { spec+: { schedulerName: schedulerName } } }, + '#withSchedulingGates':: d.fn(help='"SchedulingGates is an opaque list of values that if specified will block scheduling the pod. If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the scheduler will not attempt to schedule the pod.\\n\\nSchedulingGates can only be set at pod creation time, and be removed only afterwards.\\n\\nThis is a beta feature enabled by the PodSchedulingReadiness feature gate."', args=[d.arg(name='schedulingGates', type=d.T.array)]), + withSchedulingGates(schedulingGates): { template+: { spec+: { schedulingGates: if std.isArray(v=schedulingGates) then schedulingGates else [schedulingGates] } } }, + '#withSchedulingGatesMixin':: d.fn(help='"SchedulingGates is an opaque list of values that if specified will block scheduling the pod. If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the scheduler will not attempt to schedule the pod.\\n\\nSchedulingGates can only be set at pod creation time, and be removed only afterwards.\\n\\nThis is a beta feature enabled by the PodSchedulingReadiness feature gate."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='schedulingGates', type=d.T.array)]), + withSchedulingGatesMixin(schedulingGates): { template+: { spec+: { schedulingGates+: if std.isArray(v=schedulingGates) then schedulingGates else [schedulingGates] } } }, '#withServiceAccount':: d.fn(help='"DeprecatedServiceAccount is a depreciated alias for ServiceAccountName. 
Deprecated: Use serviceAccountName instead."', args=[d.arg(name='serviceAccount', type=d.T.string)]), withServiceAccount(serviceAccount): { template+: { spec+: { serviceAccount: serviceAccount } } }, '#withServiceAccountName':: d.fn(help='"ServiceAccountName is the name of the ServiceAccount to use to run this pod. More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/"', args=[d.arg(name='serviceAccountName', type=d.T.string)]), diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/podTemplateSpec.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/podTemplateSpec.libsonnet similarity index 84% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/podTemplateSpec.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/podTemplateSpec.libsonnet index 44510388a39..d0d92f58f3b 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/podTemplateSpec.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/podTemplateSpec.libsonnet @@ -3,12 +3,10 @@ '#':: d.pkg(name='podTemplateSpec', url='', help='"PodTemplateSpec describes the data a pod should have when created from a template"'), '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. 
This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -19,21 +17,21 @@ withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. 
This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { metadata+: { generateName: generateName } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { metadata+: { labels+: labels } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". 
The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { metadata+: { namespace: namespace } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, @@ -41,9 +39,9 @@ withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. 
They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { metadata+: { uid: uid } }, }, '#spec':: d.obj(help='"PodSpec is a description of a pod."'), @@ -102,6 +100,11 @@ '#withSearchesMixin':: d.fn(help='"A list of DNS search domains for host-name lookup. This will be appended to the base search paths generated from DNSPolicy. Duplicated search paths will be removed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='searches', type=d.T.array)]), withSearchesMixin(searches): { spec+: { dnsConfig+: { searches+: if std.isArray(v=searches) then searches else [searches] } } }, }, + '#os':: d.obj(help='"PodOS defines the OS parameters of a pod."'), + os: { + '#withName':: d.fn(help='"Name is the name of the operating system. The currently supported values are linux and windows. Additional value may be defined in future and can be one of: https://github.com/opencontainers/runtime-spec/blob/master/config.md#platform-specific-configuration Clients should expect to handle additional values and treat unrecognized values in this field as os: null"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { spec+: { os+: { name: name } } }, + }, '#securityContext':: d.obj(help='"PodSecurityContext holds pod-level security attributes and common container settings. Some fields are also present in container.securityContext. Field values of container.securityContext take precedence over field values of PodSecurityContext."'), securityContext: { '#seLinuxOptions':: d.obj(help='"SELinuxOptions are the labels to be applied to the container"'), @@ -117,7 +120,7 @@ }, '#seccompProfile':: d.obj(help="\"SeccompProfile defines a pod/container's seccomp profile settings. 
Only one profile source may be set.\""), seccompProfile: { - '#withLocalhostProfile':: d.fn(help="\"localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must only be set if type is \\\"Localhost\\\".\"", args=[d.arg(name='localhostProfile', type=d.T.string)]), + '#withLocalhostProfile':: d.fn(help="\"localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must be set if type is \\\"Localhost\\\". Must NOT be set for any other type.\"", args=[d.arg(name='localhostProfile', type=d.T.string)]), withLocalhostProfile(localhostProfile): { spec+: { securityContext+: { seccompProfile+: { localhostProfile: localhostProfile } } } }, '#withType':: d.fn(help='"type indicates which kind of seccomp profile will be applied. Valid options are:\\n\\nLocalhost - a profile defined in a file on the node should be used. RuntimeDefault - the container runtime default profile should be used. Unconfined - no profile should be applied."', args=[d.arg(name='type', type=d.T.string)]), withType(type): { spec+: { securityContext+: { seccompProfile+: { type: type } } } }, @@ -128,26 +131,28 @@ withGmsaCredentialSpec(gmsaCredentialSpec): { spec+: { securityContext+: { windowsOptions+: { gmsaCredentialSpec: gmsaCredentialSpec } } } }, '#withGmsaCredentialSpecName':: d.fn(help='"GMSACredentialSpecName is the name of the GMSA credential spec to use."', args=[d.arg(name='gmsaCredentialSpecName', type=d.T.string)]), withGmsaCredentialSpecName(gmsaCredentialSpecName): { spec+: { securityContext+: { windowsOptions+: { gmsaCredentialSpecName: gmsaCredentialSpecName } } } }, + '#withHostProcess':: d.fn(help="\"HostProcess determines if a container should be run as a 'Host Process' container. All of a Pod's containers must have the same effective HostProcess value (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). In addition, if HostProcess is true then HostNetwork must also be set to true.\"", args=[d.arg(name='hostProcess', type=d.T.boolean)]), + withHostProcess(hostProcess): { spec+: { securityContext+: { windowsOptions+: { hostProcess: hostProcess } } } }, '#withRunAsUserName':: d.fn(help='"The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence."', args=[d.arg(name='runAsUserName', type=d.T.string)]), withRunAsUserName(runAsUserName): { spec+: { securityContext+: { windowsOptions+: { runAsUserName: runAsUserName } } } }, }, - '#withFsGroup':: d.fn(help="\"A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod:\\n\\n1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. 
The permission bits are OR'd with rw-rw----\\n\\nIf unset, the Kubelet will not modify the ownership and permissions of any volume.\"", args=[d.arg(name='fsGroup', type=d.T.integer)]), + '#withFsGroup':: d.fn(help="\"A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod:\\n\\n1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw----\\n\\nIf unset, the Kubelet will not modify the ownership and permissions of any volume. Note that this field cannot be set when spec.os.name is windows.\"", args=[d.arg(name='fsGroup', type=d.T.integer)]), withFsGroup(fsGroup): { spec+: { securityContext+: { fsGroup: fsGroup } } }, - '#withFsGroupChangePolicy':: d.fn(help='"fsGroupChangePolicy defines behavior of changing ownership and permission of the volume before being exposed inside Pod. This field will only apply to volume types which support fsGroup based ownership(and permissions). It will have no effect on ephemeral volume types such as: secret, configmaps and emptydir. Valid values are \\"OnRootMismatch\\" and \\"Always\\". If not specified, \\"Always\\" is used."', args=[d.arg(name='fsGroupChangePolicy', type=d.T.string)]), + '#withFsGroupChangePolicy':: d.fn(help='"fsGroupChangePolicy defines behavior of changing ownership and permission of the volume before being exposed inside Pod. This field will only apply to volume types which support fsGroup based ownership(and permissions). It will have no effect on ephemeral volume types such as: secret, configmaps and emptydir. Valid values are \\"OnRootMismatch\\" and \\"Always\\". If not specified, \\"Always\\" is used. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='fsGroupChangePolicy', type=d.T.string)]), withFsGroupChangePolicy(fsGroupChangePolicy): { spec+: { securityContext+: { fsGroupChangePolicy: fsGroupChangePolicy } } }, - '#withRunAsGroup':: d.fn(help='"The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container."', args=[d.arg(name='runAsGroup', type=d.T.integer)]), + '#withRunAsGroup':: d.fn(help='"The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='runAsGroup', type=d.T.integer)]), withRunAsGroup(runAsGroup): { spec+: { securityContext+: { runAsGroup: runAsGroup } } }, '#withRunAsNonRoot':: d.fn(help='"Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in SecurityContext. 
If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence."', args=[d.arg(name='runAsNonRoot', type=d.T.boolean)]), withRunAsNonRoot(runAsNonRoot): { spec+: { securityContext+: { runAsNonRoot: runAsNonRoot } } }, - '#withRunAsUser':: d.fn(help='"The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container."', args=[d.arg(name='runAsUser', type=d.T.integer)]), + '#withRunAsUser':: d.fn(help='"The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='runAsUser', type=d.T.integer)]), withRunAsUser(runAsUser): { spec+: { securityContext+: { runAsUser: runAsUser } } }, - '#withSupplementalGroups':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID. If unspecified, no groups will be added to any container.\"", args=[d.arg(name='supplementalGroups', type=d.T.array)]), + '#withSupplementalGroups':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID, the fsGroup (if specified), and group memberships defined in the container image for the uid of the container process. If unspecified, no additional groups are added to any container. Note that group memberships defined in the container image for the uid of the container process are still effective, even if they are not included in this list. Note that this field cannot be set when spec.os.name is windows.\"", args=[d.arg(name='supplementalGroups', type=d.T.array)]), withSupplementalGroups(supplementalGroups): { spec+: { securityContext+: { supplementalGroups: if std.isArray(v=supplementalGroups) then supplementalGroups else [supplementalGroups] } } }, - '#withSupplementalGroupsMixin':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID. If unspecified, no groups will be added to any container.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='supplementalGroups', type=d.T.array)]), + '#withSupplementalGroupsMixin':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID, the fsGroup (if specified), and group memberships defined in the container image for the uid of the container process. If unspecified, no additional groups are added to any container. Note that group memberships defined in the container image for the uid of the container process are still effective, even if they are not included in this list. 
Note that this field cannot be set when spec.os.name is windows.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='supplementalGroups', type=d.T.array)]), withSupplementalGroupsMixin(supplementalGroups): { spec+: { securityContext+: { supplementalGroups+: if std.isArray(v=supplementalGroups) then supplementalGroups else [supplementalGroups] } } }, - '#withSysctls':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch."', args=[d.arg(name='sysctls', type=d.T.array)]), + '#withSysctls':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='sysctls', type=d.T.array)]), withSysctls(sysctls): { spec+: { securityContext+: { sysctls: if std.isArray(v=sysctls) then sysctls else [sysctls] } } }, - '#withSysctlsMixin':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='sysctls', type=d.T.array)]), + '#withSysctlsMixin':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch. Note that this field cannot be set when spec.os.name is windows."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='sysctls', type=d.T.array)]), withSysctlsMixin(sysctls): { spec+: { securityContext+: { sysctls+: if std.isArray(v=sysctls) then sysctls else [sysctls] } } }, }, '#withActiveDeadlineSeconds':: d.fn(help='"Optional duration in seconds the pod may be active on the node relative to StartTime before the system will actively try to mark it failed and kill associated containers. Value must be a positive integer."', args=[d.arg(name='activeDeadlineSeconds', type=d.T.integer)]), @@ -162,9 +167,9 @@ withDnsPolicy(dnsPolicy): { spec+: { dnsPolicy: dnsPolicy } }, '#withEnableServiceLinks':: d.fn(help="\"EnableServiceLinks indicates whether information about services should be injected into pod's environment variables, matching the syntax of Docker links. Optional: Defaults to true.\"", args=[d.arg(name='enableServiceLinks', type=d.T.boolean)]), withEnableServiceLinks(enableServiceLinks): { spec+: { enableServiceLinks: enableServiceLinks } }, - '#withEphemeralContainers':: d.fn(help="\"List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. This field is alpha-level and is only honored by servers that enable the EphemeralContainers feature.\"", args=[d.arg(name='ephemeralContainers', type=d.T.array)]), + '#withEphemeralContainers':: d.fn(help="\"List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. 
In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource.\"", args=[d.arg(name='ephemeralContainers', type=d.T.array)]), withEphemeralContainers(ephemeralContainers): { spec+: { ephemeralContainers: if std.isArray(v=ephemeralContainers) then ephemeralContainers else [ephemeralContainers] } }, - '#withEphemeralContainersMixin':: d.fn(help="\"List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. This field is alpha-level and is only honored by servers that enable the EphemeralContainers feature.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='ephemeralContainers', type=d.T.array)]), + '#withEphemeralContainersMixin':: d.fn(help="\"List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='ephemeralContainers', type=d.T.array)]), withEphemeralContainersMixin(ephemeralContainers): { spec+: { ephemeralContainers+: if std.isArray(v=ephemeralContainers) then ephemeralContainers else [ephemeralContainers] } }, '#withHostAliases':: d.fn(help="\"HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts file if specified. This is only valid for non-hostNetwork pods.\"", args=[d.arg(name='hostAliases', type=d.T.array)]), withHostAliases(hostAliases): { spec+: { hostAliases: if std.isArray(v=hostAliases) then hostAliases else [hostAliases] } }, @@ -176,11 +181,13 @@ withHostNetwork(hostNetwork): { spec+: { hostNetwork: hostNetwork } }, '#withHostPID':: d.fn(help="\"Use the host's pid namespace. Optional: Default to false.\"", args=[d.arg(name='hostPID', type=d.T.boolean)]), withHostPID(hostPID): { spec+: { hostPID: hostPID } }, + '#withHostUsers':: d.fn(help="\"Use the host's user namespace. Optional: Default to true. If set to true or not present, the pod will be run in the host user namespace, useful for when the pod needs a feature only available to the host user namespace, such as loading a kernel module with CAP_SYS_MODULE. When set to false, a new userns is created for the pod. Setting false is useful for mitigating container breakout vulnerabilities even allowing users to run their containers as root without actually having root privileges on the host. 
This field is alpha-level and is only honored by servers that enable the UserNamespacesSupport feature.\"", args=[d.arg(name='hostUsers', type=d.T.boolean)]), + withHostUsers(hostUsers): { spec+: { hostUsers: hostUsers } }, '#withHostname':: d.fn(help="\"Specifies the hostname of the Pod If not specified, the pod's hostname will be set to a system-defined value.\"", args=[d.arg(name='hostname', type=d.T.string)]), withHostname(hostname): { spec+: { hostname: hostname } }, - '#withImagePullSecrets':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), + '#withImagePullSecrets':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), withImagePullSecrets(imagePullSecrets): { spec+: { imagePullSecrets: if std.isArray(v=imagePullSecrets) then imagePullSecrets else [imagePullSecrets] } }, - '#withImagePullSecretsMixin':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), + '#withImagePullSecretsMixin':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), withImagePullSecretsMixin(imagePullSecrets): { spec+: { imagePullSecrets+: if std.isArray(v=imagePullSecrets) then imagePullSecrets else [imagePullSecrets] } }, '#withInitContainers':: d.fn(help='"List of initialization containers belonging to the pod. Init containers are executed in order prior to containers being started. If any init container fails, the pod is considered to have failed and is handled according to its restartPolicy. The name for an init container or normal container must be unique among all containers. Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes. The resourceRequirements of an init container are taken into account during scheduling by finding the highest request/limit for each resource type, and then using the max of of that value or the sum of the normal containers. 
Limits are applied to init containers in a similar fashion. Init containers cannot currently be added or removed. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/"', args=[d.arg(name='initContainers', type=d.T.array)]), withInitContainers(initContainers): { spec+: { initContainers: if std.isArray(v=initContainers) then initContainers else [initContainers] } }, @@ -192,26 +199,34 @@ withNodeSelector(nodeSelector): { spec+: { nodeSelector: nodeSelector } }, '#withNodeSelectorMixin':: d.fn(help="\"NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='nodeSelector', type=d.T.object)]), withNodeSelectorMixin(nodeSelector): { spec+: { nodeSelector+: nodeSelector } }, - '#withOverhead':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md This field is alpha-level as of Kubernetes v1.16, and is only honored by servers that enable the PodOverhead feature."', args=[d.arg(name='overhead', type=d.T.object)]), + '#withOverhead':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md"', args=[d.arg(name='overhead', type=d.T.object)]), withOverhead(overhead): { spec+: { overhead: overhead } }, - '#withOverheadMixin':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. 
More info: https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md This field is alpha-level as of Kubernetes v1.16, and is only honored by servers that enable the PodOverhead feature."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='overhead', type=d.T.object)]), + '#withOverheadMixin':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='overhead', type=d.T.object)]), withOverheadMixin(overhead): { spec+: { overhead+: overhead } }, - '#withPreemptionPolicy':: d.fn(help='"PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset. This field is beta-level, gated by the NonPreemptingPriority feature-gate."', args=[d.arg(name='preemptionPolicy', type=d.T.string)]), + '#withPreemptionPolicy':: d.fn(help='"PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset."', args=[d.arg(name='preemptionPolicy', type=d.T.string)]), withPreemptionPolicy(preemptionPolicy): { spec+: { preemptionPolicy: preemptionPolicy } }, '#withPriority':: d.fn(help='"The priority value. Various system components use this field to find the priority of the pod. When Priority Admission Controller is enabled, it prevents users from setting this field. The admission controller populates this field from PriorityClassName. The higher the value, the higher the priority."', args=[d.arg(name='priority', type=d.T.integer)]), withPriority(priority): { spec+: { priority: priority } }, '#withPriorityClassName':: d.fn(help="\"If specified, indicates the pod's priority. \\\"system-node-critical\\\" and \\\"system-cluster-critical\\\" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default.\"", args=[d.arg(name='priorityClassName', type=d.T.string)]), withPriorityClassName(priorityClassName): { spec+: { priorityClassName: priorityClassName } }, - '#withReadinessGates':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/0007-pod-ready%2B%2B.md"', args=[d.arg(name='readinessGates', type=d.T.array)]), + '#withReadinessGates':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. 
A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates"', args=[d.arg(name='readinessGates', type=d.T.array)]), withReadinessGates(readinessGates): { spec+: { readinessGates: if std.isArray(v=readinessGates) then readinessGates else [readinessGates] } }, - '#withReadinessGatesMixin':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/0007-pod-ready%2B%2B.md"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='readinessGates', type=d.T.array)]), + '#withReadinessGatesMixin':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='readinessGates', type=d.T.array)]), withReadinessGatesMixin(readinessGates): { spec+: { readinessGates+: if std.isArray(v=readinessGates) then readinessGates else [readinessGates] } }, - '#withRestartPolicy':: d.fn(help='"Restart policy for all containers within the pod. One of Always, OnFailure, Never. Default to Always. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy"', args=[d.arg(name='restartPolicy', type=d.T.string)]), + '#withResourceClaims':: d.fn(help='"ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. The resources will be made available to those containers which consume them by name.\\n\\nThis is an alpha field and requires enabling the DynamicResourceAllocation feature gate.\\n\\nThis field is immutable."', args=[d.arg(name='resourceClaims', type=d.T.array)]), + withResourceClaims(resourceClaims): { spec+: { resourceClaims: if std.isArray(v=resourceClaims) then resourceClaims else [resourceClaims] } }, + '#withResourceClaimsMixin':: d.fn(help='"ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. The resources will be made available to those containers which consume them by name.\\n\\nThis is an alpha field and requires enabling the DynamicResourceAllocation feature gate.\\n\\nThis field is immutable."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='resourceClaims', type=d.T.array)]), + withResourceClaimsMixin(resourceClaims): { spec+: { resourceClaims+: if std.isArray(v=resourceClaims) then resourceClaims else [resourceClaims] } }, + '#withRestartPolicy':: d.fn(help='"Restart policy for all containers within the pod. One of Always, OnFailure, Never. In some contexts, only a subset of those values may be permitted. Default to Always. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy"', args=[d.arg(name='restartPolicy', type=d.T.string)]), withRestartPolicy(restartPolicy): { spec+: { restartPolicy: restartPolicy } }, - '#withRuntimeClassName':: d.fn(help='"RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. 
If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \\"legacy\\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md This is a beta feature as of Kubernetes v1.14."', args=[d.arg(name='runtimeClassName', type=d.T.string)]), + '#withRuntimeClassName':: d.fn(help='"RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \\"legacy\\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class"', args=[d.arg(name='runtimeClassName', type=d.T.string)]), withRuntimeClassName(runtimeClassName): { spec+: { runtimeClassName: runtimeClassName } }, '#withSchedulerName':: d.fn(help='"If specified, the pod will be dispatched by specified scheduler. If not specified, the pod will be dispatched by default scheduler."', args=[d.arg(name='schedulerName', type=d.T.string)]), withSchedulerName(schedulerName): { spec+: { schedulerName: schedulerName } }, + '#withSchedulingGates':: d.fn(help='"SchedulingGates is an opaque list of values that if specified will block scheduling the pod. If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the scheduler will not attempt to schedule the pod.\\n\\nSchedulingGates can only be set at pod creation time, and be removed only afterwards.\\n\\nThis is a beta feature enabled by the PodSchedulingReadiness feature gate."', args=[d.arg(name='schedulingGates', type=d.T.array)]), + withSchedulingGates(schedulingGates): { spec+: { schedulingGates: if std.isArray(v=schedulingGates) then schedulingGates else [schedulingGates] } }, + '#withSchedulingGatesMixin':: d.fn(help='"SchedulingGates is an opaque list of values that if specified will block scheduling the pod. If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the scheduler will not attempt to schedule the pod.\\n\\nSchedulingGates can only be set at pod creation time, and be removed only afterwards.\\n\\nThis is a beta feature enabled by the PodSchedulingReadiness feature gate."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='schedulingGates', type=d.T.array)]), + withSchedulingGatesMixin(schedulingGates): { spec+: { schedulingGates+: if std.isArray(v=schedulingGates) then schedulingGates else [schedulingGates] } }, '#withServiceAccount':: d.fn(help='"DeprecatedServiceAccount is a depreciated alias for ServiceAccountName. Deprecated: Use serviceAccountName instead."', args=[d.arg(name='serviceAccount', type=d.T.string)]), withServiceAccount(serviceAccount): { spec+: { serviceAccount: serviceAccount } }, '#withServiceAccountName':: d.fn(help='"ServiceAccountName is the name of the ServiceAccount to use to run this pod. 
More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/"', args=[d.arg(name='serviceAccountName', type=d.T.string)]), diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/portStatus.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/portStatus.libsonnet similarity index 100% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/portStatus.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/portStatus.libsonnet diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/portworxVolumeSource.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/portworxVolumeSource.libsonnet similarity index 66% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/portworxVolumeSource.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/portworxVolumeSource.libsonnet index 79564673866..82be17b17fd 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/portworxVolumeSource.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/portworxVolumeSource.libsonnet @@ -1,11 +1,11 @@ { local d = (import 'doc-util/main.libsonnet'), '#':: d.pkg(name='portworxVolumeSource', url='', help='"PortworxVolumeSource represents a Portworx volume resource."'), - '#withFsType':: d.fn(help='"FSType represents the filesystem type to mount Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fSType represents the filesystem type to mount Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { fsType: fsType }, - '#withReadOnly':: d.fn(help='"Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly defaults to false (read/write). 
ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { readOnly: readOnly }, - '#withVolumeID':: d.fn(help='"VolumeID uniquely identifies a Portworx volume"', args=[d.arg(name='volumeID', type=d.T.string)]), + '#withVolumeID':: d.fn(help='"volumeID uniquely identifies a Portworx volume"', args=[d.arg(name='volumeID', type=d.T.string)]), withVolumeID(volumeID): { volumeID: volumeID }, '#mixin': 'ignore', mixin: self, diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/preferredSchedulingTerm.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/preferredSchedulingTerm.libsonnet similarity index 100% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/preferredSchedulingTerm.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/preferredSchedulingTerm.libsonnet diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/probe.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/probe.libsonnet similarity index 88% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/probe.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/probe.libsonnet index c19a814092e..bd982b3f476 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/probe.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/probe.libsonnet @@ -8,6 +8,13 @@ '#withCommandMixin':: d.fn(help="\"Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='command', type=d.T.array)]), withCommandMixin(command): { exec+: { command+: if std.isArray(v=command) then command else [command] } }, }, + '#grpc':: d.obj(help=''), + grpc: { + '#withPort':: d.fn(help='"Port number of the gRPC service. Number must be in the range 1 to 65535."', args=[d.arg(name='port', type=d.T.integer)]), + withPort(port): { grpc+: { port: port } }, + '#withService':: d.fn(help='"Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).\\n\\nIf this is not specified, the default behavior is defined by gRPC."', args=[d.arg(name='service', type=d.T.string)]), + withService(service): { grpc+: { service: service } }, + }, '#httpGet':: d.obj(help='"HTTPGetAction describes an action based on HTTP Get requests."'), httpGet: { '#withHost':: d.fn(help='"Host name to connect to, defaults to the pod IP. You probably want to set \\"Host\\" in httpHeaders instead."', args=[d.arg(name='host', type=d.T.string)]), @@ -38,7 +45,7 @@ withPeriodSeconds(periodSeconds): { periodSeconds: periodSeconds }, '#withSuccessThreshold':: d.fn(help='"Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. 
Minimum value is 1."', args=[d.arg(name='successThreshold', type=d.T.integer)]), withSuccessThreshold(successThreshold): { successThreshold: successThreshold }, - '#withTerminationGracePeriodSeconds':: d.fn(help="\"Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is an alpha field and requires enabling ProbeTerminationGracePeriod feature gate.\"", args=[d.arg(name='terminationGracePeriodSeconds', type=d.T.integer)]), + '#withTerminationGracePeriodSeconds':: d.fn(help="\"Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset.\"", args=[d.arg(name='terminationGracePeriodSeconds', type=d.T.integer)]), withTerminationGracePeriodSeconds(terminationGracePeriodSeconds): { terminationGracePeriodSeconds: terminationGracePeriodSeconds }, '#withTimeoutSeconds':: d.fn(help='"Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes"', args=[d.arg(name='timeoutSeconds', type=d.T.integer)]), withTimeoutSeconds(timeoutSeconds): { timeoutSeconds: timeoutSeconds }, diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/projectedVolumeSource.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/projectedVolumeSource.libsonnet new file mode 100644 index 00000000000..2bbb9a4d562 --- /dev/null +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/projectedVolumeSource.libsonnet @@ -0,0 +1,12 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='projectedVolumeSource', url='', help='"Represents a projected volume source"'), + '#withDefaultMode':: d.fn(help='"defaultMode are the mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Directories within the path are not affected by this setting. 
This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set."', args=[d.arg(name='defaultMode', type=d.T.integer)]), + withDefaultMode(defaultMode): { defaultMode: defaultMode }, + '#withSources':: d.fn(help='"sources is the list of volume projections"', args=[d.arg(name='sources', type=d.T.array)]), + withSources(sources): { sources: if std.isArray(v=sources) then sources else [sources] }, + '#withSourcesMixin':: d.fn(help='"sources is the list of volume projections"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='sources', type=d.T.array)]), + withSourcesMixin(sources): { sources+: if std.isArray(v=sources) then sources else [sources] }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/quobyteVolumeSource.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/quobyteVolumeSource.libsonnet similarity index 76% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/quobyteVolumeSource.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/quobyteVolumeSource.libsonnet index 65f5cc8af75..f83b09ce4f2 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/quobyteVolumeSource.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/quobyteVolumeSource.libsonnet @@ -1,17 +1,17 @@ { local d = (import 'doc-util/main.libsonnet'), '#':: d.pkg(name='quobyteVolumeSource', url='', help='"Represents a Quobyte mount that lasts the lifetime of a pod. Quobyte volumes do not support ownership management or SELinux relabeling."'), - '#withGroup':: d.fn(help='"Group to map volume access to Default is no group"', args=[d.arg(name='group', type=d.T.string)]), + '#withGroup':: d.fn(help='"group to map volume access to Default is no group"', args=[d.arg(name='group', type=d.T.string)]), withGroup(group): { group: group }, - '#withReadOnly':: d.fn(help='"ReadOnly here will force the Quobyte volume to be mounted with read-only permissions. Defaults to false."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly here will force the Quobyte volume to be mounted with read-only permissions. 
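// Illustrative sketch, not part of this patch: the projectedVolumeSource file added above
// exposes the usual generated setters. Assuming the vendored 1.29 main.libsonnet import;
// the configMap name in the source entry is hypothetical:
local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';
local projected = k.core.v1.projectedVolumeSource;

{
  projectedSource:
    projected.withDefaultMode(420)  // 420 decimal == 0644 octal
    + projected.withSources([{ configMap: { name: 'runtime-config' } }]),
}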
Defaults to false."', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { readOnly: readOnly }, - '#withRegistry':: d.fn(help='"Registry represents a single or multiple Quobyte Registry services specified as a string as host:port pair (multiple entries are separated with commas) which acts as the central registry for volumes"', args=[d.arg(name='registry', type=d.T.string)]), + '#withRegistry':: d.fn(help='"registry represents a single or multiple Quobyte Registry services specified as a string as host:port pair (multiple entries are separated with commas) which acts as the central registry for volumes"', args=[d.arg(name='registry', type=d.T.string)]), withRegistry(registry): { registry: registry }, - '#withTenant':: d.fn(help='"Tenant owning the given Quobyte volume in the Backend Used with dynamically provisioned Quobyte volumes, value is set by the plugin"', args=[d.arg(name='tenant', type=d.T.string)]), + '#withTenant':: d.fn(help='"tenant owning the given Quobyte volume in the Backend Used with dynamically provisioned Quobyte volumes, value is set by the plugin"', args=[d.arg(name='tenant', type=d.T.string)]), withTenant(tenant): { tenant: tenant }, - '#withUser':: d.fn(help='"User to map volume access to Defaults to serivceaccount user"', args=[d.arg(name='user', type=d.T.string)]), + '#withUser':: d.fn(help='"user to map volume access to Defaults to serivceaccount user"', args=[d.arg(name='user', type=d.T.string)]), withUser(user): { user: user }, - '#withVolume':: d.fn(help='"Volume is a string that references an already created Quobyte volume by name."', args=[d.arg(name='volume', type=d.T.string)]), + '#withVolume':: d.fn(help='"volume is a string that references an already created Quobyte volume by name."', args=[d.arg(name='volume', type=d.T.string)]), withVolume(volume): { volume: volume }, '#mixin': 'ignore', mixin: self, diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/rbdPersistentVolumeSource.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/rbdPersistentVolumeSource.libsonnet new file mode 100644 index 00000000000..5aa18be5d7d --- /dev/null +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/rbdPersistentVolumeSource.libsonnet @@ -0,0 +1,29 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='rbdPersistentVolumeSource', url='', help='"Represents a Rados Block Device mount that lasts the lifetime of a pod. RBD volumes support ownership management and SELinux relabeling."'), + '#secretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), + secretRef: { + '#withName':: d.fn(help='"name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { secretRef+: { name: name } }, + '#withNamespace':: d.fn(help='"namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), + withNamespace(namespace): { secretRef+: { namespace: namespace } }, + }, + '#withFsType':: d.fn(help='"fsType is the filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd"', args=[d.arg(name='fsType', type=d.T.string)]), + withFsType(fsType): { fsType: fsType }, + '#withImage':: d.fn(help='"image is the rados image name. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='image', type=d.T.string)]), + withImage(image): { image: image }, + '#withKeyring':: d.fn(help='"keyring is the path to key ring for RBDUser. Default is /etc/ceph/keyring. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='keyring', type=d.T.string)]), + withKeyring(keyring): { keyring: keyring }, + '#withMonitors':: d.fn(help='"monitors is a collection of Ceph monitors. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='monitors', type=d.T.array)]), + withMonitors(monitors): { monitors: if std.isArray(v=monitors) then monitors else [monitors] }, + '#withMonitorsMixin':: d.fn(help='"monitors is a collection of Ceph monitors. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='monitors', type=d.T.array)]), + withMonitorsMixin(monitors): { monitors+: if std.isArray(v=monitors) then monitors else [monitors] }, + '#withPool':: d.fn(help='"pool is the rados pool name. Default is rbd. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='pool', type=d.T.string)]), + withPool(pool): { pool: pool }, + '#withReadOnly':: d.fn(help='"readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='readOnly', type=d.T.boolean)]), + withReadOnly(readOnly): { readOnly: readOnly }, + '#withUser':: d.fn(help='"user is the rados user name. Default is admin. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='user', type=d.T.string)]), + withUser(user): { user: user }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/rbdVolumeSource.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/rbdVolumeSource.libsonnet new file mode 100644 index 00000000000..64840dff747 --- /dev/null +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/rbdVolumeSource.libsonnet @@ -0,0 +1,27 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='rbdVolumeSource', url='', help='"Represents a Rados Block Device mount that lasts the lifetime of a pod. RBD volumes support ownership management and SELinux relabeling."'), + '#secretRef':: d.obj(help='"LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace."'), + secretRef: { + '#withName':: d.fn(help='"Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { secretRef+: { name: name } }, + }, + '#withFsType':: d.fn(help='"fsType is the filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd"', args=[d.arg(name='fsType', type=d.T.string)]), + withFsType(fsType): { fsType: fsType }, + '#withImage':: d.fn(help='"image is the rados image name. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='image', type=d.T.string)]), + withImage(image): { image: image }, + '#withKeyring':: d.fn(help='"keyring is the path to key ring for RBDUser. Default is /etc/ceph/keyring. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='keyring', type=d.T.string)]), + withKeyring(keyring): { keyring: keyring }, + '#withMonitors':: d.fn(help='"monitors is a collection of Ceph monitors. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='monitors', type=d.T.array)]), + withMonitors(monitors): { monitors: if std.isArray(v=monitors) then monitors else [monitors] }, + '#withMonitorsMixin':: d.fn(help='"monitors is a collection of Ceph monitors. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='monitors', type=d.T.array)]), + withMonitorsMixin(monitors): { monitors+: if std.isArray(v=monitors) then monitors else [monitors] }, + '#withPool':: d.fn(help='"pool is the rados pool name. Default is rbd. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='pool', type=d.T.string)]), + withPool(pool): { pool: pool }, + '#withReadOnly':: d.fn(help='"readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='readOnly', type=d.T.boolean)]), + withReadOnly(readOnly): { readOnly: readOnly }, + '#withUser':: d.fn(help='"user is the rados user name. Default is admin. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='user', type=d.T.string)]), + withUser(user): { user: user }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/replicationController.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/replicationController.libsonnet similarity index 85% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/replicationController.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/replicationController.libsonnet index 4356ae8ca69..db614455fba 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/replicationController.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/replicationController.libsonnet @@ -3,12 +3,10 @@ '#':: d.pkg(name='replicationController', url='', help='"ReplicationController represents the configuration of a replication controller."'), '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. 
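// Illustrative sketch, not part of this patch: the regenerated rbdVolumeSource helpers
// above compose like any other generated object. Assuming the vendored 1.29
// main.libsonnet import; monitor addresses, pool, image and secret names are hypothetical:
local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';
local rbd = k.core.v1.rbdVolumeSource;

{
  rbdVolume:
    rbd.withMonitors(['10.16.154.78:6789', '10.16.154.82:6789'])
    + rbd.withPool('kube')
    + rbd.withImage('foo')
    + rbd.withUser('admin')
    + rbd.secretRef.withName('ceph-secret')
    + rbd.withFsType('ext4')
    + rbd.withReadOnly(true),
}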
More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -19,21 +17,21 @@ withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. 
If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { metadata+: { generateName: generateName } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { metadata+: { labels+: labels } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. 
Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { metadata+: { namespace: namespace } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, @@ -41,9 +39,9 @@ withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. 
Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { metadata+: { uid: uid } }, }, '#new':: d.fn(help='new returns an instance of ReplicationController', args=[d.arg(name='name', type=d.T.string)]), @@ -57,12 +55,10 @@ template: { '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { spec+: { template+: { metadata+: { annotations: annotations } } } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { spec+: { template+: { metadata+: { annotations+: annotations } } } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { spec+: { template+: { metadata+: { clusterName: clusterName } } } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { spec+: { template+: { metadata+: { creationTimestamp: creationTimestamp } } } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. 
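// Illustrative sketch, not part of this patch: apart from the refreshed doc links and the
// dropped clusterName/selfLink fields, the ReplicationController metadata helpers above keep
// their shapes. Assuming the vendored 1.29 main.libsonnet import (name, labels and replica
// count are hypothetical; spec.withReplicas is the standard generated setter, not shown in
// this hunk):
local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';
local rc = k.core.v1.replicationController;

{
  memcachedRc:
    rc.new('memcached')
    + rc.metadata.withLabels({ name: 'memcached' })
    + rc.spec.withReplicas(3),
}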
Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -73,21 +69,21 @@ withFinalizers(finalizers): { spec+: { template+: { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } } } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { spec+: { template+: { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } } } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { spec+: { template+: { metadata+: { generateName: generateName } } } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. 
Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { spec+: { template+: { metadata+: { generation: generation } } } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { spec+: { template+: { metadata+: { labels: labels } } } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { spec+: { template+: { metadata+: { labels+: labels } } } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { spec+: { template+: { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } } } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { spec+: { template+: { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } } } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. 
More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { spec+: { template+: { metadata+: { name: name } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { spec+: { template+: { metadata+: { namespace: namespace } } } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { spec+: { template+: { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } } } }, @@ -95,9 +91,9 @@ withOwnerReferencesMixin(ownerReferences): { spec+: { template+: { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } } } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { spec+: { template+: { metadata+: { resourceVersion: resourceVersion } } } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. 
Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { spec+: { template+: { metadata+: { selfLink: selfLink } } } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { spec+: { template+: { metadata+: { uid: uid } } } }, }, '#spec':: d.obj(help='"PodSpec is a description of a pod."'), @@ -156,6 +152,11 @@ '#withSearchesMixin':: d.fn(help='"A list of DNS search domains for host-name lookup. This will be appended to the base search paths generated from DNSPolicy. Duplicated search paths will be removed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='searches', type=d.T.array)]), withSearchesMixin(searches): { spec+: { template+: { spec+: { dnsConfig+: { searches+: if std.isArray(v=searches) then searches else [searches] } } } } }, }, + '#os':: d.obj(help='"PodOS defines the OS parameters of a pod."'), + os: { + '#withName':: d.fn(help='"Name is the name of the operating system. The currently supported values are linux and windows. Additional value may be defined in future and can be one of: https://github.com/opencontainers/runtime-spec/blob/master/config.md#platform-specific-configuration Clients should expect to handle additional values and treat unrecognized values in this field as os: null"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { spec+: { template+: { spec+: { os+: { name: name } } } } }, + }, '#securityContext':: d.obj(help='"PodSecurityContext holds pod-level security attributes and common container settings. Some fields are also present in container.securityContext. Field values of container.securityContext take precedence over field values of PodSecurityContext."'), securityContext: { '#seLinuxOptions':: d.obj(help='"SELinuxOptions are the labels to be applied to the container"'), @@ -171,7 +172,7 @@ }, '#seccompProfile':: d.obj(help="\"SeccompProfile defines a pod/container's seccomp profile settings. Only one profile source may be set.\""), seccompProfile: { - '#withLocalhostProfile':: d.fn(help="\"localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must only be set if type is \\\"Localhost\\\".\"", args=[d.arg(name='localhostProfile', type=d.T.string)]), + '#withLocalhostProfile':: d.fn(help="\"localhostProfile indicates a profile defined in a file on the node should be used. 
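// Illustrative sketch, not part of this patch: 1.29 adds a PodOS block to the pod template
// helpers above. Assuming the vendored 1.29 main.libsonnet import, the OS could be pinned on
// a ReplicationController's pod template like this (the controller name is hypothetical):
local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';
local rc = k.core.v1.replicationController;

rc.new('memcached')
+ rc.spec.template.spec.os.withName('linux')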
The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must be set if type is \\\"Localhost\\\". Must NOT be set for any other type.\"", args=[d.arg(name='localhostProfile', type=d.T.string)]), withLocalhostProfile(localhostProfile): { spec+: { template+: { spec+: { securityContext+: { seccompProfile+: { localhostProfile: localhostProfile } } } } } }, '#withType':: d.fn(help='"type indicates which kind of seccomp profile will be applied. Valid options are:\\n\\nLocalhost - a profile defined in a file on the node should be used. RuntimeDefault - the container runtime default profile should be used. Unconfined - no profile should be applied."', args=[d.arg(name='type', type=d.T.string)]), withType(type): { spec+: { template+: { spec+: { securityContext+: { seccompProfile+: { type: type } } } } } }, @@ -182,26 +183,28 @@ withGmsaCredentialSpec(gmsaCredentialSpec): { spec+: { template+: { spec+: { securityContext+: { windowsOptions+: { gmsaCredentialSpec: gmsaCredentialSpec } } } } } }, '#withGmsaCredentialSpecName':: d.fn(help='"GMSACredentialSpecName is the name of the GMSA credential spec to use."', args=[d.arg(name='gmsaCredentialSpecName', type=d.T.string)]), withGmsaCredentialSpecName(gmsaCredentialSpecName): { spec+: { template+: { spec+: { securityContext+: { windowsOptions+: { gmsaCredentialSpecName: gmsaCredentialSpecName } } } } } }, + '#withHostProcess':: d.fn(help="\"HostProcess determines if a container should be run as a 'Host Process' container. All of a Pod's containers must have the same effective HostProcess value (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). In addition, if HostProcess is true then HostNetwork must also be set to true.\"", args=[d.arg(name='hostProcess', type=d.T.boolean)]), + withHostProcess(hostProcess): { spec+: { template+: { spec+: { securityContext+: { windowsOptions+: { hostProcess: hostProcess } } } } } }, '#withRunAsUserName':: d.fn(help='"The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence."', args=[d.arg(name='runAsUserName', type=d.T.string)]), withRunAsUserName(runAsUserName): { spec+: { template+: { spec+: { securityContext+: { windowsOptions+: { runAsUserName: runAsUserName } } } } } }, }, - '#withFsGroup':: d.fn(help="\"A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod:\\n\\n1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw----\\n\\nIf unset, the Kubelet will not modify the ownership and permissions of any volume.\"", args=[d.arg(name='fsGroup', type=d.T.integer)]), + '#withFsGroup':: d.fn(help="\"A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod:\\n\\n1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw----\\n\\nIf unset, the Kubelet will not modify the ownership and permissions of any volume. 
Note that this field cannot be set when spec.os.name is windows.\"", args=[d.arg(name='fsGroup', type=d.T.integer)]), withFsGroup(fsGroup): { spec+: { template+: { spec+: { securityContext+: { fsGroup: fsGroup } } } } }, - '#withFsGroupChangePolicy':: d.fn(help='"fsGroupChangePolicy defines behavior of changing ownership and permission of the volume before being exposed inside Pod. This field will only apply to volume types which support fsGroup based ownership(and permissions). It will have no effect on ephemeral volume types such as: secret, configmaps and emptydir. Valid values are \\"OnRootMismatch\\" and \\"Always\\". If not specified, \\"Always\\" is used."', args=[d.arg(name='fsGroupChangePolicy', type=d.T.string)]), + '#withFsGroupChangePolicy':: d.fn(help='"fsGroupChangePolicy defines behavior of changing ownership and permission of the volume before being exposed inside Pod. This field will only apply to volume types which support fsGroup based ownership(and permissions). It will have no effect on ephemeral volume types such as: secret, configmaps and emptydir. Valid values are \\"OnRootMismatch\\" and \\"Always\\". If not specified, \\"Always\\" is used. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='fsGroupChangePolicy', type=d.T.string)]), withFsGroupChangePolicy(fsGroupChangePolicy): { spec+: { template+: { spec+: { securityContext+: { fsGroupChangePolicy: fsGroupChangePolicy } } } } }, - '#withRunAsGroup':: d.fn(help='"The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container."', args=[d.arg(name='runAsGroup', type=d.T.integer)]), + '#withRunAsGroup':: d.fn(help='"The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='runAsGroup', type=d.T.integer)]), withRunAsGroup(runAsGroup): { spec+: { template+: { spec+: { securityContext+: { runAsGroup: runAsGroup } } } } }, '#withRunAsNonRoot':: d.fn(help='"Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence."', args=[d.arg(name='runAsNonRoot', type=d.T.boolean)]), withRunAsNonRoot(runAsNonRoot): { spec+: { template+: { spec+: { securityContext+: { runAsNonRoot: runAsNonRoot } } } } }, - '#withRunAsUser':: d.fn(help='"The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container."', args=[d.arg(name='runAsUser', type=d.T.integer)]), + '#withRunAsUser':: d.fn(help='"The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. 
If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='runAsUser', type=d.T.integer)]), withRunAsUser(runAsUser): { spec+: { template+: { spec+: { securityContext+: { runAsUser: runAsUser } } } } }, - '#withSupplementalGroups':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID. If unspecified, no groups will be added to any container.\"", args=[d.arg(name='supplementalGroups', type=d.T.array)]), + '#withSupplementalGroups':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID, the fsGroup (if specified), and group memberships defined in the container image for the uid of the container process. If unspecified, no additional groups are added to any container. Note that group memberships defined in the container image for the uid of the container process are still effective, even if they are not included in this list. Note that this field cannot be set when spec.os.name is windows.\"", args=[d.arg(name='supplementalGroups', type=d.T.array)]), withSupplementalGroups(supplementalGroups): { spec+: { template+: { spec+: { securityContext+: { supplementalGroups: if std.isArray(v=supplementalGroups) then supplementalGroups else [supplementalGroups] } } } } }, - '#withSupplementalGroupsMixin':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID. If unspecified, no groups will be added to any container.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='supplementalGroups', type=d.T.array)]), + '#withSupplementalGroupsMixin':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID, the fsGroup (if specified), and group memberships defined in the container image for the uid of the container process. If unspecified, no additional groups are added to any container. Note that group memberships defined in the container image for the uid of the container process are still effective, even if they are not included in this list. Note that this field cannot be set when spec.os.name is windows.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='supplementalGroups', type=d.T.array)]), withSupplementalGroupsMixin(supplementalGroups): { spec+: { template+: { spec+: { securityContext+: { supplementalGroups+: if std.isArray(v=supplementalGroups) then supplementalGroups else [supplementalGroups] } } } } }, - '#withSysctls':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch."', args=[d.arg(name='sysctls', type=d.T.array)]), + '#withSysctls':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='sysctls', type=d.T.array)]), withSysctls(sysctls): { spec+: { template+: { spec+: { securityContext+: { sysctls: if std.isArray(v=sysctls) then sysctls else [sysctls] } } } } }, - '#withSysctlsMixin':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. 
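// Illustrative sketch, not part of this patch: the pod-level securityContext setters above
// keep their signatures; only the upstream help text gained the "cannot be set when
// spec.os.name is windows" caveats. Assuming the vendored 1.29 main.libsonnet import
// (numeric IDs and the seccomp type are hypothetical):
local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';
local rc = k.core.v1.replicationController;
local podSecurity = rc.spec.template.spec.securityContext;

rc.new('memcached')
+ podSecurity.seccompProfile.withType('RuntimeDefault')
+ podSecurity.withRunAsNonRoot(true)
+ podSecurity.withRunAsUser(10001)
+ podSecurity.withFsGroup(10001)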
Pods with unsupported sysctls (by the container runtime) might fail to launch."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='sysctls', type=d.T.array)]), + '#withSysctlsMixin':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch. Note that this field cannot be set when spec.os.name is windows."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='sysctls', type=d.T.array)]), withSysctlsMixin(sysctls): { spec+: { template+: { spec+: { securityContext+: { sysctls+: if std.isArray(v=sysctls) then sysctls else [sysctls] } } } } }, }, '#withActiveDeadlineSeconds':: d.fn(help='"Optional duration in seconds the pod may be active on the node relative to StartTime before the system will actively try to mark it failed and kill associated containers. Value must be a positive integer."', args=[d.arg(name='activeDeadlineSeconds', type=d.T.integer)]), @@ -216,9 +219,9 @@ withDnsPolicy(dnsPolicy): { spec+: { template+: { spec+: { dnsPolicy: dnsPolicy } } } }, '#withEnableServiceLinks':: d.fn(help="\"EnableServiceLinks indicates whether information about services should be injected into pod's environment variables, matching the syntax of Docker links. Optional: Defaults to true.\"", args=[d.arg(name='enableServiceLinks', type=d.T.boolean)]), withEnableServiceLinks(enableServiceLinks): { spec+: { template+: { spec+: { enableServiceLinks: enableServiceLinks } } } }, - '#withEphemeralContainers':: d.fn(help="\"List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. This field is alpha-level and is only honored by servers that enable the EphemeralContainers feature.\"", args=[d.arg(name='ephemeralContainers', type=d.T.array)]), + '#withEphemeralContainers':: d.fn(help="\"List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource.\"", args=[d.arg(name='ephemeralContainers', type=d.T.array)]), withEphemeralContainers(ephemeralContainers): { spec+: { template+: { spec+: { ephemeralContainers: if std.isArray(v=ephemeralContainers) then ephemeralContainers else [ephemeralContainers] } } } }, - '#withEphemeralContainersMixin':: d.fn(help="\"List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. This field is alpha-level and is only honored by servers that enable the EphemeralContainers feature.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='ephemeralContainers', type=d.T.array)]), + '#withEphemeralContainersMixin':: d.fn(help="\"List of ephemeral containers run in this pod. 
Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='ephemeralContainers', type=d.T.array)]), withEphemeralContainersMixin(ephemeralContainers): { spec+: { template+: { spec+: { ephemeralContainers+: if std.isArray(v=ephemeralContainers) then ephemeralContainers else [ephemeralContainers] } } } }, '#withHostAliases':: d.fn(help="\"HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts file if specified. This is only valid for non-hostNetwork pods.\"", args=[d.arg(name='hostAliases', type=d.T.array)]), withHostAliases(hostAliases): { spec+: { template+: { spec+: { hostAliases: if std.isArray(v=hostAliases) then hostAliases else [hostAliases] } } } }, @@ -230,11 +233,13 @@ withHostNetwork(hostNetwork): { spec+: { template+: { spec+: { hostNetwork: hostNetwork } } } }, '#withHostPID':: d.fn(help="\"Use the host's pid namespace. Optional: Default to false.\"", args=[d.arg(name='hostPID', type=d.T.boolean)]), withHostPID(hostPID): { spec+: { template+: { spec+: { hostPID: hostPID } } } }, + '#withHostUsers':: d.fn(help="\"Use the host's user namespace. Optional: Default to true. If set to true or not present, the pod will be run in the host user namespace, useful for when the pod needs a feature only available to the host user namespace, such as loading a kernel module with CAP_SYS_MODULE. When set to false, a new userns is created for the pod. Setting false is useful for mitigating container breakout vulnerabilities even allowing users to run their containers as root without actually having root privileges on the host. This field is alpha-level and is only honored by servers that enable the UserNamespacesSupport feature.\"", args=[d.arg(name='hostUsers', type=d.T.boolean)]), + withHostUsers(hostUsers): { spec+: { template+: { spec+: { hostUsers: hostUsers } } } }, '#withHostname':: d.fn(help="\"Specifies the hostname of the Pod If not specified, the pod's hostname will be set to a system-defined value.\"", args=[d.arg(name='hostname', type=d.T.string)]), withHostname(hostname): { spec+: { template+: { spec+: { hostname: hostname } } } }, - '#withImagePullSecrets':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), + '#withImagePullSecrets':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. 
 withImagePullSecrets(imagePullSecrets): { spec+: { template+: { spec+: { imagePullSecrets: if std.isArray(v=imagePullSecrets) then imagePullSecrets else [imagePullSecrets] } } } },
- '#withImagePullSecretsMixin':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='imagePullSecrets', type=d.T.array)]),
+ '#withImagePullSecretsMixin':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='imagePullSecrets', type=d.T.array)]),
 withImagePullSecretsMixin(imagePullSecrets): { spec+: { template+: { spec+: { imagePullSecrets+: if std.isArray(v=imagePullSecrets) then imagePullSecrets else [imagePullSecrets] } } } },
 '#withInitContainers':: d.fn(help='"List of initialization containers belonging to the pod. Init containers are executed in order prior to containers being started. If any init container fails, the pod is considered to have failed and is handled according to its restartPolicy. The name for an init container or normal container must be unique among all containers. Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes. The resourceRequirements of an init container are taken into account during scheduling by finding the highest request/limit for each resource type, and then using the max of of that value or the sum of the normal containers. Limits are applied to init containers in a similar fashion. Init containers cannot currently be added or removed. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/"', args=[d.arg(name='initContainers', type=d.T.array)]),
 withInitContainers(initContainers): { spec+: { template+: { spec+: { initContainers: if std.isArray(v=initContainers) then initContainers else [initContainers] } } } },
@@ -246,26 +251,34 @@
 withNodeSelector(nodeSelector): { spec+: { template+: { spec+: { nodeSelector: nodeSelector } } } },
 '#withNodeSelectorMixin':: d.fn(help="\"NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='nodeSelector', type=d.T.object)]),
 withNodeSelectorMixin(nodeSelector): { spec+: { template+: { spec+: { nodeSelector+: nodeSelector } } } },
- '#withOverhead':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md This field is alpha-level as of Kubernetes v1.16, and is only honored by servers that enable the PodOverhead feature."', args=[d.arg(name='overhead', type=d.T.object)]),
+ '#withOverhead':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md"', args=[d.arg(name='overhead', type=d.T.object)]),
 withOverhead(overhead): { spec+: { template+: { spec+: { overhead: overhead } } } },
- '#withOverheadMixin':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md This field is alpha-level as of Kubernetes v1.16, and is only honored by servers that enable the PodOverhead feature."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='overhead', type=d.T.object)]),
+ '#withOverheadMixin':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='overhead', type=d.T.object)]),
 withOverheadMixin(overhead): { spec+: { template+: { spec+: { overhead+: overhead } } } },
- '#withPreemptionPolicy':: d.fn(help='"PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset. This field is beta-level, gated by the NonPreemptingPriority feature-gate."', args=[d.arg(name='preemptionPolicy', type=d.T.string)]),
+ '#withPreemptionPolicy':: d.fn(help='"PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset."', args=[d.arg(name='preemptionPolicy', type=d.T.string)]),
 withPreemptionPolicy(preemptionPolicy): { spec+: { template+: { spec+: { preemptionPolicy: preemptionPolicy } } } },
 '#withPriority':: d.fn(help='"The priority value. Various system components use this field to find the priority of the pod. When Priority Admission Controller is enabled, it prevents users from setting this field. The admission controller populates this field from PriorityClassName. The higher the value, the higher the priority."', args=[d.arg(name='priority', type=d.T.integer)]),
 withPriority(priority): { spec+: { template+: { spec+: { priority: priority } } } },
 '#withPriorityClassName':: d.fn(help="\"If specified, indicates the pod's priority. \\\"system-node-critical\\\" and \\\"system-cluster-critical\\\" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default.\"", args=[d.arg(name='priorityClassName', type=d.T.string)]),
 withPriorityClassName(priorityClassName): { spec+: { template+: { spec+: { priorityClassName: priorityClassName } } } },
- '#withReadinessGates':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/0007-pod-ready%2B%2B.md"', args=[d.arg(name='readinessGates', type=d.T.array)]),
+ '#withReadinessGates':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates"', args=[d.arg(name='readinessGates', type=d.T.array)]),
 withReadinessGates(readinessGates): { spec+: { template+: { spec+: { readinessGates: if std.isArray(v=readinessGates) then readinessGates else [readinessGates] } } } },
- '#withReadinessGatesMixin':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/0007-pod-ready%2B%2B.md"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='readinessGates', type=d.T.array)]),
+ '#withReadinessGatesMixin':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='readinessGates', type=d.T.array)]),
 withReadinessGatesMixin(readinessGates): { spec+: { template+: { spec+: { readinessGates+: if std.isArray(v=readinessGates) then readinessGates else [readinessGates] } } } },
- '#withRestartPolicy':: d.fn(help='"Restart policy for all containers within the pod. One of Always, OnFailure, Never. Default to Always. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy"', args=[d.arg(name='restartPolicy', type=d.T.string)]),
+ '#withResourceClaims':: d.fn(help='"ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. The resources will be made available to those containers which consume them by name.\\n\\nThis is an alpha field and requires enabling the DynamicResourceAllocation feature gate.\\n\\nThis field is immutable."', args=[d.arg(name='resourceClaims', type=d.T.array)]),
+ withResourceClaims(resourceClaims): { spec+: { template+: { spec+: { resourceClaims: if std.isArray(v=resourceClaims) then resourceClaims else [resourceClaims] } } } },
+ '#withResourceClaimsMixin':: d.fn(help='"ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. The resources will be made available to those containers which consume them by name.\\n\\nThis is an alpha field and requires enabling the DynamicResourceAllocation feature gate.\\n\\nThis field is immutable."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='resourceClaims', type=d.T.array)]),
+ withResourceClaimsMixin(resourceClaims): { spec+: { template+: { spec+: { resourceClaims+: if std.isArray(v=resourceClaims) then resourceClaims else [resourceClaims] } } } },
+ '#withRestartPolicy':: d.fn(help='"Restart policy for all containers within the pod. One of Always, OnFailure, Never. In some contexts, only a subset of those values may be permitted. Default to Always. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy"', args=[d.arg(name='restartPolicy', type=d.T.string)]),
 withRestartPolicy(restartPolicy): { spec+: { template+: { spec+: { restartPolicy: restartPolicy } } } },
- '#withRuntimeClassName':: d.fn(help='"RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \\"legacy\\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md This is a beta feature as of Kubernetes v1.14."', args=[d.arg(name='runtimeClassName', type=d.T.string)]),
+ '#withRuntimeClassName':: d.fn(help='"RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \\"legacy\\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class"', args=[d.arg(name='runtimeClassName', type=d.T.string)]),
 withRuntimeClassName(runtimeClassName): { spec+: { template+: { spec+: { runtimeClassName: runtimeClassName } } } },
 '#withSchedulerName':: d.fn(help='"If specified, the pod will be dispatched by specified scheduler. If not specified, the pod will be dispatched by default scheduler."', args=[d.arg(name='schedulerName', type=d.T.string)]),
 withSchedulerName(schedulerName): { spec+: { template+: { spec+: { schedulerName: schedulerName } } } },
+ '#withSchedulingGates':: d.fn(help='"SchedulingGates is an opaque list of values that if specified will block scheduling the pod. If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the scheduler will not attempt to schedule the pod.\\n\\nSchedulingGates can only be set at pod creation time, and be removed only afterwards.\\n\\nThis is a beta feature enabled by the PodSchedulingReadiness feature gate."', args=[d.arg(name='schedulingGates', type=d.T.array)]),
+ withSchedulingGates(schedulingGates): { spec+: { template+: { spec+: { schedulingGates: if std.isArray(v=schedulingGates) then schedulingGates else [schedulingGates] } } } },
+ '#withSchedulingGatesMixin':: d.fn(help='"SchedulingGates is an opaque list of values that if specified will block scheduling the pod. If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the scheduler will not attempt to schedule the pod.\\n\\nSchedulingGates can only be set at pod creation time, and be removed only afterwards.\\n\\nThis is a beta feature enabled by the PodSchedulingReadiness feature gate."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='schedulingGates', type=d.T.array)]),
+ withSchedulingGatesMixin(schedulingGates): { spec+: { template+: { spec+: { schedulingGates+: if std.isArray(v=schedulingGates) then schedulingGates else [schedulingGates] } } } },
 '#withServiceAccount':: d.fn(help='"DeprecatedServiceAccount is a depreciated alias for ServiceAccountName. Deprecated: Use serviceAccountName instead."', args=[d.arg(name='serviceAccount', type=d.T.string)]),
 withServiceAccount(serviceAccount): { spec+: { template+: { spec+: { serviceAccount: serviceAccount } } } },
 '#withServiceAccountName':: d.fn(help='"ServiceAccountName is the name of the ServiceAccount to use to run this pod. More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/"', args=[d.arg(name='serviceAccountName', type=d.T.string)]),
diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/replicationControllerCondition.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/replicationControllerCondition.libsonnet
similarity index 100%
rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/replicationControllerCondition.libsonnet
rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/replicationControllerCondition.libsonnet
diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/replicationControllerSpec.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/replicationControllerSpec.libsonnet
similarity index 85%
rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/replicationControllerSpec.libsonnet
rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/replicationControllerSpec.libsonnet
index 0893916e78d..b1363bd00c5 100644
--- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/replicationControllerSpec.libsonnet
+++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/replicationControllerSpec.libsonnet
@@ -5,12 +5,10 @@
 template: {
 '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'),
 metadata: {
- '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]),
+ '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]),
 withAnnotations(annotations): { template+: { metadata+: { annotations: annotations } } },
- '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]),
+ '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]),
 withAnnotationsMixin(annotations): { template+: { metadata+: { annotations+: annotations } } },
- '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]),
- withClusterName(clusterName): { template+: { metadata+: { clusterName: clusterName } } },
 '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]),
 withCreationTimestamp(creationTimestamp): { template+: { metadata+: { creationTimestamp: creationTimestamp } } },
 '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]),
@@ -21,21 +19,21 @@
 withFinalizers(finalizers): { template+: { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } } },
 '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]),
 withFinalizersMixin(finalizers): { template+: { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } } },
- '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]),
+ '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]),
 withGenerateName(generateName): { template+: { metadata+: { generateName: generateName } } },
 '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]),
 withGeneration(generation): { template+: { metadata+: { generation: generation } } },
- '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]),
+ '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]),
 withLabels(labels): { template+: { metadata+: { labels: labels } } },
- '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]),
+ '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]),
 withLabelsMixin(labels): { template+: { metadata+: { labels+: labels } } },
 '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]),
 withManagedFields(managedFields): { template+: { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } } },
 '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]),
 withManagedFieldsMixin(managedFields): { template+: { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } } },
- '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]),
+ '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]),
 withName(name): { template+: { metadata+: { name: name } } },
- '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]),
+ '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]),
 withNamespace(namespace): { template+: { metadata+: { namespace: namespace } } },
 '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]),
 withOwnerReferences(ownerReferences): { template+: { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } } },
@@ -43,9 +41,9 @@
 withOwnerReferencesMixin(ownerReferences): { template+: { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } } },
 '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]),
 withResourceVersion(resourceVersion): { template+: { metadata+: { resourceVersion: resourceVersion } } },
- '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]),
+ '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]),
 withSelfLink(selfLink): { template+: { metadata+: { selfLink: selfLink } } },
- '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]),
+ '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]),
 withUid(uid): { template+: { metadata+: { uid: uid } } },
 },
 '#spec':: d.obj(help='"PodSpec is a description of a pod."'),
@@ -104,6 +102,11 @@
 '#withSearchesMixin':: d.fn(help='"A list of DNS search domains for host-name lookup. This will be appended to the base search paths generated from DNSPolicy. Duplicated search paths will be removed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='searches', type=d.T.array)]),
 withSearchesMixin(searches): { template+: { spec+: { dnsConfig+: { searches+: if std.isArray(v=searches) then searches else [searches] } } } },
 },
+ '#os':: d.obj(help='"PodOS defines the OS parameters of a pod."'),
+ os: {
+ '#withName':: d.fn(help='"Name is the name of the operating system. The currently supported values are linux and windows. Additional value may be defined in future and can be one of: https://github.com/opencontainers/runtime-spec/blob/master/config.md#platform-specific-configuration Clients should expect to handle additional values and treat unrecognized values in this field as os: null"', args=[d.arg(name='name', type=d.T.string)]),
+ withName(name): { template+: { spec+: { os+: { name: name } } } },
+ },
 '#securityContext':: d.obj(help='"PodSecurityContext holds pod-level security attributes and common container settings. Some fields are also present in container.securityContext. Field values of container.securityContext take precedence over field values of PodSecurityContext."'),
 securityContext: {
 '#seLinuxOptions':: d.obj(help='"SELinuxOptions are the labels to be applied to the container"'),
@@ -119,7 +122,7 @@
 },
 '#seccompProfile':: d.obj(help="\"SeccompProfile defines a pod/container's seccomp profile settings. Only one profile source may be set.\""),
 seccompProfile: {
- '#withLocalhostProfile':: d.fn(help="\"localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must only be set if type is \\\"Localhost\\\".\"", args=[d.arg(name='localhostProfile', type=d.T.string)]),
+ '#withLocalhostProfile':: d.fn(help="\"localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must be set if type is \\\"Localhost\\\". Must NOT be set for any other type.\"", args=[d.arg(name='localhostProfile', type=d.T.string)]),
 withLocalhostProfile(localhostProfile): { template+: { spec+: { securityContext+: { seccompProfile+: { localhostProfile: localhostProfile } } } } },
 '#withType':: d.fn(help='"type indicates which kind of seccomp profile will be applied. Valid options are:\\n\\nLocalhost - a profile defined in a file on the node should be used. RuntimeDefault - the container runtime default profile should be used. Unconfined - no profile should be applied."', args=[d.arg(name='type', type=d.T.string)]),
 withType(type): { template+: { spec+: { securityContext+: { seccompProfile+: { type: type } } } } },
@@ -130,26 +133,28 @@
 withGmsaCredentialSpec(gmsaCredentialSpec): { template+: { spec+: { securityContext+: { windowsOptions+: { gmsaCredentialSpec: gmsaCredentialSpec } } } } },
 '#withGmsaCredentialSpecName':: d.fn(help='"GMSACredentialSpecName is the name of the GMSA credential spec to use."', args=[d.arg(name='gmsaCredentialSpecName', type=d.T.string)]),
 withGmsaCredentialSpecName(gmsaCredentialSpecName): { template+: { spec+: { securityContext+: { windowsOptions+: { gmsaCredentialSpecName: gmsaCredentialSpecName } } } } },
+ '#withHostProcess':: d.fn(help="\"HostProcess determines if a container should be run as a 'Host Process' container. All of a Pod's containers must have the same effective HostProcess value (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). In addition, if HostProcess is true then HostNetwork must also be set to true.\"", args=[d.arg(name='hostProcess', type=d.T.boolean)]),
+ withHostProcess(hostProcess): { template+: { spec+: { securityContext+: { windowsOptions+: { hostProcess: hostProcess } } } } },
 '#withRunAsUserName':: d.fn(help='"The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence."', args=[d.arg(name='runAsUserName', type=d.T.string)]),
 withRunAsUserName(runAsUserName): { template+: { spec+: { securityContext+: { windowsOptions+: { runAsUserName: runAsUserName } } } } },
 },
- '#withFsGroup':: d.fn(help="\"A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod:\\n\\n1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw----\\n\\nIf unset, the Kubelet will not modify the ownership and permissions of any volume.\"", args=[d.arg(name='fsGroup', type=d.T.integer)]),
+ '#withFsGroup':: d.fn(help="\"A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod:\\n\\n1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw----\\n\\nIf unset, the Kubelet will not modify the ownership and permissions of any volume. Note that this field cannot be set when spec.os.name is windows.\"", args=[d.arg(name='fsGroup', type=d.T.integer)]),
 withFsGroup(fsGroup): { template+: { spec+: { securityContext+: { fsGroup: fsGroup } } } },
- '#withFsGroupChangePolicy':: d.fn(help='"fsGroupChangePolicy defines behavior of changing ownership and permission of the volume before being exposed inside Pod. This field will only apply to volume types which support fsGroup based ownership(and permissions). It will have no effect on ephemeral volume types such as: secret, configmaps and emptydir. Valid values are \\"OnRootMismatch\\" and \\"Always\\". If not specified, \\"Always\\" is used."', args=[d.arg(name='fsGroupChangePolicy', type=d.T.string)]),
+ '#withFsGroupChangePolicy':: d.fn(help='"fsGroupChangePolicy defines behavior of changing ownership and permission of the volume before being exposed inside Pod. This field will only apply to volume types which support fsGroup based ownership(and permissions). It will have no effect on ephemeral volume types such as: secret, configmaps and emptydir. Valid values are \\"OnRootMismatch\\" and \\"Always\\". If not specified, \\"Always\\" is used. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='fsGroupChangePolicy', type=d.T.string)]),
 withFsGroupChangePolicy(fsGroupChangePolicy): { template+: { spec+: { securityContext+: { fsGroupChangePolicy: fsGroupChangePolicy } } } },
- '#withRunAsGroup':: d.fn(help='"The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container."', args=[d.arg(name='runAsGroup', type=d.T.integer)]),
+ '#withRunAsGroup':: d.fn(help='"The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='runAsGroup', type=d.T.integer)]),
 withRunAsGroup(runAsGroup): { template+: { spec+: { securityContext+: { runAsGroup: runAsGroup } } } },
 '#withRunAsNonRoot':: d.fn(help='"Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence."', args=[d.arg(name='runAsNonRoot', type=d.T.boolean)]),
 withRunAsNonRoot(runAsNonRoot): { template+: { spec+: { securityContext+: { runAsNonRoot: runAsNonRoot } } } },
- '#withRunAsUser':: d.fn(help='"The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container."', args=[d.arg(name='runAsUser', type=d.T.integer)]),
+ '#withRunAsUser':: d.fn(help='"The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='runAsUser', type=d.T.integer)]),
 withRunAsUser(runAsUser): { template+: { spec+: { securityContext+: { runAsUser: runAsUser } } } },
- '#withSupplementalGroups':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID. If unspecified, no groups will be added to any container.\"", args=[d.arg(name='supplementalGroups', type=d.T.array)]),
+ '#withSupplementalGroups':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID, the fsGroup (if specified), and group memberships defined in the container image for the uid of the container process. If unspecified, no additional groups are added to any container. Note that group memberships defined in the container image for the uid of the container process are still effective, even if they are not included in this list. Note that this field cannot be set when spec.os.name is windows.\"", args=[d.arg(name='supplementalGroups', type=d.T.array)]),
 withSupplementalGroups(supplementalGroups): { template+: { spec+: { securityContext+: { supplementalGroups: if std.isArray(v=supplementalGroups) then supplementalGroups else [supplementalGroups] } } } },
- '#withSupplementalGroupsMixin':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID. If unspecified, no groups will be added to any container.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='supplementalGroups', type=d.T.array)]),
+ '#withSupplementalGroupsMixin':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID, the fsGroup (if specified), and group memberships defined in the container image for the uid of the container process. If unspecified, no additional groups are added to any container. Note that group memberships defined in the container image for the uid of the container process are still effective, even if they are not included in this list. Note that this field cannot be set when spec.os.name is windows.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='supplementalGroups', type=d.T.array)]),
 withSupplementalGroupsMixin(supplementalGroups): { template+: { spec+: { securityContext+: { supplementalGroups+: if std.isArray(v=supplementalGroups) then supplementalGroups else [supplementalGroups] } } } },
- '#withSysctls':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch."', args=[d.arg(name='sysctls', type=d.T.array)]),
+ '#withSysctls':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='sysctls', type=d.T.array)]),
 withSysctls(sysctls): { template+: { spec+: { securityContext+: { sysctls: if std.isArray(v=sysctls) then sysctls else [sysctls] } } } },
- '#withSysctlsMixin':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='sysctls', type=d.T.array)]),
+ '#withSysctlsMixin':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch. Note that this field cannot be set when spec.os.name is windows."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='sysctls', type=d.T.array)]),
 withSysctlsMixin(sysctls): { template+: { spec+: { securityContext+: { sysctls+: if std.isArray(v=sysctls) then sysctls else [sysctls] } } } },
 },
 '#withActiveDeadlineSeconds':: d.fn(help='"Optional duration in seconds the pod may be active on the node relative to StartTime before the system will actively try to mark it failed and kill associated containers. Value must be a positive integer."', args=[d.arg(name='activeDeadlineSeconds', type=d.T.integer)]),
@@ -164,9 +169,9 @@
 withDnsPolicy(dnsPolicy): { template+: { spec+: { dnsPolicy: dnsPolicy } } },
 '#withEnableServiceLinks':: d.fn(help="\"EnableServiceLinks indicates whether information about services should be injected into pod's environment variables, matching the syntax of Docker links. Optional: Defaults to true.\"", args=[d.arg(name='enableServiceLinks', type=d.T.boolean)]),
 withEnableServiceLinks(enableServiceLinks): { template+: { spec+: { enableServiceLinks: enableServiceLinks } } },
- '#withEphemeralContainers':: d.fn(help="\"List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. This field is alpha-level and is only honored by servers that enable the EphemeralContainers feature.\"", args=[d.arg(name='ephemeralContainers', type=d.T.array)]),
+ '#withEphemeralContainers':: d.fn(help="\"List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource.\"", args=[d.arg(name='ephemeralContainers', type=d.T.array)]),
 withEphemeralContainers(ephemeralContainers): { template+: { spec+: { ephemeralContainers: if std.isArray(v=ephemeralContainers) then ephemeralContainers else [ephemeralContainers] } } },
- '#withEphemeralContainersMixin':: d.fn(help="\"List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. This field is alpha-level and is only honored by servers that enable the EphemeralContainers feature.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='ephemeralContainers', type=d.T.array)]),
+ '#withEphemeralContainersMixin':: d.fn(help="\"List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='ephemeralContainers', type=d.T.array)]),
 withEphemeralContainersMixin(ephemeralContainers): { template+: { spec+: { ephemeralContainers+: if std.isArray(v=ephemeralContainers) then ephemeralContainers else [ephemeralContainers] } } },
 '#withHostAliases':: d.fn(help="\"HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts file if specified. This is only valid for non-hostNetwork pods.\"", args=[d.arg(name='hostAliases', type=d.T.array)]),
 withHostAliases(hostAliases): { template+: { spec+: { hostAliases: if std.isArray(v=hostAliases) then hostAliases else [hostAliases] } } },
@@ -178,11 +183,13 @@
 withHostNetwork(hostNetwork): { template+: { spec+: { hostNetwork: hostNetwork } } },
 '#withHostPID':: d.fn(help="\"Use the host's pid namespace. Optional: Default to false.\"", args=[d.arg(name='hostPID', type=d.T.boolean)]),
 withHostPID(hostPID): { template+: { spec+: { hostPID: hostPID } } },
+ '#withHostUsers':: d.fn(help="\"Use the host's user namespace. Optional: Default to true. If set to true or not present, the pod will be run in the host user namespace, useful for when the pod needs a feature only available to the host user namespace, such as loading a kernel module with CAP_SYS_MODULE. When set to false, a new userns is created for the pod. Setting false is useful for mitigating container breakout vulnerabilities even allowing users to run their containers as root without actually having root privileges on the host. This field is alpha-level and is only honored by servers that enable the UserNamespacesSupport feature.\"", args=[d.arg(name='hostUsers', type=d.T.boolean)]),
+ withHostUsers(hostUsers): { template+: { spec+: { hostUsers: hostUsers } } },
 '#withHostname':: d.fn(help="\"Specifies the hostname of the Pod If not specified, the pod's hostname will be set to a system-defined value.\"", args=[d.arg(name='hostname', type=d.T.string)]),
 withHostname(hostname): { template+: { spec+: { hostname: hostname } } },
- '#withImagePullSecrets':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"', args=[d.arg(name='imagePullSecrets', type=d.T.array)]),
+ '#withImagePullSecrets':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"', args=[d.arg(name='imagePullSecrets', type=d.T.array)]),
 withImagePullSecrets(imagePullSecrets): { template+: { spec+: { imagePullSecrets: if std.isArray(v=imagePullSecrets) then imagePullSecrets else [imagePullSecrets] } } },
- '#withImagePullSecretsMixin':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='imagePullSecrets', type=d.T.array)]),
+ '#withImagePullSecretsMixin':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='imagePullSecrets', type=d.T.array)]),
 withImagePullSecretsMixin(imagePullSecrets): { template+: { spec+: { imagePullSecrets+: if std.isArray(v=imagePullSecrets) then imagePullSecrets else [imagePullSecrets] } } },
 '#withInitContainers':: d.fn(help='"List of initialization containers belonging to the pod. Init containers are executed in order prior to containers being started. If any init container fails, the pod is considered to have failed and is handled according to its restartPolicy. The name for an init container or normal container must be unique among all containers. Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes. The resourceRequirements of an init container are taken into account during scheduling by finding the highest request/limit for each resource type, and then using the max of of that value or the sum of the normal containers. Limits are applied to init containers in a similar fashion. Init containers cannot currently be added or removed. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/"', args=[d.arg(name='initContainers', type=d.T.array)]),
 withInitContainers(initContainers): { template+: { spec+: { initContainers: if std.isArray(v=initContainers) then initContainers else [initContainers] } } },
@@ -194,26 +201,34 @@
 withNodeSelector(nodeSelector): { template+: { spec+: { nodeSelector: nodeSelector } } },
 '#withNodeSelectorMixin':: d.fn(help="\"NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='nodeSelector', type=d.T.object)]),
 withNodeSelectorMixin(nodeSelector): { template+: { spec+: { nodeSelector+: nodeSelector } } },
- '#withOverhead':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md This field is alpha-level as of Kubernetes v1.16, and is only honored by servers that enable the PodOverhead feature."', args=[d.arg(name='overhead', type=d.T.object)]),
+ '#withOverhead':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md"', args=[d.arg(name='overhead', type=d.T.object)]),
 withOverhead(overhead): { template+: { spec+: { overhead: overhead } } },
- '#withOverheadMixin':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set.
If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md This field is alpha-level as of Kubernetes v1.16, and is only honored by servers that enable the PodOverhead feature."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='overhead', type=d.T.object)]), + '#withOverheadMixin':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='overhead', type=d.T.object)]), withOverheadMixin(overhead): { template+: { spec+: { overhead+: overhead } } }, - '#withPreemptionPolicy':: d.fn(help='"PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset. This field is beta-level, gated by the NonPreemptingPriority feature-gate."', args=[d.arg(name='preemptionPolicy', type=d.T.string)]), + '#withPreemptionPolicy':: d.fn(help='"PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset."', args=[d.arg(name='preemptionPolicy', type=d.T.string)]), withPreemptionPolicy(preemptionPolicy): { template+: { spec+: { preemptionPolicy: preemptionPolicy } } }, '#withPriority':: d.fn(help='"The priority value. Various system components use this field to find the priority of the pod. When Priority Admission Controller is enabled, it prevents users from setting this field. The admission controller populates this field from PriorityClassName. The higher the value, the higher the priority."', args=[d.arg(name='priority', type=d.T.integer)]), withPriority(priority): { template+: { spec+: { priority: priority } } }, '#withPriorityClassName':: d.fn(help="\"If specified, indicates the pod's priority. \\\"system-node-critical\\\" and \\\"system-cluster-critical\\\" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default.\"", args=[d.arg(name='priorityClassName', type=d.T.string)]), withPriorityClassName(priorityClassName): { template+: { spec+: { priorityClassName: priorityClassName } } }, - '#withReadinessGates':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. 
A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/0007-pod-ready%2B%2B.md"', args=[d.arg(name='readinessGates', type=d.T.array)]), + '#withReadinessGates':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates"', args=[d.arg(name='readinessGates', type=d.T.array)]), withReadinessGates(readinessGates): { template+: { spec+: { readinessGates: if std.isArray(v=readinessGates) then readinessGates else [readinessGates] } } }, - '#withReadinessGatesMixin':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/0007-pod-ready%2B%2B.md"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='readinessGates', type=d.T.array)]), + '#withReadinessGatesMixin':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='readinessGates', type=d.T.array)]), withReadinessGatesMixin(readinessGates): { template+: { spec+: { readinessGates+: if std.isArray(v=readinessGates) then readinessGates else [readinessGates] } } }, - '#withRestartPolicy':: d.fn(help='"Restart policy for all containers within the pod. One of Always, OnFailure, Never. Default to Always. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy"', args=[d.arg(name='restartPolicy', type=d.T.string)]), + '#withResourceClaims':: d.fn(help='"ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. The resources will be made available to those containers which consume them by name.\\n\\nThis is an alpha field and requires enabling the DynamicResourceAllocation feature gate.\\n\\nThis field is immutable."', args=[d.arg(name='resourceClaims', type=d.T.array)]), + withResourceClaims(resourceClaims): { template+: { spec+: { resourceClaims: if std.isArray(v=resourceClaims) then resourceClaims else [resourceClaims] } } }, + '#withResourceClaimsMixin':: d.fn(help='"ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. The resources will be made available to those containers which consume them by name.\\n\\nThis is an alpha field and requires enabling the DynamicResourceAllocation feature gate.\\n\\nThis field is immutable."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='resourceClaims', type=d.T.array)]), + withResourceClaimsMixin(resourceClaims): { template+: { spec+: { resourceClaims+: if std.isArray(v=resourceClaims) then resourceClaims else [resourceClaims] } } }, + '#withRestartPolicy':: d.fn(help='"Restart policy for all containers within the pod. One of Always, OnFailure, Never. 
In some contexts, only a subset of those values may be permitted. Default to Always. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy"', args=[d.arg(name='restartPolicy', type=d.T.string)]), withRestartPolicy(restartPolicy): { template+: { spec+: { restartPolicy: restartPolicy } } }, - '#withRuntimeClassName':: d.fn(help='"RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \\"legacy\\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md This is a beta feature as of Kubernetes v1.14."', args=[d.arg(name='runtimeClassName', type=d.T.string)]), + '#withRuntimeClassName':: d.fn(help='"RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \\"legacy\\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class"', args=[d.arg(name='runtimeClassName', type=d.T.string)]), withRuntimeClassName(runtimeClassName): { template+: { spec+: { runtimeClassName: runtimeClassName } } }, '#withSchedulerName':: d.fn(help='"If specified, the pod will be dispatched by specified scheduler. If not specified, the pod will be dispatched by default scheduler."', args=[d.arg(name='schedulerName', type=d.T.string)]), withSchedulerName(schedulerName): { template+: { spec+: { schedulerName: schedulerName } } }, + '#withSchedulingGates':: d.fn(help='"SchedulingGates is an opaque list of values that if specified will block scheduling the pod. If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the scheduler will not attempt to schedule the pod.\\n\\nSchedulingGates can only be set at pod creation time, and be removed only afterwards.\\n\\nThis is a beta feature enabled by the PodSchedulingReadiness feature gate."', args=[d.arg(name='schedulingGates', type=d.T.array)]), + withSchedulingGates(schedulingGates): { template+: { spec+: { schedulingGates: if std.isArray(v=schedulingGates) then schedulingGates else [schedulingGates] } } }, + '#withSchedulingGatesMixin':: d.fn(help='"SchedulingGates is an opaque list of values that if specified will block scheduling the pod. If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the scheduler will not attempt to schedule the pod.\\n\\nSchedulingGates can only be set at pod creation time, and be removed only afterwards.\\n\\nThis is a beta feature enabled by the PodSchedulingReadiness feature gate."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='schedulingGates', type=d.T.array)]), + withSchedulingGatesMixin(schedulingGates): { template+: { spec+: { schedulingGates+: if std.isArray(v=schedulingGates) then schedulingGates else [schedulingGates] } } }, '#withServiceAccount':: d.fn(help='"DeprecatedServiceAccount is a depreciated alias for ServiceAccountName. 
Deprecated: Use serviceAccountName instead."', args=[d.arg(name='serviceAccount', type=d.T.string)]), withServiceAccount(serviceAccount): { template+: { spec+: { serviceAccount: serviceAccount } } }, '#withServiceAccountName':: d.fn(help='"ServiceAccountName is the name of the ServiceAccount to use to run this pod. More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/"', args=[d.arg(name='serviceAccountName', type=d.T.string)]), diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/replicationControllerStatus.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/replicationControllerStatus.libsonnet similarity index 91% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/replicationControllerStatus.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/replicationControllerStatus.libsonnet index a0a72a9b700..b1a60d4b1d1 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/replicationControllerStatus.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/replicationControllerStatus.libsonnet @@ -13,7 +13,7 @@ withObservedGeneration(observedGeneration): { observedGeneration: observedGeneration }, '#withReadyReplicas':: d.fn(help='"The number of ready replicas for this replication controller."', args=[d.arg(name='readyReplicas', type=d.T.integer)]), withReadyReplicas(readyReplicas): { readyReplicas: readyReplicas }, - '#withReplicas':: d.fn(help='"Replicas is the most recently oberved number of replicas. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#what-is-a-replicationcontroller"', args=[d.arg(name='replicas', type=d.T.integer)]), + '#withReplicas':: d.fn(help='"Replicas is the most recently observed number of replicas. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#what-is-a-replicationcontroller"', args=[d.arg(name='replicas', type=d.T.integer)]), withReplicas(replicas): { replicas: replicas }, '#mixin': 'ignore', mixin: self, diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/resourceClaim.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/resourceClaim.libsonnet new file mode 100644 index 00000000000..b1275e4a101 --- /dev/null +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/resourceClaim.libsonnet @@ -0,0 +1,8 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='resourceClaim', url='', help='"ResourceClaim references one entry in PodSpec.ResourceClaims."'), + '#withName':: d.fn(help='"Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. 
It makes that resource available inside a container."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { name: name }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/resourceFieldSelector.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/resourceFieldSelector.libsonnet new file mode 100644 index 00000000000..27211745973 --- /dev/null +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/resourceFieldSelector.libsonnet @@ -0,0 +1,12 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='resourceFieldSelector', url='', help='"ResourceFieldSelector represents container resources (cpu, memory) and their output format"'), + '#withContainerName':: d.fn(help='"Container name: required for volumes, optional for env vars"', args=[d.arg(name='containerName', type=d.T.string)]), + withContainerName(containerName): { containerName: containerName }, + '#withDivisor':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n``` \u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n\\n\\t(Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n\\n\\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n\\n\\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e ```\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n\\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\\n\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n\\n- 1.5 will be serialized as \\\"1500m\\\" - 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. 
That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='divisor', type=d.T.string)]), + withDivisor(divisor): { divisor: divisor }, + '#withResource':: d.fn(help='"Required: resource to select"', args=[d.arg(name='resource', type=d.T.string)]), + withResource(resource): { resource: resource }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/resourceQuota.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/resourceQuota.libsonnet similarity index 86% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/resourceQuota.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/resourceQuota.libsonnet index c867c8321bb..b8fd09886ad 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/resourceQuota.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/resourceQuota.libsonnet @@ -3,12 +3,10 @@ '#':: d.pkg(name='resourceQuota', url='', help='"ResourceQuota sets aggregate quota restrictions enforced per namespace"'), '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -19,21 +17,21 @@ withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. 
The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { metadata+: { generateName: generateName } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { metadata+: { labels+: labels } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. 
This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { metadata+: { namespace: namespace } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. 
There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, @@ -41,9 +39,9 @@ withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { metadata+: { uid: uid } }, }, '#new':: d.fn(help='new returns an instance of ResourceQuota', args=[d.arg(name='name', type=d.T.string)]), diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/resourceQuotaSpec.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/resourceQuotaSpec.libsonnet similarity index 100% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/resourceQuotaSpec.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/resourceQuotaSpec.libsonnet diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/resourceQuotaStatus.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/resourceQuotaStatus.libsonnet similarity index 100% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/resourceQuotaStatus.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/resourceQuotaStatus.libsonnet diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/resourceRequirements.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/resourceRequirements.libsonnet new file mode 100644 index 00000000000..e760fe89837 --- /dev/null +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/resourceRequirements.libsonnet @@ -0,0 +1,18 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='resourceRequirements', url='', help='"ResourceRequirements describes the compute resource requirements."'), + '#withClaims':: d.fn(help='"Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container.\\n\\nThis is an alpha field and requires enabling the DynamicResourceAllocation feature gate.\\n\\nThis field is immutable. It can only be set for containers."', args=[d.arg(name='claims', type=d.T.array)]), + withClaims(claims): { claims: if std.isArray(v=claims) then claims else [claims] }, + '#withClaimsMixin':: d.fn(help='"Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container.\\n\\nThis is an alpha field and requires enabling the DynamicResourceAllocation feature gate.\\n\\nThis field is immutable. It can only be set for containers."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='claims', type=d.T.array)]), + withClaimsMixin(claims): { claims+: if std.isArray(v=claims) then claims else [claims] }, + '#withLimits':: d.fn(help='"Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/"', args=[d.arg(name='limits', type=d.T.object)]), + withLimits(limits): { limits: limits }, + '#withLimitsMixin':: d.fn(help='"Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='limits', type=d.T.object)]), + withLimitsMixin(limits): { limits+: limits }, + '#withRequests':: d.fn(help='"Requests describes the minimum amount of compute resources required. 
If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/"', args=[d.arg(name='requests', type=d.T.object)]), + withRequests(requests): { requests: requests }, + '#withRequestsMixin':: d.fn(help='"Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='requests', type=d.T.object)]), + withRequestsMixin(requests): { requests+: requests }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/scaleIOPersistentVolumeSource.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/scaleIOPersistentVolumeSource.libsonnet new file mode 100644 index 00000000000..b5b776df14d --- /dev/null +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/scaleIOPersistentVolumeSource.libsonnet @@ -0,0 +1,31 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='scaleIOPersistentVolumeSource', url='', help='"ScaleIOPersistentVolumeSource represents a persistent ScaleIO volume"'), + '#secretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), + secretRef: { + '#withName':: d.fn(help='"name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { secretRef+: { name: name } }, + '#withNamespace':: d.fn(help='"namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), + withNamespace(namespace): { secretRef+: { namespace: namespace } }, + }, + '#withFsType':: d.fn(help='"fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Default is \\"xfs\\', args=[d.arg(name='fsType', type=d.T.string)]), + withFsType(fsType): { fsType: fsType }, + '#withGateway':: d.fn(help='"gateway is the host address of the ScaleIO API Gateway."', args=[d.arg(name='gateway', type=d.T.string)]), + withGateway(gateway): { gateway: gateway }, + '#withProtectionDomain':: d.fn(help='"protectionDomain is the name of the ScaleIO Protection Domain for the configured storage."', args=[d.arg(name='protectionDomain', type=d.T.string)]), + withProtectionDomain(protectionDomain): { protectionDomain: protectionDomain }, + '#withReadOnly':: d.fn(help='"readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + withReadOnly(readOnly): { readOnly: readOnly }, + '#withSslEnabled':: d.fn(help='"sslEnabled is the flag to enable/disable SSL communication with Gateway, default false"', args=[d.arg(name='sslEnabled', type=d.T.boolean)]), + withSslEnabled(sslEnabled): { sslEnabled: sslEnabled }, + '#withStorageMode':: d.fn(help='"storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. 
Default is ThinProvisioned."', args=[d.arg(name='storageMode', type=d.T.string)]), + withStorageMode(storageMode): { storageMode: storageMode }, + '#withStoragePool':: d.fn(help='"storagePool is the ScaleIO Storage Pool associated with the protection domain."', args=[d.arg(name='storagePool', type=d.T.string)]), + withStoragePool(storagePool): { storagePool: storagePool }, + '#withSystem':: d.fn(help='"system is the name of the storage system as configured in ScaleIO."', args=[d.arg(name='system', type=d.T.string)]), + withSystem(system): { system: system }, + '#withVolumeName':: d.fn(help='"volumeName is the name of a volume already created in the ScaleIO system that is associated with this volume source."', args=[d.arg(name='volumeName', type=d.T.string)]), + withVolumeName(volumeName): { volumeName: volumeName }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/scaleIOVolumeSource.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/scaleIOVolumeSource.libsonnet new file mode 100644 index 00000000000..4cc797c41a8 --- /dev/null +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/scaleIOVolumeSource.libsonnet @@ -0,0 +1,29 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='scaleIOVolumeSource', url='', help='"ScaleIOVolumeSource represents a persistent ScaleIO volume"'), + '#secretRef':: d.obj(help='"LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace."'), + secretRef: { + '#withName':: d.fn(help='"Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { secretRef+: { name: name } }, + }, + '#withFsType':: d.fn(help='"fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Default is \\"xfs\\"."', args=[d.arg(name='fsType', type=d.T.string)]), + withFsType(fsType): { fsType: fsType }, + '#withGateway':: d.fn(help='"gateway is the host address of the ScaleIO API Gateway."', args=[d.arg(name='gateway', type=d.T.string)]), + withGateway(gateway): { gateway: gateway }, + '#withProtectionDomain':: d.fn(help='"protectionDomain is the name of the ScaleIO Protection Domain for the configured storage."', args=[d.arg(name='protectionDomain', type=d.T.string)]), + withProtectionDomain(protectionDomain): { protectionDomain: protectionDomain }, + '#withReadOnly':: d.fn(help='"readOnly Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + withReadOnly(readOnly): { readOnly: readOnly }, + '#withSslEnabled':: d.fn(help='"sslEnabled Flag enable/disable SSL communication with Gateway, default false"', args=[d.arg(name='sslEnabled', type=d.T.boolean)]), + withSslEnabled(sslEnabled): { sslEnabled: sslEnabled }, + '#withStorageMode':: d.fn(help='"storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. 
Default is ThinProvisioned."', args=[d.arg(name='storageMode', type=d.T.string)]), + withStorageMode(storageMode): { storageMode: storageMode }, + '#withStoragePool':: d.fn(help='"storagePool is the ScaleIO Storage Pool associated with the protection domain."', args=[d.arg(name='storagePool', type=d.T.string)]), + withStoragePool(storagePool): { storagePool: storagePool }, + '#withSystem':: d.fn(help='"system is the name of the storage system as configured in ScaleIO."', args=[d.arg(name='system', type=d.T.string)]), + withSystem(system): { system: system }, + '#withVolumeName':: d.fn(help='"volumeName is the name of a volume already created in the ScaleIO system that is associated with this volume source."', args=[d.arg(name='volumeName', type=d.T.string)]), + withVolumeName(volumeName): { volumeName: volumeName }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/scopeSelector.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/scopeSelector.libsonnet similarity index 100% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/scopeSelector.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/scopeSelector.libsonnet diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/scopedResourceSelectorRequirement.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/scopedResourceSelectorRequirement.libsonnet similarity index 100% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/scopedResourceSelectorRequirement.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/scopedResourceSelectorRequirement.libsonnet diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/seLinuxOptions.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/seLinuxOptions.libsonnet similarity index 100% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/seLinuxOptions.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/seLinuxOptions.libsonnet diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/seccompProfile.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/seccompProfile.libsonnet similarity index 86% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/seccompProfile.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/seccompProfile.libsonnet index 3228b65c2d6..9a34b45f5af 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/seccompProfile.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/seccompProfile.libsonnet @@ -1,7 +1,7 @@ { local d = (import 'doc-util/main.libsonnet'), '#':: d.pkg(name='seccompProfile', url='', help="\"SeccompProfile defines a pod/container's seccomp profile settings. Only one profile source may be set.\""), - '#withLocalhostProfile':: d.fn(help="\"localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. 
Must be a descending path, relative to the kubelet's configured seccomp profile location. Must only be set if type is \\\"Localhost\\\".\"", args=[d.arg(name='localhostProfile', type=d.T.string)]), + '#withLocalhostProfile':: d.fn(help="\"localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must be set if type is \\\"Localhost\\\". Must NOT be set for any other type.\"", args=[d.arg(name='localhostProfile', type=d.T.string)]), withLocalhostProfile(localhostProfile): { localhostProfile: localhostProfile }, '#withType':: d.fn(help='"type indicates which kind of seccomp profile will be applied. Valid options are:\\n\\nLocalhost - a profile defined in a file on the node should be used. RuntimeDefault - the container runtime default profile should be used. Unconfined - no profile should be applied."', args=[d.arg(name='type', type=d.T.string)]), withType(type): { type: type }, diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/secret.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/secret.libsonnet similarity index 85% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/secret.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/secret.libsonnet index 868d6a33ba7..2f3784215c7 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/secret.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/secret.libsonnet @@ -3,12 +3,10 @@ '#':: d.pkg(name='secret', url='', help='"Secret holds secret data of a certain type. The total bytes of the values in the Data field must be less than MaxSecretSize bytes."'), '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. 
More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -19,21 +17,21 @@ withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. 
The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { metadata+: { generateName: generateName } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { metadata+: { labels+: labels } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. 
This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { metadata+: { namespace: namespace } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. 
There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, @@ -41,9 +39,9 @@ withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { metadata+: { uid: uid } }, }, '#new':: d.fn(help='new returns an instance of Secret', args=[d.arg(name='name', type=d.T.string)]), @@ -61,7 +59,7 @@ withStringData(stringData): { stringData: stringData }, '#withStringDataMixin':: d.fn(help='"stringData allows specifying non-binary secret data in string form. It is provided as a write-only input field for convenience. All keys and values are merged into the data field on write, overwriting any existing values. The stringData field is never output when reading from the API."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='stringData', type=d.T.object)]), withStringDataMixin(stringData): { stringData+: stringData }, - '#withType':: d.fn(help='"Used to facilitate programmatic handling of secret data."', args=[d.arg(name='type', type=d.T.string)]), + '#withType':: d.fn(help='"Used to facilitate programmatic handling of secret data. 
More info: https://kubernetes.io/docs/concepts/configuration/secret/#secret-types"', args=[d.arg(name='type', type=d.T.string)]), withType(type): { type: type }, '#mixin': 'ignore', mixin: self, diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/secretEnvSource.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/secretEnvSource.libsonnet similarity index 100% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/secretEnvSource.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/secretEnvSource.libsonnet diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/secretKeySelector.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/secretKeySelector.libsonnet similarity index 100% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/secretKeySelector.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/secretKeySelector.libsonnet diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/secretProjection.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/secretProjection.libsonnet new file mode 100644 index 00000000000..b36accdbc13 --- /dev/null +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/secretProjection.libsonnet @@ -0,0 +1,14 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='secretProjection', url='', help="\"Adapts a secret into a projected volume.\\n\\nThe contents of the target Secret's Data field will be presented in a projected volume as files using the keys in the Data field as the file names. Note that this is identical to a secret volume source without the default mode.\""), + '#withItems':: d.fn(help="\"items if unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.\"", args=[d.arg(name='items', type=d.T.array)]), + withItems(items): { items: if std.isArray(v=items) then items else [items] }, + '#withItemsMixin':: d.fn(help="\"items if unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='items', type=d.T.array)]), + withItemsMixin(items): { items+: if std.isArray(v=items) then items else [items] }, + '#withName':: d.fn(help='"Name of the referent. 
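The secret.libsonnet hunk above is mostly documentation refreshes plus the removal of the deprecated withClusterName setter; usage is unchanged. A hedged sketch composing a Secret with only functions shown in that hunk:

local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';
local secret = k.core.v1.secret;

secret.new('grafana-admin')
+ secret.metadata.withNamespace('monitoring')
+ secret.withType('Opaque')
+ secret.withStringData({ 'admin-password': 'change-me' })  // merged into .data by the API server on write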
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { name: name }, + '#withOptional':: d.fn(help='"optional field specify whether the Secret or its key must be defined"', args=[d.arg(name='optional', type=d.T.boolean)]), + withOptional(optional): { optional: optional }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/secretReference.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/secretReference.libsonnet similarity index 80% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/secretReference.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/secretReference.libsonnet index 764d03eb2d1..de3e3d3e520 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/secretReference.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/secretReference.libsonnet @@ -1,9 +1,9 @@ { local d = (import 'doc-util/main.libsonnet'), '#':: d.pkg(name='secretReference', url='', help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), withName(name): { name: name }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { namespace: namespace }, '#mixin': 'ignore', mixin: self, diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/secretVolumeSource.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/secretVolumeSource.libsonnet new file mode 100644 index 00000000000..8e012783cec --- /dev/null +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/secretVolumeSource.libsonnet @@ -0,0 +1,16 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='secretVolumeSource', url='', help="\"Adapts a Secret into a volume.\\n\\nThe contents of the target Secret's Data field will be presented in a volume as files using the keys in the Data field as the file names. Secret volumes support ownership management and SELinux relabeling.\""), + '#withDefaultMode':: d.fn(help='"defaultMode is Optional: mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Defaults to 0644. Directories within the path are not affected by this setting. 
This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set."', args=[d.arg(name='defaultMode', type=d.T.integer)]), + withDefaultMode(defaultMode): { defaultMode: defaultMode }, + '#withItems':: d.fn(help="\"items If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.\"", args=[d.arg(name='items', type=d.T.array)]), + withItems(items): { items: if std.isArray(v=items) then items else [items] }, + '#withItemsMixin':: d.fn(help="\"items If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='items', type=d.T.array)]), + withItemsMixin(items): { items+: if std.isArray(v=items) then items else [items] }, + '#withOptional':: d.fn(help='"optional field specify whether the Secret or its keys must be defined"', args=[d.arg(name='optional', type=d.T.boolean)]), + withOptional(optional): { optional: optional }, + '#withSecretName':: d.fn(help="\"secretName is the name of the secret in the pod's namespace to use. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret\"", args=[d.arg(name='secretName', type=d.T.string)]), + withSecretName(secretName): { secretName: secretName }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/securityContext.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/securityContext.libsonnet similarity index 81% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/securityContext.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/securityContext.libsonnet index a9c9a201c4b..1d9d290c584 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/securityContext.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/securityContext.libsonnet @@ -25,7 +25,7 @@ }, '#seccompProfile':: d.obj(help="\"SeccompProfile defines a pod/container's seccomp profile settings. Only one profile source may be set.\""), seccompProfile: { - '#withLocalhostProfile':: d.fn(help="\"localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must only be set if type is \\\"Localhost\\\".\"", args=[d.arg(name='localhostProfile', type=d.T.string)]), + '#withLocalhostProfile':: d.fn(help="\"localhostProfile indicates a profile defined in a file on the node should be used. 
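secretProjection.libsonnet and secretVolumeSource.libsonnet are added as new files for 1.29 rather than detected as renames. A sketch of a pod volume entry built from the secretVolumeSource setters above; the surrounding { name, secret: ... } wrapper is the plain Kubernetes volume shape and is assumed here, since the volume helpers sit outside these lines:

local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';
local svs = k.core.v1.secretVolumeSource;

{
  name: 'tls-certs',
  secret: svs.withSecretName('example-tls')
          + svs.withDefaultMode(256)  // 0400 octal, given as decimal per the field docs
          + svs.withItems([{ key: 'tls.crt', path: 'tls.crt' }]),
}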
The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must be set if type is \\\"Localhost\\\". Must NOT be set for any other type.\"", args=[d.arg(name='localhostProfile', type=d.T.string)]), withLocalhostProfile(localhostProfile): { seccompProfile+: { localhostProfile: localhostProfile } }, '#withType':: d.fn(help='"type indicates which kind of seccomp profile will be applied. Valid options are:\\n\\nLocalhost - a profile defined in a file on the node should be used. RuntimeDefault - the container runtime default profile should be used. Unconfined - no profile should be applied."', args=[d.arg(name='type', type=d.T.string)]), withType(type): { seccompProfile+: { type: type } }, @@ -36,22 +36,24 @@ withGmsaCredentialSpec(gmsaCredentialSpec): { windowsOptions+: { gmsaCredentialSpec: gmsaCredentialSpec } }, '#withGmsaCredentialSpecName':: d.fn(help='"GMSACredentialSpecName is the name of the GMSA credential spec to use."', args=[d.arg(name='gmsaCredentialSpecName', type=d.T.string)]), withGmsaCredentialSpecName(gmsaCredentialSpecName): { windowsOptions+: { gmsaCredentialSpecName: gmsaCredentialSpecName } }, + '#withHostProcess':: d.fn(help="\"HostProcess determines if a container should be run as a 'Host Process' container. All of a Pod's containers must have the same effective HostProcess value (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). In addition, if HostProcess is true then HostNetwork must also be set to true.\"", args=[d.arg(name='hostProcess', type=d.T.boolean)]), + withHostProcess(hostProcess): { windowsOptions+: { hostProcess: hostProcess } }, '#withRunAsUserName':: d.fn(help='"The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence."', args=[d.arg(name='runAsUserName', type=d.T.string)]), withRunAsUserName(runAsUserName): { windowsOptions+: { runAsUserName: runAsUserName } }, }, - '#withAllowPrivilegeEscalation':: d.fn(help='"AllowPrivilegeEscalation controls whether a process can gain more privileges than its parent process. This bool directly controls if the no_new_privs flag will be set on the container process. AllowPrivilegeEscalation is true always when the container is: 1) run as Privileged 2) has CAP_SYS_ADMIN"', args=[d.arg(name='allowPrivilegeEscalation', type=d.T.boolean)]), + '#withAllowPrivilegeEscalation':: d.fn(help='"AllowPrivilegeEscalation controls whether a process can gain more privileges than its parent process. This bool directly controls if the no_new_privs flag will be set on the container process. AllowPrivilegeEscalation is true always when the container is: 1) run as Privileged 2) has CAP_SYS_ADMIN Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='allowPrivilegeEscalation', type=d.T.boolean)]), withAllowPrivilegeEscalation(allowPrivilegeEscalation): { allowPrivilegeEscalation: allowPrivilegeEscalation }, - '#withPrivileged':: d.fn(help='"Run container in privileged mode. Processes in privileged containers are essentially equivalent to root on the host. Defaults to false."', args=[d.arg(name='privileged', type=d.T.boolean)]), + '#withPrivileged':: d.fn(help='"Run container in privileged mode. 
Processes in privileged containers are essentially equivalent to root on the host. Defaults to false. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='privileged', type=d.T.boolean)]), withPrivileged(privileged): { privileged: privileged }, - '#withProcMount':: d.fn(help='"procMount denotes the type of proc mount to use for the containers. The default is DefaultProcMount which uses the container runtime defaults for readonly paths and masked paths. This requires the ProcMountType feature flag to be enabled."', args=[d.arg(name='procMount', type=d.T.string)]), + '#withProcMount':: d.fn(help='"procMount denotes the type of proc mount to use for the containers. The default is DefaultProcMount which uses the container runtime defaults for readonly paths and masked paths. This requires the ProcMountType feature flag to be enabled. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='procMount', type=d.T.string)]), withProcMount(procMount): { procMount: procMount }, - '#withReadOnlyRootFilesystem':: d.fn(help='"Whether this container has a read-only root filesystem. Default is false."', args=[d.arg(name='readOnlyRootFilesystem', type=d.T.boolean)]), + '#withReadOnlyRootFilesystem':: d.fn(help='"Whether this container has a read-only root filesystem. Default is false. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='readOnlyRootFilesystem', type=d.T.boolean)]), withReadOnlyRootFilesystem(readOnlyRootFilesystem): { readOnlyRootFilesystem: readOnlyRootFilesystem }, - '#withRunAsGroup':: d.fn(help='"The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence."', args=[d.arg(name='runAsGroup', type=d.T.integer)]), + '#withRunAsGroup':: d.fn(help='"The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='runAsGroup', type=d.T.integer)]), withRunAsGroup(runAsGroup): { runAsGroup: runAsGroup }, '#withRunAsNonRoot':: d.fn(help='"Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence."', args=[d.arg(name='runAsNonRoot', type=d.T.boolean)]), withRunAsNonRoot(runAsNonRoot): { runAsNonRoot: runAsNonRoot }, - '#withRunAsUser':: d.fn(help='"The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence."', args=[d.arg(name='runAsUser', type=d.T.integer)]), + '#withRunAsUser':: d.fn(help='"The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in PodSecurityContext. 
If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='runAsUser', type=d.T.integer)]), withRunAsUser(runAsUser): { runAsUser: runAsUser }, '#mixin': 'ignore', mixin: self, diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/service.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/service.libsonnet similarity index 70% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/service.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/service.libsonnet index aabed7a52eb..ede4de85b04 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/service.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/service.libsonnet @@ -3,12 +3,10 @@ '#':: d.pkg(name='service', url='', help='"Service is a named abstraction of software service (for example, mysql) consisting of local port (for example 3306) that the proxy listens on, and the selector that determines which pods will answer requests sent through the proxy."'), '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. 
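The securityContext.libsonnet hunk mainly picks up the 1.29 field docs (the spec.os.name caveats) and the new windowsOptions.withHostProcess setter. A minimal, hedged example of a hardened context using only setters visible in that hunk; attaching it to a container is left as plain Jsonnet:

local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';
local sc = k.core.v1.securityContext;

local hardened =
  sc.withAllowPrivilegeEscalation(false)
  + sc.withPrivileged(false)
  + sc.withReadOnlyRootFilesystem(true)
  + sc.withRunAsNonRoot(true)
  + sc.withRunAsUser(10001)
  + sc.withRunAsGroup(10001)
  + sc.seccompProfile.withType('RuntimeDefault');

{ securityContext: hardened }  // merge this under a container definition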
This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -19,21 +17,21 @@ withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. 
This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { metadata+: { generateName: generateName } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { metadata+: { labels+: labels } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". 
The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { metadata+: { namespace: namespace } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, @@ -41,9 +39,9 @@ withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. 
They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { metadata+: { uid: uid } }, }, '#new':: d.fn(help='new returns an instance of Service', args=[d.arg(name='name', type=d.T.string)]), @@ -61,13 +59,13 @@ withTimeoutSeconds(timeoutSeconds): { spec+: { sessionAffinityConfig+: { clientIP+: { timeoutSeconds: timeoutSeconds } } } }, }, }, - '#withAllocateLoadBalancerNodePorts':: d.fn(help='"allocateLoadBalancerNodePorts defines if NodePorts will be automatically allocated for services with type LoadBalancer. Default is \\"true\\". It may be set to \\"false\\" if the cluster load-balancer does not rely on NodePorts. allocateLoadBalancerNodePorts may only be set for services with type LoadBalancer and will be cleared if the type is changed to any other type. This field is alpha-level and is only honored by servers that enable the ServiceLBNodePortControl feature."', args=[d.arg(name='allocateLoadBalancerNodePorts', type=d.T.boolean)]), + '#withAllocateLoadBalancerNodePorts':: d.fn(help='"allocateLoadBalancerNodePorts defines if NodePorts will be automatically allocated for services with type LoadBalancer. Default is \\"true\\". It may be set to \\"false\\" if the cluster load-balancer does not rely on NodePorts. If the caller requests specific NodePorts (by specifying a value), those requests will be respected, regardless of this field. This field may only be set for services with type LoadBalancer and will be cleared if the type is changed to any other type."', args=[d.arg(name='allocateLoadBalancerNodePorts', type=d.T.boolean)]), withAllocateLoadBalancerNodePorts(allocateLoadBalancerNodePorts): { spec+: { allocateLoadBalancerNodePorts: allocateLoadBalancerNodePorts } }, '#withClusterIP':: d.fn(help='"clusterIP is the IP address of the service and is usually assigned randomly. 
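A short sketch of the Service setters from this file, here a headless Service per the withClusterIP documentation in this hunk; same vendored 1.29 import assumption as in the earlier examples:

local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';
local service = k.core.v1.service;

service.new('tempo-gossip-ring')
+ service.metadata.withNamespace('tempo')
+ service.spec.withClusterIP('None')  // headless: no virtual IP, clients resolve pod endpoints directly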
If an address is specified manually, is in-range (as per system configuration), and is not in use, it will be allocated to the service; otherwise creation of the service will fail. This field may not be changed through updates unless the type field is also being changed to ExternalName (which requires this field to be blank) or the type field is being changed from ExternalName (in which case this field may optionally be specified, as describe above). Valid values are \\"None\\", empty string (\\"\\"), or a valid IP address. Setting this to \\"None\\" makes a \\"headless service\\" (no virtual IP), which is useful when direct endpoint connections are preferred and proxying is not required. Only applies to types ClusterIP, NodePort, and LoadBalancer. If this field is specified when creating a Service of type ExternalName, creation will fail. This field will be wiped when updating a Service to type ExternalName. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies"', args=[d.arg(name='clusterIP', type=d.T.string)]), withClusterIP(clusterIP): { spec+: { clusterIP: clusterIP } }, - '#withClusterIPs':: d.fn(help='"ClusterIPs is a list of IP addresses assigned to this service, and are usually assigned randomly. If an address is specified manually, is in-range (as per system configuration), and is not in use, it will be allocated to the service; otherwise creation of the service will fail. This field may not be changed through updates unless the type field is also being changed to ExternalName (which requires this field to be empty) or the type field is being changed from ExternalName (in which case this field may optionally be specified, as describe above). Valid values are \\"None\\", empty string (\\"\\"), or a valid IP address. Setting this to \\"None\\" makes a \\"headless service\\" (no virtual IP), which is useful when direct endpoint connections are preferred and proxying is not required. Only applies to types ClusterIP, NodePort, and LoadBalancer. If this field is specified when creating a Service of type ExternalName, creation will fail. This field will be wiped when updating a Service to type ExternalName. If this field is not specified, it will be initialized from the clusterIP field. If this field is specified, clients must ensure that clusterIPs[0] and clusterIP have the same value.\\n\\nUnless the \\"IPv6DualStack\\" feature gate is enabled, this field is limited to one value, which must be the same as the clusterIP field. If the feature gate is enabled, this field may hold a maximum of two entries (dual-stack IPs, in either order). These IPs must correspond to the values of the ipFamilies field. Both clusterIPs and ipFamilies are governed by the ipFamilyPolicy field. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies"', args=[d.arg(name='clusterIPs', type=d.T.array)]), + '#withClusterIPs':: d.fn(help='"ClusterIPs is a list of IP addresses assigned to this service, and are usually assigned randomly. If an address is specified manually, is in-range (as per system configuration), and is not in use, it will be allocated to the service; otherwise creation of the service will fail. This field may not be changed through updates unless the type field is also being changed to ExternalName (which requires this field to be empty) or the type field is being changed from ExternalName (in which case this field may optionally be specified, as describe above). 
Valid values are \\"None\\", empty string (\\"\\"), or a valid IP address. Setting this to \\"None\\" makes a \\"headless service\\" (no virtual IP), which is useful when direct endpoint connections are preferred and proxying is not required. Only applies to types ClusterIP, NodePort, and LoadBalancer. If this field is specified when creating a Service of type ExternalName, creation will fail. This field will be wiped when updating a Service to type ExternalName. If this field is not specified, it will be initialized from the clusterIP field. If this field is specified, clients must ensure that clusterIPs[0] and clusterIP have the same value.\\n\\nThis field may hold a maximum of two entries (dual-stack IPs, in either order). These IPs must correspond to the values of the ipFamilies field. Both clusterIPs and ipFamilies are governed by the ipFamilyPolicy field. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies"', args=[d.arg(name='clusterIPs', type=d.T.array)]), withClusterIPs(clusterIPs): { spec+: { clusterIPs: if std.isArray(v=clusterIPs) then clusterIPs else [clusterIPs] } }, - '#withClusterIPsMixin':: d.fn(help='"ClusterIPs is a list of IP addresses assigned to this service, and are usually assigned randomly. If an address is specified manually, is in-range (as per system configuration), and is not in use, it will be allocated to the service; otherwise creation of the service will fail. This field may not be changed through updates unless the type field is also being changed to ExternalName (which requires this field to be empty) or the type field is being changed from ExternalName (in which case this field may optionally be specified, as describe above). Valid values are \\"None\\", empty string (\\"\\"), or a valid IP address. Setting this to \\"None\\" makes a \\"headless service\\" (no virtual IP), which is useful when direct endpoint connections are preferred and proxying is not required. Only applies to types ClusterIP, NodePort, and LoadBalancer. If this field is specified when creating a Service of type ExternalName, creation will fail. This field will be wiped when updating a Service to type ExternalName. If this field is not specified, it will be initialized from the clusterIP field. If this field is specified, clients must ensure that clusterIPs[0] and clusterIP have the same value.\\n\\nUnless the \\"IPv6DualStack\\" feature gate is enabled, this field is limited to one value, which must be the same as the clusterIP field. If the feature gate is enabled, this field may hold a maximum of two entries (dual-stack IPs, in either order). These IPs must correspond to the values of the ipFamilies field. Both clusterIPs and ipFamilies are governed by the ipFamilyPolicy field. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='clusterIPs', type=d.T.array)]), + '#withClusterIPsMixin':: d.fn(help='"ClusterIPs is a list of IP addresses assigned to this service, and are usually assigned randomly. If an address is specified manually, is in-range (as per system configuration), and is not in use, it will be allocated to the service; otherwise creation of the service will fail. 
This field may not be changed through updates unless the type field is also being changed to ExternalName (which requires this field to be empty) or the type field is being changed from ExternalName (in which case this field may optionally be specified, as describe above). Valid values are \\"None\\", empty string (\\"\\"), or a valid IP address. Setting this to \\"None\\" makes a \\"headless service\\" (no virtual IP), which is useful when direct endpoint connections are preferred and proxying is not required. Only applies to types ClusterIP, NodePort, and LoadBalancer. If this field is specified when creating a Service of type ExternalName, creation will fail. This field will be wiped when updating a Service to type ExternalName. If this field is not specified, it will be initialized from the clusterIP field. If this field is specified, clients must ensure that clusterIPs[0] and clusterIP have the same value.\\n\\nThis field may hold a maximum of two entries (dual-stack IPs, in either order). These IPs must correspond to the values of the ipFamilies field. Both clusterIPs and ipFamilies are governed by the ipFamilyPolicy field. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='clusterIPs', type=d.T.array)]), withClusterIPsMixin(clusterIPs): { spec+: { clusterIPs+: if std.isArray(v=clusterIPs) then clusterIPs else [clusterIPs] } }, '#withExternalIPs':: d.fn(help='"externalIPs is a list of IP addresses for which nodes in the cluster will also accept traffic for this service. These IPs are not managed by Kubernetes. The user is responsible for ensuring that traffic arrives at a node with this IP. A common example is external load-balancers that are not part of the Kubernetes system."', args=[d.arg(name='externalIPs', type=d.T.array)]), withExternalIPs(externalIPs): { spec+: { externalIPs: if std.isArray(v=externalIPs) then externalIPs else [externalIPs] } }, @@ -75,25 +73,25 @@ withExternalIPsMixin(externalIPs): { spec+: { externalIPs+: if std.isArray(v=externalIPs) then externalIPs else [externalIPs] } }, '#withExternalName':: d.fn(help='"externalName is the external reference that discovery mechanisms will return as an alias for this service (e.g. a DNS CNAME record). No proxying will be involved. Must be a lowercase RFC-1123 hostname (https://tools.ietf.org/html/rfc1123) and requires `type` to be \\"ExternalName\\"."', args=[d.arg(name='externalName', type=d.T.string)]), withExternalName(externalName): { spec+: { externalName: externalName } }, - '#withExternalTrafficPolicy':: d.fn(help='"externalTrafficPolicy denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints. \\"Local\\" preserves the client source IP and avoids a second hop for LoadBalancer and Nodeport type services, but risks potentially imbalanced traffic spreading. \\"Cluster\\" obscures the client source IP and may cause a second hop to another node, but should have good overall load-spreading."', args=[d.arg(name='externalTrafficPolicy', type=d.T.string)]), + '#withExternalTrafficPolicy':: d.fn(help="\"externalTrafficPolicy describes how nodes distribute service traffic they receive on one of the Service's \\\"externally-facing\\\" addresses (NodePorts, ExternalIPs, and LoadBalancer IPs). 
If set to \\\"Local\\\", the proxy will configure the service in a way that assumes that external load balancers will take care of balancing the service traffic between nodes, and so each node will deliver traffic only to the node-local endpoints of the service, without masquerading the client source IP. (Traffic mistakenly sent to a node with no endpoints will be dropped.) The default value, \\\"Cluster\\\", uses the standard behavior of routing to all endpoints evenly (possibly modified by topology and other features). Note that traffic sent to an External IP or LoadBalancer IP from within the cluster will always get \\\"Cluster\\\" semantics, but clients sending to a NodePort from within the cluster may need to take traffic policy into account when picking a node.\"", args=[d.arg(name='externalTrafficPolicy', type=d.T.string)]), withExternalTrafficPolicy(externalTrafficPolicy): { spec+: { externalTrafficPolicy: externalTrafficPolicy } }, - '#withHealthCheckNodePort':: d.fn(help='"healthCheckNodePort specifies the healthcheck nodePort for the service. This only applies when type is set to LoadBalancer and externalTrafficPolicy is set to Local. If a value is specified, is in-range, and is not in use, it will be used. If not specified, a value will be automatically allocated. External systems (e.g. load-balancers) can use this port to determine if a given node holds endpoints for this service or not. If this field is specified when creating a Service which does not need it, creation will fail. This field will be wiped when updating a Service to no longer need it (e.g. changing type)."', args=[d.arg(name='healthCheckNodePort', type=d.T.integer)]), + '#withHealthCheckNodePort':: d.fn(help='"healthCheckNodePort specifies the healthcheck nodePort for the service. This only applies when type is set to LoadBalancer and externalTrafficPolicy is set to Local. If a value is specified, is in-range, and is not in use, it will be used. If not specified, a value will be automatically allocated. External systems (e.g. load-balancers) can use this port to determine if a given node holds endpoints for this service or not. If this field is specified when creating a Service which does not need it, creation will fail. This field will be wiped when updating a Service to no longer need it (e.g. changing type). This field cannot be updated once set."', args=[d.arg(name='healthCheckNodePort', type=d.T.integer)]), withHealthCheckNodePort(healthCheckNodePort): { spec+: { healthCheckNodePort: healthCheckNodePort } }, - '#withInternalTrafficPolicy':: d.fn(help='"InternalTrafficPolicy specifies if the cluster internal traffic should be routed to all endpoints or node-local endpoints only. \\"Cluster\\" routes internal traffic to a Service to all endpoints. \\"Local\\" routes traffic to node-local endpoints only, traffic is dropped if no node-local endpoints are ready. The default value is \\"Cluster\\"."', args=[d.arg(name='internalTrafficPolicy', type=d.T.string)]), + '#withInternalTrafficPolicy':: d.fn(help='"InternalTrafficPolicy describes how nodes distribute service traffic they receive on the ClusterIP. If set to \\"Local\\", the proxy will assume that pods only want to talk to endpoints of the service on the same node as the pod, dropping the traffic if there are no local endpoints. 
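The externalTrafficPolicy and internalTrafficPolicy help strings gain the longer 1.29 wording, but the setters behave the same. A hedged sketch; the Service is assumed to be of a type with externally facing addresses (a spec.withType setter is presumably generated as well, but it is outside the lines shown here):

local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';
local service = k.core.v1.service;

service.new('tempo-distributor')
+ service.spec.withExternalTrafficPolicy('Local')    // keep client source IPs; only node-local endpoints serve
+ service.spec.withInternalTrafficPolicy('Cluster')  // default: route to all ready endpoints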
The default value, \\"Cluster\\", uses the standard behavior of routing to all endpoints evenly (possibly modified by topology and other features)."', args=[d.arg(name='internalTrafficPolicy', type=d.T.string)]), withInternalTrafficPolicy(internalTrafficPolicy): { spec+: { internalTrafficPolicy: internalTrafficPolicy } }, - '#withIpFamilies':: d.fn(help='"IPFamilies is a list of IP families (e.g. IPv4, IPv6) assigned to this service, and is gated by the \\"IPv6DualStack\\" feature gate. This field is usually assigned automatically based on cluster configuration and the ipFamilyPolicy field. If this field is specified manually, the requested family is available in the cluster, and ipFamilyPolicy allows it, it will be used; otherwise creation of the service will fail. This field is conditionally mutable: it allows for adding or removing a secondary IP family, but it does not allow changing the primary IP family of the Service. Valid values are \\"IPv4\\" and \\"IPv6\\". This field only applies to Services of types ClusterIP, NodePort, and LoadBalancer, and does apply to \\"headless\\" services. This field will be wiped when updating a Service to type ExternalName.\\n\\nThis field may hold a maximum of two entries (dual-stack families, in either order). These families must correspond to the values of the clusterIPs field, if specified. Both clusterIPs and ipFamilies are governed by the ipFamilyPolicy field."', args=[d.arg(name='ipFamilies', type=d.T.array)]), + '#withIpFamilies':: d.fn(help='"IPFamilies is a list of IP families (e.g. IPv4, IPv6) assigned to this service. This field is usually assigned automatically based on cluster configuration and the ipFamilyPolicy field. If this field is specified manually, the requested family is available in the cluster, and ipFamilyPolicy allows it, it will be used; otherwise creation of the service will fail. This field is conditionally mutable: it allows for adding or removing a secondary IP family, but it does not allow changing the primary IP family of the Service. Valid values are \\"IPv4\\" and \\"IPv6\\". This field only applies to Services of types ClusterIP, NodePort, and LoadBalancer, and does apply to \\"headless\\" services. This field will be wiped when updating a Service to type ExternalName.\\n\\nThis field may hold a maximum of two entries (dual-stack families, in either order). These families must correspond to the values of the clusterIPs field, if specified. Both clusterIPs and ipFamilies are governed by the ipFamilyPolicy field."', args=[d.arg(name='ipFamilies', type=d.T.array)]), withIpFamilies(ipFamilies): { spec+: { ipFamilies: if std.isArray(v=ipFamilies) then ipFamilies else [ipFamilies] } }, - '#withIpFamiliesMixin':: d.fn(help='"IPFamilies is a list of IP families (e.g. IPv4, IPv6) assigned to this service, and is gated by the \\"IPv6DualStack\\" feature gate. This field is usually assigned automatically based on cluster configuration and the ipFamilyPolicy field. If this field is specified manually, the requested family is available in the cluster, and ipFamilyPolicy allows it, it will be used; otherwise creation of the service will fail. This field is conditionally mutable: it allows for adding or removing a secondary IP family, but it does not allow changing the primary IP family of the Service. Valid values are \\"IPv4\\" and \\"IPv6\\". This field only applies to Services of types ClusterIP, NodePort, and LoadBalancer, and does apply to \\"headless\\" services. 
This field will be wiped when updating a Service to type ExternalName.\\n\\nThis field may hold a maximum of two entries (dual-stack families, in either order). These families must correspond to the values of the clusterIPs field, if specified. Both clusterIPs and ipFamilies are governed by the ipFamilyPolicy field."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ipFamilies', type=d.T.array)]), + '#withIpFamiliesMixin':: d.fn(help='"IPFamilies is a list of IP families (e.g. IPv4, IPv6) assigned to this service. This field is usually assigned automatically based on cluster configuration and the ipFamilyPolicy field. If this field is specified manually, the requested family is available in the cluster, and ipFamilyPolicy allows it, it will be used; otherwise creation of the service will fail. This field is conditionally mutable: it allows for adding or removing a secondary IP family, but it does not allow changing the primary IP family of the Service. Valid values are \\"IPv4\\" and \\"IPv6\\". This field only applies to Services of types ClusterIP, NodePort, and LoadBalancer, and does apply to \\"headless\\" services. This field will be wiped when updating a Service to type ExternalName.\\n\\nThis field may hold a maximum of two entries (dual-stack families, in either order). These families must correspond to the values of the clusterIPs field, if specified. Both clusterIPs and ipFamilies are governed by the ipFamilyPolicy field."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ipFamilies', type=d.T.array)]), withIpFamiliesMixin(ipFamilies): { spec+: { ipFamilies+: if std.isArray(v=ipFamilies) then ipFamilies else [ipFamilies] } }, - '#withIpFamilyPolicy':: d.fn(help='"IPFamilyPolicy represents the dual-stack-ness requested or required by this Service, and is gated by the \\"IPv6DualStack\\" feature gate. If there is no value provided, then this field will be set to SingleStack. Services can be \\"SingleStack\\" (a single IP family), \\"PreferDualStack\\" (two IP families on dual-stack configured clusters or a single IP family on single-stack clusters), or \\"RequireDualStack\\" (two IP families on dual-stack configured clusters, otherwise fail). The ipFamilies and clusterIPs fields depend on the value of this field. This field will be wiped when updating a service to type ExternalName."', args=[d.arg(name='ipFamilyPolicy', type=d.T.string)]), + '#withIpFamilyPolicy':: d.fn(help='"IPFamilyPolicy represents the dual-stack-ness requested or required by this Service. If there is no value provided, then this field will be set to SingleStack. Services can be \\"SingleStack\\" (a single IP family), \\"PreferDualStack\\" (two IP families on dual-stack configured clusters or a single IP family on single-stack clusters), or \\"RequireDualStack\\" (two IP families on dual-stack configured clusters, otherwise fail). The ipFamilies and clusterIPs fields depend on the value of this field. This field will be wiped when updating a service to type ExternalName."', args=[d.arg(name='ipFamilyPolicy', type=d.T.string)]), withIpFamilyPolicy(ipFamilyPolicy): { spec+: { ipFamilyPolicy: ipFamilyPolicy } }, '#withLoadBalancerClass':: d.fn(help="\"loadBalancerClass is the class of the load balancer implementation this Service belongs to. If specified, the value of this field must be a label-style identifier, with an optional prefix, e.g. \\\"internal-vip\\\" or \\\"example.com/internal-vip\\\". Unprefixed names are reserved for end-users. 
This field can only be set when the Service type is 'LoadBalancer'. If not set, the default load balancer implementation is used, today this is typically done through the cloud provider integration, but should apply for any default implementation. If set, it is assumed that a load balancer implementation is watching for Services with a matching class. Any default load balancer implementation (e.g. cloud providers) should ignore Services that set this field. This field can only be set when creating or updating a Service to type 'LoadBalancer'. Once set, it can not be changed. This field will be wiped when a service is updated to a non 'LoadBalancer' type.\"", args=[d.arg(name='loadBalancerClass', type=d.T.string)]), withLoadBalancerClass(loadBalancerClass): { spec+: { loadBalancerClass: loadBalancerClass } }, - '#withLoadBalancerIP':: d.fn(help='"Only applies to Service Type: LoadBalancer LoadBalancer will get created with the IP specified in this field. This feature depends on whether the underlying cloud-provider supports specifying the loadBalancerIP when a load balancer is created. This field will be ignored if the cloud-provider does not support the feature."', args=[d.arg(name='loadBalancerIP', type=d.T.string)]), + '#withLoadBalancerIP':: d.fn(help='"Only applies to Service Type: LoadBalancer. This feature depends on whether the underlying cloud-provider supports specifying the loadBalancerIP when a load balancer is created. This field will be ignored if the cloud-provider does not support the feature. Deprecated: This field was under-specified and its meaning varies across implementations. Using it is non-portable and it may not support dual-stack. Users are encouraged to use implementation-specific annotations when available."', args=[d.arg(name='loadBalancerIP', type=d.T.string)]), withLoadBalancerIP(loadBalancerIP): { spec+: { loadBalancerIP: loadBalancerIP } }, - '#withLoadBalancerSourceRanges':: d.fn(help='"If specified and supported by the platform, this will restrict traffic through the cloud-provider load-balancer will be restricted to the specified client IPs. This field will be ignored if the cloud-provider does not support the feature.\\" More info: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/"', args=[d.arg(name='loadBalancerSourceRanges', type=d.T.array)]), + '#withLoadBalancerSourceRanges':: d.fn(help='"If specified and supported by the platform, this will restrict traffic through the cloud-provider load-balancer will be restricted to the specified client IPs. This field will be ignored if the cloud-provider does not support the feature.\\" More info: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/"', args=[d.arg(name='loadBalancerSourceRanges', type=d.T.array)]), withLoadBalancerSourceRanges(loadBalancerSourceRanges): { spec+: { loadBalancerSourceRanges: if std.isArray(v=loadBalancerSourceRanges) then loadBalancerSourceRanges else [loadBalancerSourceRanges] } }, - '#withLoadBalancerSourceRangesMixin':: d.fn(help='"If specified and supported by the platform, this will restrict traffic through the cloud-provider load-balancer will be restricted to the specified client IPs. 
This field will be ignored if the cloud-provider does not support the feature.\\" More info: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='loadBalancerSourceRanges', type=d.T.array)]), + '#withLoadBalancerSourceRangesMixin':: d.fn(help='"If specified and supported by the platform, this will restrict traffic through the cloud-provider load-balancer will be restricted to the specified client IPs. This field will be ignored if the cloud-provider does not support the feature.\\" More info: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='loadBalancerSourceRanges', type=d.T.array)]), withLoadBalancerSourceRangesMixin(loadBalancerSourceRanges): { spec+: { loadBalancerSourceRanges+: if std.isArray(v=loadBalancerSourceRanges) then loadBalancerSourceRanges else [loadBalancerSourceRanges] } }, '#withPorts':: d.fn(help='"The list of ports that are exposed by this service. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies"', args=[d.arg(name='ports', type=d.T.array)]), withPorts(ports): { spec+: { ports: if std.isArray(v=ports) then ports else [ports] } }, @@ -107,10 +105,6 @@ withSelectorMixin(selector): { spec+: { selector+: selector } }, '#withSessionAffinity':: d.fn(help='"Supports \\"ClientIP\\" and \\"None\\". Used to maintain session affinity. Enable client IP based session affinity. Must be ClientIP or None. Defaults to None. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies"', args=[d.arg(name='sessionAffinity', type=d.T.string)]), withSessionAffinity(sessionAffinity): { spec+: { sessionAffinity: sessionAffinity } }, - '#withTopologyKeys':: d.fn(help='"topologyKeys is a preference-order list of topology keys which implementations of services should use to preferentially sort endpoints when accessing this Service, it can not be used at the same time as externalTrafficPolicy=Local. Topology keys must be valid label keys and at most 16 keys may be specified. Endpoints are chosen based on the first topology key with available backends. If this field is specified and all entries have no backends that match the topology of the client, the service has no backends for that client and connections should fail. The special value \\"*\\" may be used to mean \\"any topology\\". This catch-all value, if used, only makes sense as the last value in the list. If this is not specified or empty, no topology constraints will be applied. This field is alpha-level and is only honored by servers that enable the ServiceTopology feature. This field is deprecated and will be removed in a future version."', args=[d.arg(name='topologyKeys', type=d.T.array)]), - withTopologyKeys(topologyKeys): { spec+: { topologyKeys: if std.isArray(v=topologyKeys) then topologyKeys else [topologyKeys] } }, - '#withTopologyKeysMixin':: d.fn(help='"topologyKeys is a preference-order list of topology keys which implementations of services should use to preferentially sort endpoints when accessing this Service, it can not be used at the same time as externalTrafficPolicy=Local. Topology keys must be valid label keys and at most 16 keys may be specified. Endpoints are chosen based on the first topology key with available backends. 
If this field is specified and all entries have no backends that match the topology of the client, the service has no backends for that client and connections should fail. The special value \\"*\\" may be used to mean \\"any topology\\". This catch-all value, if used, only makes sense as the last value in the list. If this is not specified or empty, no topology constraints will be applied. This field is alpha-level and is only honored by servers that enable the ServiceTopology feature. This field is deprecated and will be removed in a future version."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='topologyKeys', type=d.T.array)]),
-    withTopologyKeysMixin(topologyKeys): { spec+: { topologyKeys+: if std.isArray(v=topologyKeys) then topologyKeys else [topologyKeys] } },
     '#withType':: d.fn(help='"type determines how the Service is exposed. Defaults to ClusterIP. Valid options are ExternalName, ClusterIP, NodePort, and LoadBalancer. \\"ClusterIP\\" allocates a cluster-internal IP address for load-balancing to endpoints. Endpoints are determined by the selector or if that is not specified, by manual construction of an Endpoints object or EndpointSlice objects. If clusterIP is \\"None\\", no virtual IP is allocated and the endpoints are published as a set of endpoints rather than a virtual IP. \\"NodePort\\" builds on ClusterIP and allocates a port on every node which routes to the same endpoints as the clusterIP. \\"LoadBalancer\\" builds on NodePort and creates an external load-balancer (if supported in the current cloud) which routes to the same endpoints as the clusterIP. \\"ExternalName\\" aliases this service to the specified externalName. Several other fields do not apply to ExternalName services. More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types"', args=[d.arg(name='type', type=d.T.string)]),
     withType(type): { spec+: { type: type } },
   },
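Reviewer aid, not part of the vendored files: a minimal Jsonnet sketch of how the service spec setters documented in the hunk above are typically combined. The import path and the service.new(name) constructor follow the library's usual conventions and are assumed here rather than shown in this diff.

// Illustrative sketch only. Assumes the conventional main.libsonnet entrypoint
// of the vendored 1.29 tree and the library's usual service.new(name) constructor.
local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';
local service = k.core.v1.service;

{
  frontend_service:
    service.new('frontend')  // assumed constructor; metadata/name only
    // Setters documented in the hunk above; each merges into spec+.
    + service.spec.withType('LoadBalancer')
    + service.spec.withExternalTrafficPolicy('Local')
    + service.spec.withIpFamilyPolicy('PreferDualStack')
    + service.spec.withIpFamilies(['IPv4', 'IPv6']),
}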
diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/serviceAccount.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/serviceAccount.libsonnet
similarity index 81%
rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/serviceAccount.libsonnet
rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/serviceAccount.libsonnet
index f67ff388c54..c8fd262c34f 100644
--- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/serviceAccount.libsonnet
+++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/serviceAccount.libsonnet
@@ -3,12 +3,10 @@
   '#':: d.pkg(name='serviceAccount', url='', help='"ServiceAccount binds together: * a name, understood by users, and perhaps by peripheral systems, for an identity * a principal that can be authenticated and authorized * a set of secrets"'),
   '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'),
   metadata: {
-    '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]),
+    '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]),
     withAnnotations(annotations): { metadata+: { annotations: annotations } },
-    '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]),
+    '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]),
     withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } },
-    '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]),
-    withClusterName(clusterName): { metadata+: { clusterName: clusterName } },
     '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]),
     withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } },
     '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]),
@@ -19,21 +17,21 @@
     withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } },
     '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. 
If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { metadata+: { generateName: generateName } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { metadata+: { labels+: labels } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. 
Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { metadata+: { namespace: namespace } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, @@ -41,9 +39,9 @@ withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. 
Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { metadata+: { uid: uid } }, }, '#new':: d.fn(help='new returns an instance of ServiceAccount', args=[d.arg(name='name', type=d.T.string)]), @@ -57,9 +55,9 @@ withImagePullSecrets(imagePullSecrets): { imagePullSecrets: if std.isArray(v=imagePullSecrets) then imagePullSecrets else [imagePullSecrets] }, '#withImagePullSecretsMixin':: d.fn(help='"ImagePullSecrets is a list of references to secrets in the same namespace to use for pulling any images in pods that reference this ServiceAccount. ImagePullSecrets are distinct from Secrets because Secrets can be mounted in the pod, but ImagePullSecrets are only accessed by the kubelet. More info: https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), withImagePullSecretsMixin(imagePullSecrets): { imagePullSecrets+: if std.isArray(v=imagePullSecrets) then imagePullSecrets else [imagePullSecrets] }, - '#withSecrets':: d.fn(help='"Secrets is the list of secrets allowed to be used by pods running using this ServiceAccount. More info: https://kubernetes.io/docs/concepts/configuration/secret"', args=[d.arg(name='secrets', type=d.T.array)]), + '#withSecrets':: d.fn(help='"Secrets is a list of the secrets in the same namespace that pods running using this ServiceAccount are allowed to use. Pods are only limited to this list if this service account has a \\"kubernetes.io/enforce-mountable-secrets\\" annotation set to \\"true\\". This field should not be used to find auto-generated service account token secrets for use outside of pods. Instead, tokens can be requested directly using the TokenRequest API, or service account token secrets can be manually created. More info: https://kubernetes.io/docs/concepts/configuration/secret"', args=[d.arg(name='secrets', type=d.T.array)]), withSecrets(secrets): { secrets: if std.isArray(v=secrets) then secrets else [secrets] }, - '#withSecretsMixin':: d.fn(help='"Secrets is the list of secrets allowed to be used by pods running using this ServiceAccount. More info: https://kubernetes.io/docs/concepts/configuration/secret"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='secrets', type=d.T.array)]), + '#withSecretsMixin':: d.fn(help='"Secrets is a list of the secrets in the same namespace that pods running using this ServiceAccount are allowed to use. Pods are only limited to this list if this service account has a \\"kubernetes.io/enforce-mountable-secrets\\" annotation set to \\"true\\". This field should not be used to find auto-generated service account token secrets for use outside of pods. Instead, tokens can be requested directly using the TokenRequest API, or service account token secrets can be manually created. 
More info: https://kubernetes.io/docs/concepts/configuration/secret"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='secrets', type=d.T.array)]),
   withSecretsMixin(secrets): { secrets+: if std.isArray(v=secrets) then secrets else [secrets] },
   '#mixin': 'ignore',
   mixin: self,
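For orientation between the vendored files, a hedged usage sketch (not taken from the patch itself): the serviceAccount functions documented in the hunk above compose as shown below. The import path is assumed from the vendored tree layout, and the annotation key and secret name are purely hypothetical.

// Illustrative sketch only; the import path and the names are assumptions.
local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';
local serviceAccount = k.core.v1.serviceAccount;

{
  tempo_service_account:
    serviceAccount.new('tempo')  // '#new' is documented in the hunk above
    + serviceAccount.metadata.withAnnotations({ 'example.com/team': 'tempo' })  // hypothetical annotation
    + serviceAccount.withImagePullSecrets([{ name: 'registry-credentials' }]),  // hypothetical pull secret
}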
diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/serviceAccountTokenProjection.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/serviceAccountTokenProjection.libsonnet
similarity index 87%
rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/serviceAccountTokenProjection.libsonnet
rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/serviceAccountTokenProjection.libsonnet
index 8b1dac4db2b..17e6b902215 100644
--- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/serviceAccountTokenProjection.libsonnet
+++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/serviceAccountTokenProjection.libsonnet
@@ -1,11 +1,11 @@
 {
   local d = (import 'doc-util/main.libsonnet'),
   '#':: d.pkg(name='serviceAccountTokenProjection', url='', help='"ServiceAccountTokenProjection represents a projected service account token volume. This projection can be used to insert a service account token into the pods runtime filesystem for use against APIs (Kubernetes API Server or otherwise)."'),
-  '#withAudience':: d.fn(help='"Audience is the intended audience of the token. A recipient of a token must identify itself with an identifier specified in the audience of the token, and otherwise should reject the token. The audience defaults to the identifier of the apiserver."', args=[d.arg(name='audience', type=d.T.string)]),
+  '#withAudience':: d.fn(help='"audience is the intended audience of the token. A recipient of a token must identify itself with an identifier specified in the audience of the token, and otherwise should reject the token. The audience defaults to the identifier of the apiserver."', args=[d.arg(name='audience', type=d.T.string)]),
   withAudience(audience): { audience: audience },
-  '#withExpirationSeconds':: d.fn(help='"ExpirationSeconds is the requested duration of validity of the service account token. As the token approaches expiration, the kubelet volume plugin will proactively rotate the service account token. The kubelet will start trying to rotate the token if the token is older than 80 percent of its time to live or if the token is older than 24 hours.Defaults to 1 hour and must be at least 10 minutes."', args=[d.arg(name='expirationSeconds', type=d.T.integer)]),
+  '#withExpirationSeconds':: d.fn(help='"expirationSeconds is the requested duration of validity of the service account token. As the token approaches expiration, the kubelet volume plugin will proactively rotate the service account token. The kubelet will start trying to rotate the token if the token is older than 80 percent of its time to live or if the token is older than 24 hours.Defaults to 1 hour and must be at least 10 minutes."', args=[d.arg(name='expirationSeconds', type=d.T.integer)]),
   withExpirationSeconds(expirationSeconds): { expirationSeconds: expirationSeconds },
-  '#withPath':: d.fn(help='"Path is the path relative to the mount point of the file to project the token into."', args=[d.arg(name='path', type=d.T.string)]),
+  '#withPath':: d.fn(help='"path is the path relative to the mount point of the file to project the token into."', args=[d.arg(name='path', type=d.T.string)]),
   withPath(path): { path: path },
   '#mixin': 'ignore',
   mixin: self,
diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/servicePort.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/servicePort.libsonnet
similarity index 68%
rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/servicePort.libsonnet
rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/servicePort.libsonnet
index 69d7a842877..5a1fa5a5a04 100644
--- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/servicePort.libsonnet
+++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/servicePort.libsonnet
@@ -1,7 +1,7 @@
 {
   local d = (import 'doc-util/main.libsonnet'),
   '#':: d.pkg(name='servicePort', url='', help="\"ServicePort contains information on service's port.\""),
-  '#withAppProtocol':: d.fn(help='"The application protocol for this port. This field follows standard Kubernetes label syntax. Un-prefixed names are reserved for IANA standard service names (as per RFC-6335 and http://www.iana.org/assignments/service-names). Non-standard protocols should use prefixed names such as mycompany.com/my-custom-protocol. This is a beta field that is guarded by the ServiceAppProtocol feature gate and enabled by default."', args=[d.arg(name='appProtocol', type=d.T.string)]),
+  '#withAppProtocol':: d.fn(help="\"The application protocol for this port. This is used as a hint for implementations to offer richer behavior for protocols that they understand. This field follows standard Kubernetes label syntax. Valid values are either:\\n\\n* Un-prefixed protocol names - reserved for IANA standard service names (as per RFC-6335 and https://www.iana.org/assignments/service-names).\\n\\n* Kubernetes-defined prefixed names:\\n * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior-\\n * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455\\n * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455\\n\\n* Other protocols should use implementation-defined prefixed names such as mycompany.com/my-custom-protocol.\"", args=[d.arg(name='appProtocol', type=d.T.string)]),
   withAppProtocol(appProtocol): { appProtocol: appProtocol },
   '#withName':: d.fn(help="\"The name of this port within the service. This must be a DNS_LABEL. All ports within a ServiceSpec must have unique names. When considering the endpoints for a Service, this must match the 'name' field in the EndpointPort. Optional if only one ServicePort is defined on this service.\"", args=[d.arg(name='name', type=d.T.string)]),
   withName(name): { name: name },
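One more reviewer-oriented sketch (not part of the vendored files): combining the servicePort setters from the hunk above with service.spec.withPorts from the service hunk earlier in this patch. withPort is assumed to be the generated setter for the ServicePort 'port' field, and the import path, constructor, and names are likewise assumptions.

// Illustrative sketch only; withPort, service.new and the import path are assumptions.
local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';
local service = k.core.v1.service;
local servicePort = k.core.v1.servicePort;

{
  http_port::
    servicePort.withName('http')
    + servicePort.withPort(80)                            // assumed generated setter for 'port'
    + servicePort.withAppProtocol('kubernetes.io/h2c'),   // prefixed name listed in the help text above

  frontend_service:
    service.new('frontend')                               // assumed constructor, as in the earlier sketch
    + service.spec.withPorts([self.http_port]),
}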
diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/serviceSpec.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/serviceSpec.libsonnet
similarity index 63%
rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/serviceSpec.libsonnet
rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/serviceSpec.libsonnet
index 8d3b794393a..0f8645393d7 100644
--- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/serviceSpec.libsonnet
+++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/serviceSpec.libsonnet
@@ -9,13 +9,13 @@
       withTimeoutSeconds(timeoutSeconds): { sessionAffinityConfig+: { clientIP+: { timeoutSeconds: timeoutSeconds } } },
     },
   },
-  '#withAllocateLoadBalancerNodePorts':: d.fn(help='"allocateLoadBalancerNodePorts defines if NodePorts will be automatically allocated for services with type LoadBalancer. Default is \\"true\\". It may be set to \\"false\\" if the cluster load-balancer does not rely on NodePorts. allocateLoadBalancerNodePorts may only be set for services with type LoadBalancer and will be cleared if the type is changed to any other type. This field is alpha-level and is only honored by servers that enable the ServiceLBNodePortControl feature."', args=[d.arg(name='allocateLoadBalancerNodePorts', type=d.T.boolean)]),
+  '#withAllocateLoadBalancerNodePorts':: d.fn(help='"allocateLoadBalancerNodePorts defines if NodePorts will be automatically allocated for services with type LoadBalancer. Default is \\"true\\". It may be set to \\"false\\" if the cluster load-balancer does not rely on NodePorts. If the caller requests specific NodePorts (by specifying a value), those requests will be respected, regardless of this field. This field may only be set for services with type LoadBalancer and will be cleared if the type is changed to any other type."', args=[d.arg(name='allocateLoadBalancerNodePorts', type=d.T.boolean)]),
   withAllocateLoadBalancerNodePorts(allocateLoadBalancerNodePorts): { allocateLoadBalancerNodePorts: allocateLoadBalancerNodePorts },
   '#withClusterIP':: d.fn(help='"clusterIP is the IP address of the service and is usually assigned randomly. If an address is specified manually, is in-range (as per system configuration), and is not in use, it will be allocated to the service; otherwise creation of the service will fail. This field may not be changed through updates unless the type field is also being changed to ExternalName (which requires this field to be blank) or the type field is being changed from ExternalName (in which case this field may optionally be specified, as describe above). Valid values are \\"None\\", empty string (\\"\\"), or a valid IP address. Setting this to \\"None\\" makes a \\"headless service\\" (no virtual IP), which is useful when direct endpoint connections are preferred and proxying is not required. Only applies to types ClusterIP, NodePort, and LoadBalancer. If this field is specified when creating a Service of type ExternalName, creation will fail. This field will be wiped when updating a Service to type ExternalName. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies"', args=[d.arg(name='clusterIP', type=d.T.string)]),
   withClusterIP(clusterIP): { clusterIP: clusterIP },
-  '#withClusterIPs':: d.fn(help='"ClusterIPs is a list of IP addresses assigned to this service, and are usually assigned randomly. If an address is specified manually, is in-range (as per system configuration), and is not in use, it will be allocated to the service; otherwise creation of the service will fail. This field may not be changed through updates unless the type field is also being changed to ExternalName (which requires this field to be empty) or the type field is being changed from ExternalName (in which case this field may optionally be specified, as describe above). Valid values are \\"None\\", empty string (\\"\\"), or a valid IP address. Setting this to \\"None\\" makes a \\"headless service\\" (no virtual IP), which is useful when direct endpoint connections are preferred and proxying is not required. Only applies to types ClusterIP, NodePort, and LoadBalancer. If this field is specified when creating a Service of type ExternalName, creation will fail. This field will be wiped when updating a Service to type ExternalName. If this field is not specified, it will be initialized from the clusterIP field. If this field is specified, clients must ensure that clusterIPs[0] and clusterIP have the same value.\\n\\nUnless the \\"IPv6DualStack\\" feature gate is enabled, this field is limited to one value, which must be the same as the clusterIP field. If the feature gate is enabled, this field may hold a maximum of two entries (dual-stack IPs, in either order). These IPs must correspond to the values of the ipFamilies field. Both clusterIPs and ipFamilies are governed by the ipFamilyPolicy field. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies"', args=[d.arg(name='clusterIPs', type=d.T.array)]),
+  '#withClusterIPs':: d.fn(help='"ClusterIPs is a list of IP addresses assigned to this service, and are usually assigned randomly. If an address is specified manually, is in-range (as per system configuration), and is not in use, it will be allocated to the service; otherwise creation of the service will fail. This field may not be changed through updates unless the type field is also being changed to ExternalName (which requires this field to be empty) or the type field is being changed from ExternalName (in which case this field may optionally be specified, as describe above). Valid values are \\"None\\", empty string (\\"\\"), or a valid IP address. Setting this to \\"None\\" makes a \\"headless service\\" (no virtual IP), which is useful when direct endpoint connections are preferred and proxying is not required. Only applies to types ClusterIP, NodePort, and LoadBalancer. If this field is specified when creating a Service of type ExternalName, creation will fail. This field will be wiped when updating a Service to type ExternalName. If this field is not specified, it will be initialized from the clusterIP field. If this field is specified, clients must ensure that clusterIPs[0] and clusterIP have the same value.\\n\\nThis field may hold a maximum of two entries (dual-stack IPs, in either order). These IPs must correspond to the values of the ipFamilies field. 
More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies"', args=[d.arg(name='clusterIPs', type=d.T.array)]), withClusterIPs(clusterIPs): { clusterIPs: if std.isArray(v=clusterIPs) then clusterIPs else [clusterIPs] }, - '#withClusterIPsMixin':: d.fn(help='"ClusterIPs is a list of IP addresses assigned to this service, and are usually assigned randomly. If an address is specified manually, is in-range (as per system configuration), and is not in use, it will be allocated to the service; otherwise creation of the service will fail. This field may not be changed through updates unless the type field is also being changed to ExternalName (which requires this field to be empty) or the type field is being changed from ExternalName (in which case this field may optionally be specified, as describe above). Valid values are \\"None\\", empty string (\\"\\"), or a valid IP address. Setting this to \\"None\\" makes a \\"headless service\\" (no virtual IP), which is useful when direct endpoint connections are preferred and proxying is not required. Only applies to types ClusterIP, NodePort, and LoadBalancer. If this field is specified when creating a Service of type ExternalName, creation will fail. This field will be wiped when updating a Service to type ExternalName. If this field is not specified, it will be initialized from the clusterIP field. If this field is specified, clients must ensure that clusterIPs[0] and clusterIP have the same value.\\n\\nUnless the \\"IPv6DualStack\\" feature gate is enabled, this field is limited to one value, which must be the same as the clusterIP field. If the feature gate is enabled, this field may hold a maximum of two entries (dual-stack IPs, in either order). These IPs must correspond to the values of the ipFamilies field. Both clusterIPs and ipFamilies are governed by the ipFamilyPolicy field. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='clusterIPs', type=d.T.array)]), + '#withClusterIPsMixin':: d.fn(help='"ClusterIPs is a list of IP addresses assigned to this service, and are usually assigned randomly. If an address is specified manually, is in-range (as per system configuration), and is not in use, it will be allocated to the service; otherwise creation of the service will fail. This field may not be changed through updates unless the type field is also being changed to ExternalName (which requires this field to be empty) or the type field is being changed from ExternalName (in which case this field may optionally be specified, as describe above). Valid values are \\"None\\", empty string (\\"\\"), or a valid IP address. Setting this to \\"None\\" makes a \\"headless service\\" (no virtual IP), which is useful when direct endpoint connections are preferred and proxying is not required. Only applies to types ClusterIP, NodePort, and LoadBalancer. If this field is specified when creating a Service of type ExternalName, creation will fail. This field will be wiped when updating a Service to type ExternalName. If this field is not specified, it will be initialized from the clusterIP field. If this field is specified, clients must ensure that clusterIPs[0] and clusterIP have the same value.\\n\\nThis field may hold a maximum of two entries (dual-stack IPs, in either order). These IPs must correspond to the values of the ipFamilies field. 
Both clusterIPs and ipFamilies are governed by the ipFamilyPolicy field. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='clusterIPs', type=d.T.array)]), withClusterIPsMixin(clusterIPs): { clusterIPs+: if std.isArray(v=clusterIPs) then clusterIPs else [clusterIPs] }, '#withExternalIPs':: d.fn(help='"externalIPs is a list of IP addresses for which nodes in the cluster will also accept traffic for this service. These IPs are not managed by Kubernetes. The user is responsible for ensuring that traffic arrives at a node with this IP. A common example is external load-balancers that are not part of the Kubernetes system."', args=[d.arg(name='externalIPs', type=d.T.array)]), withExternalIPs(externalIPs): { externalIPs: if std.isArray(v=externalIPs) then externalIPs else [externalIPs] }, @@ -23,25 +23,25 @@ withExternalIPsMixin(externalIPs): { externalIPs+: if std.isArray(v=externalIPs) then externalIPs else [externalIPs] }, '#withExternalName':: d.fn(help='"externalName is the external reference that discovery mechanisms will return as an alias for this service (e.g. a DNS CNAME record). No proxying will be involved. Must be a lowercase RFC-1123 hostname (https://tools.ietf.org/html/rfc1123) and requires `type` to be \\"ExternalName\\"."', args=[d.arg(name='externalName', type=d.T.string)]), withExternalName(externalName): { externalName: externalName }, - '#withExternalTrafficPolicy':: d.fn(help='"externalTrafficPolicy denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints. \\"Local\\" preserves the client source IP and avoids a second hop for LoadBalancer and Nodeport type services, but risks potentially imbalanced traffic spreading. \\"Cluster\\" obscures the client source IP and may cause a second hop to another node, but should have good overall load-spreading."', args=[d.arg(name='externalTrafficPolicy', type=d.T.string)]), + '#withExternalTrafficPolicy':: d.fn(help="\"externalTrafficPolicy describes how nodes distribute service traffic they receive on one of the Service's \\\"externally-facing\\\" addresses (NodePorts, ExternalIPs, and LoadBalancer IPs). If set to \\\"Local\\\", the proxy will configure the service in a way that assumes that external load balancers will take care of balancing the service traffic between nodes, and so each node will deliver traffic only to the node-local endpoints of the service, without masquerading the client source IP. (Traffic mistakenly sent to a node with no endpoints will be dropped.) The default value, \\\"Cluster\\\", uses the standard behavior of routing to all endpoints evenly (possibly modified by topology and other features). Note that traffic sent to an External IP or LoadBalancer IP from within the cluster will always get \\\"Cluster\\\" semantics, but clients sending to a NodePort from within the cluster may need to take traffic policy into account when picking a node.\"", args=[d.arg(name='externalTrafficPolicy', type=d.T.string)]), withExternalTrafficPolicy(externalTrafficPolicy): { externalTrafficPolicy: externalTrafficPolicy }, - '#withHealthCheckNodePort':: d.fn(help='"healthCheckNodePort specifies the healthcheck nodePort for the service. This only applies when type is set to LoadBalancer and externalTrafficPolicy is set to Local. If a value is specified, is in-range, and is not in use, it will be used. 
If not specified, a value will be automatically allocated. External systems (e.g. load-balancers) can use this port to determine if a given node holds endpoints for this service or not. If this field is specified when creating a Service which does not need it, creation will fail. This field will be wiped when updating a Service to no longer need it (e.g. changing type)."', args=[d.arg(name='healthCheckNodePort', type=d.T.integer)]), + '#withHealthCheckNodePort':: d.fn(help='"healthCheckNodePort specifies the healthcheck nodePort for the service. This only applies when type is set to LoadBalancer and externalTrafficPolicy is set to Local. If a value is specified, is in-range, and is not in use, it will be used. If not specified, a value will be automatically allocated. External systems (e.g. load-balancers) can use this port to determine if a given node holds endpoints for this service or not. If this field is specified when creating a Service which does not need it, creation will fail. This field will be wiped when updating a Service to no longer need it (e.g. changing type). This field cannot be updated once set."', args=[d.arg(name='healthCheckNodePort', type=d.T.integer)]), withHealthCheckNodePort(healthCheckNodePort): { healthCheckNodePort: healthCheckNodePort }, - '#withInternalTrafficPolicy':: d.fn(help='"InternalTrafficPolicy specifies if the cluster internal traffic should be routed to all endpoints or node-local endpoints only. \\"Cluster\\" routes internal traffic to a Service to all endpoints. \\"Local\\" routes traffic to node-local endpoints only, traffic is dropped if no node-local endpoints are ready. The default value is \\"Cluster\\"."', args=[d.arg(name='internalTrafficPolicy', type=d.T.string)]), + '#withInternalTrafficPolicy':: d.fn(help='"InternalTrafficPolicy describes how nodes distribute service traffic they receive on the ClusterIP. If set to \\"Local\\", the proxy will assume that pods only want to talk to endpoints of the service on the same node as the pod, dropping the traffic if there are no local endpoints. The default value, \\"Cluster\\", uses the standard behavior of routing to all endpoints evenly (possibly modified by topology and other features)."', args=[d.arg(name='internalTrafficPolicy', type=d.T.string)]), withInternalTrafficPolicy(internalTrafficPolicy): { internalTrafficPolicy: internalTrafficPolicy }, - '#withIpFamilies':: d.fn(help='"IPFamilies is a list of IP families (e.g. IPv4, IPv6) assigned to this service, and is gated by the \\"IPv6DualStack\\" feature gate. This field is usually assigned automatically based on cluster configuration and the ipFamilyPolicy field. If this field is specified manually, the requested family is available in the cluster, and ipFamilyPolicy allows it, it will be used; otherwise creation of the service will fail. This field is conditionally mutable: it allows for adding or removing a secondary IP family, but it does not allow changing the primary IP family of the Service. Valid values are \\"IPv4\\" and \\"IPv6\\". This field only applies to Services of types ClusterIP, NodePort, and LoadBalancer, and does apply to \\"headless\\" services. This field will be wiped when updating a Service to type ExternalName.\\n\\nThis field may hold a maximum of two entries (dual-stack families, in either order). These families must correspond to the values of the clusterIPs field, if specified. 
Both clusterIPs and ipFamilies are governed by the ipFamilyPolicy field."', args=[d.arg(name='ipFamilies', type=d.T.array)]), + '#withIpFamilies':: d.fn(help='"IPFamilies is a list of IP families (e.g. IPv4, IPv6) assigned to this service. This field is usually assigned automatically based on cluster configuration and the ipFamilyPolicy field. If this field is specified manually, the requested family is available in the cluster, and ipFamilyPolicy allows it, it will be used; otherwise creation of the service will fail. This field is conditionally mutable: it allows for adding or removing a secondary IP family, but it does not allow changing the primary IP family of the Service. Valid values are \\"IPv4\\" and \\"IPv6\\". This field only applies to Services of types ClusterIP, NodePort, and LoadBalancer, and does apply to \\"headless\\" services. This field will be wiped when updating a Service to type ExternalName.\\n\\nThis field may hold a maximum of two entries (dual-stack families, in either order). These families must correspond to the values of the clusterIPs field, if specified. Both clusterIPs and ipFamilies are governed by the ipFamilyPolicy field."', args=[d.arg(name='ipFamilies', type=d.T.array)]), withIpFamilies(ipFamilies): { ipFamilies: if std.isArray(v=ipFamilies) then ipFamilies else [ipFamilies] }, - '#withIpFamiliesMixin':: d.fn(help='"IPFamilies is a list of IP families (e.g. IPv4, IPv6) assigned to this service, and is gated by the \\"IPv6DualStack\\" feature gate. This field is usually assigned automatically based on cluster configuration and the ipFamilyPolicy field. If this field is specified manually, the requested family is available in the cluster, and ipFamilyPolicy allows it, it will be used; otherwise creation of the service will fail. This field is conditionally mutable: it allows for adding or removing a secondary IP family, but it does not allow changing the primary IP family of the Service. Valid values are \\"IPv4\\" and \\"IPv6\\". This field only applies to Services of types ClusterIP, NodePort, and LoadBalancer, and does apply to \\"headless\\" services. This field will be wiped when updating a Service to type ExternalName.\\n\\nThis field may hold a maximum of two entries (dual-stack families, in either order). These families must correspond to the values of the clusterIPs field, if specified. Both clusterIPs and ipFamilies are governed by the ipFamilyPolicy field."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ipFamilies', type=d.T.array)]), + '#withIpFamiliesMixin':: d.fn(help='"IPFamilies is a list of IP families (e.g. IPv4, IPv6) assigned to this service. This field is usually assigned automatically based on cluster configuration and the ipFamilyPolicy field. If this field is specified manually, the requested family is available in the cluster, and ipFamilyPolicy allows it, it will be used; otherwise creation of the service will fail. This field is conditionally mutable: it allows for adding or removing a secondary IP family, but it does not allow changing the primary IP family of the Service. Valid values are \\"IPv4\\" and \\"IPv6\\". This field only applies to Services of types ClusterIP, NodePort, and LoadBalancer, and does apply to \\"headless\\" services. This field will be wiped when updating a Service to type ExternalName.\\n\\nThis field may hold a maximum of two entries (dual-stack families, in either order). These families must correspond to the values of the clusterIPs field, if specified. 
Both clusterIPs and ipFamilies are governed by the ipFamilyPolicy field."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ipFamilies', type=d.T.array)]), withIpFamiliesMixin(ipFamilies): { ipFamilies+: if std.isArray(v=ipFamilies) then ipFamilies else [ipFamilies] }, - '#withIpFamilyPolicy':: d.fn(help='"IPFamilyPolicy represents the dual-stack-ness requested or required by this Service, and is gated by the \\"IPv6DualStack\\" feature gate. If there is no value provided, then this field will be set to SingleStack. Services can be \\"SingleStack\\" (a single IP family), \\"PreferDualStack\\" (two IP families on dual-stack configured clusters or a single IP family on single-stack clusters), or \\"RequireDualStack\\" (two IP families on dual-stack configured clusters, otherwise fail). The ipFamilies and clusterIPs fields depend on the value of this field. This field will be wiped when updating a service to type ExternalName."', args=[d.arg(name='ipFamilyPolicy', type=d.T.string)]), + '#withIpFamilyPolicy':: d.fn(help='"IPFamilyPolicy represents the dual-stack-ness requested or required by this Service. If there is no value provided, then this field will be set to SingleStack. Services can be \\"SingleStack\\" (a single IP family), \\"PreferDualStack\\" (two IP families on dual-stack configured clusters or a single IP family on single-stack clusters), or \\"RequireDualStack\\" (two IP families on dual-stack configured clusters, otherwise fail). The ipFamilies and clusterIPs fields depend on the value of this field. This field will be wiped when updating a service to type ExternalName."', args=[d.arg(name='ipFamilyPolicy', type=d.T.string)]), withIpFamilyPolicy(ipFamilyPolicy): { ipFamilyPolicy: ipFamilyPolicy }, '#withLoadBalancerClass':: d.fn(help="\"loadBalancerClass is the class of the load balancer implementation this Service belongs to. If specified, the value of this field must be a label-style identifier, with an optional prefix, e.g. \\\"internal-vip\\\" or \\\"example.com/internal-vip\\\". Unprefixed names are reserved for end-users. This field can only be set when the Service type is 'LoadBalancer'. If not set, the default load balancer implementation is used, today this is typically done through the cloud provider integration, but should apply for any default implementation. If set, it is assumed that a load balancer implementation is watching for Services with a matching class. Any default load balancer implementation (e.g. cloud providers) should ignore Services that set this field. This field can only be set when creating or updating a Service to type 'LoadBalancer'. Once set, it can not be changed. This field will be wiped when a service is updated to a non 'LoadBalancer' type.\"", args=[d.arg(name='loadBalancerClass', type=d.T.string)]), withLoadBalancerClass(loadBalancerClass): { loadBalancerClass: loadBalancerClass }, - '#withLoadBalancerIP':: d.fn(help='"Only applies to Service Type: LoadBalancer LoadBalancer will get created with the IP specified in this field. This feature depends on whether the underlying cloud-provider supports specifying the loadBalancerIP when a load balancer is created. This field will be ignored if the cloud-provider does not support the feature."', args=[d.arg(name='loadBalancerIP', type=d.T.string)]), + '#withLoadBalancerIP':: d.fn(help='"Only applies to Service Type: LoadBalancer. 
This feature depends on whether the underlying cloud-provider supports specifying the loadBalancerIP when a load balancer is created. This field will be ignored if the cloud-provider does not support the feature. Deprecated: This field was under-specified and its meaning varies across implementations. Using it is non-portable and it may not support dual-stack. Users are encouraged to use implementation-specific annotations when available."', args=[d.arg(name='loadBalancerIP', type=d.T.string)]), withLoadBalancerIP(loadBalancerIP): { loadBalancerIP: loadBalancerIP }, - '#withLoadBalancerSourceRanges':: d.fn(help='"If specified and supported by the platform, this will restrict traffic through the cloud-provider load-balancer will be restricted to the specified client IPs. This field will be ignored if the cloud-provider does not support the feature.\\" More info: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/"', args=[d.arg(name='loadBalancerSourceRanges', type=d.T.array)]), + '#withLoadBalancerSourceRanges':: d.fn(help='"If specified and supported by the platform, this will restrict traffic through the cloud-provider load-balancer will be restricted to the specified client IPs. This field will be ignored if the cloud-provider does not support the feature.\\" More info: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/"', args=[d.arg(name='loadBalancerSourceRanges', type=d.T.array)]), withLoadBalancerSourceRanges(loadBalancerSourceRanges): { loadBalancerSourceRanges: if std.isArray(v=loadBalancerSourceRanges) then loadBalancerSourceRanges else [loadBalancerSourceRanges] }, - '#withLoadBalancerSourceRangesMixin':: d.fn(help='"If specified and supported by the platform, this will restrict traffic through the cloud-provider load-balancer will be restricted to the specified client IPs. This field will be ignored if the cloud-provider does not support the feature.\\" More info: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='loadBalancerSourceRanges', type=d.T.array)]), + '#withLoadBalancerSourceRangesMixin':: d.fn(help='"If specified and supported by the platform, this will restrict traffic through the cloud-provider load-balancer will be restricted to the specified client IPs. This field will be ignored if the cloud-provider does not support the feature.\\" More info: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='loadBalancerSourceRanges', type=d.T.array)]), withLoadBalancerSourceRangesMixin(loadBalancerSourceRanges): { loadBalancerSourceRanges+: if std.isArray(v=loadBalancerSourceRanges) then loadBalancerSourceRanges else [loadBalancerSourceRanges] }, '#withPorts':: d.fn(help='"The list of ports that are exposed by this service. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies"', args=[d.arg(name='ports', type=d.T.array)]), withPorts(ports): { ports: if std.isArray(v=ports) then ports else [ports] }, @@ -55,10 +55,6 @@ withSelectorMixin(selector): { selector+: selector }, '#withSessionAffinity':: d.fn(help='"Supports \\"ClientIP\\" and \\"None\\". Used to maintain session affinity. Enable client IP based session affinity. Must be ClientIP or None. Defaults to None. 
More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies"', args=[d.arg(name='sessionAffinity', type=d.T.string)]), withSessionAffinity(sessionAffinity): { sessionAffinity: sessionAffinity }, - '#withTopologyKeys':: d.fn(help='"topologyKeys is a preference-order list of topology keys which implementations of services should use to preferentially sort endpoints when accessing this Service, it can not be used at the same time as externalTrafficPolicy=Local. Topology keys must be valid label keys and at most 16 keys may be specified. Endpoints are chosen based on the first topology key with available backends. If this field is specified and all entries have no backends that match the topology of the client, the service has no backends for that client and connections should fail. The special value \\"*\\" may be used to mean \\"any topology\\". This catch-all value, if used, only makes sense as the last value in the list. If this is not specified or empty, no topology constraints will be applied. This field is alpha-level and is only honored by servers that enable the ServiceTopology feature. This field is deprecated and will be removed in a future version."', args=[d.arg(name='topologyKeys', type=d.T.array)]), - withTopologyKeys(topologyKeys): { topologyKeys: if std.isArray(v=topologyKeys) then topologyKeys else [topologyKeys] }, - '#withTopologyKeysMixin':: d.fn(help='"topologyKeys is a preference-order list of topology keys which implementations of services should use to preferentially sort endpoints when accessing this Service, it can not be used at the same time as externalTrafficPolicy=Local. Topology keys must be valid label keys and at most 16 keys may be specified. Endpoints are chosen based on the first topology key with available backends. If this field is specified and all entries have no backends that match the topology of the client, the service has no backends for that client and connections should fail. The special value \\"*\\" may be used to mean \\"any topology\\". This catch-all value, if used, only makes sense as the last value in the list. If this is not specified or empty, no topology constraints will be applied. This field is alpha-level and is only honored by servers that enable the ServiceTopology feature. This field is deprecated and will be removed in a future version."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='topologyKeys', type=d.T.array)]), - withTopologyKeysMixin(topologyKeys): { topologyKeys+: if std.isArray(v=topologyKeys) then topologyKeys else [topologyKeys] }, '#withType':: d.fn(help='"type determines how the Service is exposed. Defaults to ClusterIP. Valid options are ExternalName, ClusterIP, NodePort, and LoadBalancer. \\"ClusterIP\\" allocates a cluster-internal IP address for load-balancing to endpoints. Endpoints are determined by the selector or if that is not specified, by manual construction of an Endpoints object or EndpointSlice objects. If clusterIP is \\"None\\", no virtual IP is allocated and the endpoints are published as a set of endpoints rather than a virtual IP. \\"NodePort\\" builds on ClusterIP and allocates a port on every node which routes to the same endpoints as the clusterIP. \\"LoadBalancer\\" builds on NodePort and creates an external load-balancer (if supported in the current cloud) which routes to the same endpoints as the clusterIP. \\"ExternalName\\" aliases this service to the specified externalName. 
Several other fields do not apply to ExternalName services. More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types"', args=[d.arg(name='type', type=d.T.string)]), withType(type): { type: type }, '#mixin': 'ignore', diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/serviceStatus.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/serviceStatus.libsonnet similarity index 100% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/serviceStatus.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/serviceStatus.libsonnet diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/sessionAffinityConfig.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/sessionAffinityConfig.libsonnet similarity index 100% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/sessionAffinityConfig.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/sessionAffinityConfig.libsonnet diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/sleepAction.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/sleepAction.libsonnet new file mode 100644 index 00000000000..e863a06e935 --- /dev/null +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/sleepAction.libsonnet @@ -0,0 +1,8 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='sleepAction', url='', help='"SleepAction describes a \\"sleep\\" action."'), + '#withSeconds':: d.fn(help='"Seconds is the number of seconds to sleep."', args=[d.arg(name='seconds', type=d.T.integer)]), + withSeconds(seconds): { seconds: seconds }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/storageOSPersistentVolumeSource.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/storageOSPersistentVolumeSource.libsonnet similarity index 85% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/storageOSPersistentVolumeSource.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/storageOSPersistentVolumeSource.libsonnet index b41c58aebfb..f200e9a0fac 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/storageOSPersistentVolumeSource.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/storageOSPersistentVolumeSource.libsonnet @@ -18,13 +18,13 @@ '#withUid':: d.fn(help='"UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { secretRef+: { uid: uid } }, }, - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". 
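The 1.29 serviceSpec hunk above removes the withTopologyKeys/withTopologyKeysMixin setters and refreshes the traffic-policy and IP-family help text, and the new sleepAction package is introduced alongside it. A minimal, hypothetical usage sketch of the regenerated setters follows, assuming the example's Tanka setup exposes the vendored library behind the conventional 'k.libsonnet' alias and that service.new exists as it does for other generated kinds; only the spec.with* functions are taken from this hunk.

  local k = import 'k.libsonnet';  // assumed alias for the vendored 1.29 k8s-libsonnet
  local service = k.core.v1.service;

  {
    example_service:
      service.new('example-svc')                        // assumed generated constructor
      + service.spec.withType('ClusterIP')
      + service.spec.withInternalTrafficPolicy('Cluster')
      + service.spec.withIpFamilyPolicy('PreferDualStack')
      + service.spec.withIpFamilies(['IPv4', 'IPv6']),
      // Note: withTopologyKeys / withTopologyKeysMixin no longer exist in 1.29, so any
      // callers still chaining them will fail to compile after this upgrade.
  }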
Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { fsType: fsType }, - '#withReadOnly':: d.fn(help='"Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { readOnly: readOnly }, - '#withVolumeName':: d.fn(help='"VolumeName is the human-readable name of the StorageOS volume. Volume names are only unique within a namespace."', args=[d.arg(name='volumeName', type=d.T.string)]), + '#withVolumeName':: d.fn(help='"volumeName is the human-readable name of the StorageOS volume. Volume names are only unique within a namespace."', args=[d.arg(name='volumeName', type=d.T.string)]), withVolumeName(volumeName): { volumeName: volumeName }, - '#withVolumeNamespace':: d.fn(help="\"VolumeNamespace specifies the scope of the volume within StorageOS. If no namespace is specified then the Pod's namespace will be used. This allows the Kubernetes name scoping to be mirrored within StorageOS for tighter integration. Set VolumeName to any name to override the default behaviour. Set to \\\"default\\\" if you are not using namespaces within StorageOS. Namespaces that do not pre-exist within StorageOS will be created.\"", args=[d.arg(name='volumeNamespace', type=d.T.string)]), + '#withVolumeNamespace':: d.fn(help="\"volumeNamespace specifies the scope of the volume within StorageOS. If no namespace is specified then the Pod's namespace will be used. This allows the Kubernetes name scoping to be mirrored within StorageOS for tighter integration. Set VolumeName to any name to override the default behaviour. Set to \\\"default\\\" if you are not using namespaces within StorageOS. Namespaces that do not pre-exist within StorageOS will be created.\"", args=[d.arg(name='volumeNamespace', type=d.T.string)]), withVolumeNamespace(volumeNamespace): { volumeNamespace: volumeNamespace }, '#mixin': 'ignore', mixin: self, diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/storageOSVolumeSource.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/storageOSVolumeSource.libsonnet similarity index 70% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/storageOSVolumeSource.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/storageOSVolumeSource.libsonnet index 2aa3947116a..5e2cbf6b593 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/storageOSVolumeSource.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/storageOSVolumeSource.libsonnet @@ -6,13 +6,13 @@ '#withName':: d.fn(help='"Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { secretRef+: { name: name } }, }, - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". 
Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { fsType: fsType }, - '#withReadOnly':: d.fn(help='"Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { readOnly: readOnly }, - '#withVolumeName':: d.fn(help='"VolumeName is the human-readable name of the StorageOS volume. Volume names are only unique within a namespace."', args=[d.arg(name='volumeName', type=d.T.string)]), + '#withVolumeName':: d.fn(help='"volumeName is the human-readable name of the StorageOS volume. Volume names are only unique within a namespace."', args=[d.arg(name='volumeName', type=d.T.string)]), withVolumeName(volumeName): { volumeName: volumeName }, - '#withVolumeNamespace':: d.fn(help="\"VolumeNamespace specifies the scope of the volume within StorageOS. If no namespace is specified then the Pod's namespace will be used. This allows the Kubernetes name scoping to be mirrored within StorageOS for tighter integration. Set VolumeName to any name to override the default behaviour. Set to \\\"default\\\" if you are not using namespaces within StorageOS. Namespaces that do not pre-exist within StorageOS will be created.\"", args=[d.arg(name='volumeNamespace', type=d.T.string)]), + '#withVolumeNamespace':: d.fn(help="\"volumeNamespace specifies the scope of the volume within StorageOS. If no namespace is specified then the Pod's namespace will be used. This allows the Kubernetes name scoping to be mirrored within StorageOS for tighter integration. Set VolumeName to any name to override the default behaviour. Set to \\\"default\\\" if you are not using namespaces within StorageOS. 
Namespaces that do not pre-exist within StorageOS will be created.\"", args=[d.arg(name='volumeNamespace', type=d.T.string)]), withVolumeNamespace(volumeNamespace): { volumeNamespace: volumeNamespace }, '#mixin': 'ignore', mixin: self, diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/sysctl.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/sysctl.libsonnet similarity index 100% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/sysctl.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/sysctl.libsonnet diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/taint.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/taint.libsonnet similarity index 100% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/taint.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/taint.libsonnet diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/tcpSocketAction.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/tcpSocketAction.libsonnet similarity index 100% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/tcpSocketAction.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/tcpSocketAction.libsonnet diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/toleration.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/toleration.libsonnet similarity index 100% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/toleration.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/toleration.libsonnet diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/topologySelectorLabelRequirement.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/topologySelectorLabelRequirement.libsonnet similarity index 100% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/topologySelectorLabelRequirement.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/topologySelectorLabelRequirement.libsonnet diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/topologySelectorTerm.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/topologySelectorTerm.libsonnet similarity index 100% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/topologySelectorTerm.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/topologySelectorTerm.libsonnet diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/topologySpreadConstraint.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/topologySpreadConstraint.libsonnet new file mode 100644 index 00000000000..411dde629c7 --- /dev/null +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/topologySpreadConstraint.libsonnet @@ -0,0 +1,33 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='topologySpreadConstraint', url='', help='"TopologySpreadConstraint 
specifies how to spread matching pods among the given topology."'), + '#labelSelector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), + labelSelector: { + '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressions(matchExpressions): { labelSelector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } }, + '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressionsMixin(matchExpressions): { labelSelector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } }, + '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabels(matchLabels): { labelSelector+: { matchLabels: matchLabels } }, + '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabelsMixin(matchLabels): { labelSelector+: { matchLabels+: matchLabels } }, + }, + '#withMatchLabelKeys':: d.fn(help="\"MatchLabelKeys is a set of pod label keys to select the pods over which spreading will be calculated. The keys are used to lookup values from the incoming pod labels, those key-value labels are ANDed with labelSelector to select the group of existing pods over which spreading will be calculated for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. MatchLabelKeys cannot be set when LabelSelector isn't set. Keys that don't exist in the incoming pod labels will be ignored. A null or empty list means only match against labelSelector.\\n\\nThis is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default).\"", args=[d.arg(name='matchLabelKeys', type=d.T.array)]), + withMatchLabelKeys(matchLabelKeys): { matchLabelKeys: if std.isArray(v=matchLabelKeys) then matchLabelKeys else [matchLabelKeys] }, + '#withMatchLabelKeysMixin':: d.fn(help="\"MatchLabelKeys is a set of pod label keys to select the pods over which spreading will be calculated. The keys are used to lookup values from the incoming pod labels, those key-value labels are ANDed with labelSelector to select the group of existing pods over which spreading will be calculated for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. MatchLabelKeys cannot be set when LabelSelector isn't set. 
Keys that don't exist in the incoming pod labels will be ignored. A null or empty list means only match against labelSelector.\\n\\nThis is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default).\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='matchLabelKeys', type=d.T.array)]), + withMatchLabelKeysMixin(matchLabelKeys): { matchLabelKeys+: if std.isArray(v=matchLabelKeys) then matchLabelKeys else [matchLabelKeys] }, + '#withMaxSkew':: d.fn(help="\"MaxSkew describes the degree to which pods may be unevenly distributed. When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference between the number of matching pods in the target topology and the global minimum. The global minimum is the minimum number of matching pods in an eligible domain or zero if the number of eligible domains is less than MinDomains. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 2/2/1: In this case, the global minimum is 1. | zone1 | zone2 | zone3 | | P P | P P | P | - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2; scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2) violate MaxSkew(1). - if MaxSkew is 2, incoming pod can be scheduled onto any zone. When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence to topologies that satisfy it. It's a required field. Default value is 1 and 0 is not allowed.\"", args=[d.arg(name='maxSkew', type=d.T.integer)]), + withMaxSkew(maxSkew): { maxSkew: maxSkew }, + '#withMinDomains':: d.fn(help="\"MinDomains indicates a minimum number of eligible domains. When the number of eligible domains with matching topology keys is less than minDomains, Pod Topology Spread treats \\\"global minimum\\\" as 0, and then the calculation of Skew is performed. And when the number of eligible domains with matching topology keys equals or greater than minDomains, this value has no effect on scheduling. As a result, when the number of eligible domains is less than minDomains, scheduler won't schedule more than maxSkew Pods to those domains. If value is nil, the constraint behaves as if MinDomains is equal to 1. Valid values are integers greater than 0. When value is not nil, WhenUnsatisfiable must be DoNotSchedule.\\n\\nFor example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same labelSelector spread as 2/2/2: | zone1 | zone2 | zone3 | | P P | P P | P P | The number of domains is less than 5(MinDomains), so \\\"global minimum\\\" is treated as 0. In this situation, new pod with the same labelSelector cannot be scheduled, because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, it will violate MaxSkew.\\n\\nThis is a beta field and requires the MinDomainsInPodTopologySpread feature gate to be enabled (enabled by default).\"", args=[d.arg(name='minDomains', type=d.T.integer)]), + withMinDomains(minDomains): { minDomains: minDomains }, + '#withNodeAffinityPolicy':: d.fn(help="\"NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector when calculating pod topology spread skew. Options are: - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations.\\n\\nIf this value is nil, the behavior is equivalent to the Honor policy. 
This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.\"", args=[d.arg(name='nodeAffinityPolicy', type=d.T.string)]), + withNodeAffinityPolicy(nodeAffinityPolicy): { nodeAffinityPolicy: nodeAffinityPolicy }, + '#withNodeTaintsPolicy':: d.fn(help='"NodeTaintsPolicy indicates how we will treat node taints when calculating pod topology spread skew. Options are: - Honor: nodes without taints, along with tainted nodes for which the incoming pod has a toleration, are included. - Ignore: node taints are ignored. All nodes are included.\\n\\nIf this value is nil, the behavior is equivalent to the Ignore policy. This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag."', args=[d.arg(name='nodeTaintsPolicy', type=d.T.string)]), + withNodeTaintsPolicy(nodeTaintsPolicy): { nodeTaintsPolicy: nodeTaintsPolicy }, + '#withTopologyKey':: d.fn(help="\"TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each \u003ckey, value\u003e as a \\\"bucket\\\", and try to put balanced number of pods into each bucket. We define a domain as a particular instance of a topology. Also, we define an eligible domain as a domain whose nodes meet the requirements of nodeAffinityPolicy and nodeTaintsPolicy. e.g. If TopologyKey is \\\"kubernetes.io/hostname\\\", each Node is a domain of that topology. And, if TopologyKey is \\\"topology.kubernetes.io/zone\\\", each zone is a domain of that topology. It's a required field.\"", args=[d.arg(name='topologyKey', type=d.T.string)]), + withTopologyKey(topologyKey): { topologyKey: topologyKey }, + '#withWhenUnsatisfiable':: d.fn(help="\"WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy the spread constraint. - DoNotSchedule (default) tells the scheduler not to schedule it. - ScheduleAnyway tells the scheduler to schedule the pod in any location,\\n but giving higher precedence to topologies that would help reduce the\\n skew.\\nA constraint is considered \\\"Unsatisfiable\\\" for an incoming pod if and only if every possible node assignment for that pod would violate \\\"MaxSkew\\\" on some topology. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 3/1/1: | zone1 | zone2 | zone3 | | P P P | P | P | If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler won't make it *more* imbalanced. 
It's a required field.\"", args=[d.arg(name='whenUnsatisfiable', type=d.T.string)]), + withWhenUnsatisfiable(whenUnsatisfiable): { whenUnsatisfiable: whenUnsatisfiable }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/typedLocalObjectReference.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/typedLocalObjectReference.libsonnet similarity index 100% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/typedLocalObjectReference.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/typedLocalObjectReference.libsonnet diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/typedObjectReference.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/typedObjectReference.libsonnet new file mode 100644 index 00000000000..5ab88f46f54 --- /dev/null +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/typedObjectReference.libsonnet @@ -0,0 +1,14 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='typedObjectReference', url='', help=''), + '#withApiGroup':: d.fn(help='"APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required."', args=[d.arg(name='apiGroup', type=d.T.string)]), + withApiGroup(apiGroup): { apiGroup: apiGroup }, + '#withKind':: d.fn(help='"Kind is the type of resource being referenced"', args=[d.arg(name='kind', type=d.T.string)]), + withKind(kind): { kind: kind }, + '#withName':: d.fn(help='"Name is the name of resource being referenced"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { name: name }, + '#withNamespace':: d.fn(help="\"Namespace is the namespace of resource being referenced Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled.\"", args=[d.arg(name='namespace', type=d.T.string)]), + withNamespace(namespace): { namespace: namespace }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/volume.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/volume.libsonnet similarity index 57% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/volume.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/volume.libsonnet index bd3ec56b643..f78582150aa 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/volume.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/volume.libsonnet @@ -3,37 +3,37 @@ '#':: d.pkg(name='volume', url='', help='"Volume represents a named volume in a pod that may be accessed by any container in the pod."'), '#awsElasticBlockStore':: d.obj(help='"Represents a Persistent Disk resource in AWS.\\n\\nAn AWS EBS disk must exist before mounting to a container. The disk must also be in the same AWS zone as the kubelet. An AWS EBS disk can only be mounted as read/write once. 
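The new topologySpreadConstraint and typedObjectReference packages added above are plain field-setter libraries like the rest of the generated tree. A hedged sketch of composing the constraint helpers, again assuming the 'k.libsonnet' alias; only the with* and labelSelector functions come from the file added in this hunk, and the pod-spec helper named in the trailing comment is an assumption.

  local k = import 'k.libsonnet';  // assumed alias for the vendored 1.29 k8s-libsonnet
  local tsc = k.core.v1.topologySpreadConstraint;

  {
    // Spread pods matching app=example across zones with at most one pod of skew.
    zoneSpread::
      tsc.withMaxSkew(1)
      + tsc.withTopologyKey('topology.kubernetes.io/zone')
      + tsc.withWhenUnsatisfiable('ScheduleAnyway')
      + tsc.labelSelector.withMatchLabels({ app: 'example' }),
    // The resulting object would normally be appended to a pod spec's
    // topologySpreadConstraints list (e.g. via an assumed
    // withTopologySpreadConstraints helper on the generated pod spec).
  }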
AWS EBS volumes support ownership management and SELinux relabeling."'), awsElasticBlockStore: { - '#withFsType':: d.fn(help='"Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore"', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType is the filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore"', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { awsElasticBlockStore+: { fsType: fsType } }, - '#withPartition':: d.fn(help='"The partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \\"1\\". Similarly, the volume partition for /dev/sda is \\"0\\" (or you can leave the property empty)."', args=[d.arg(name='partition', type=d.T.integer)]), + '#withPartition':: d.fn(help='"partition is the partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \\"1\\". Similarly, the volume partition for /dev/sda is \\"0\\" (or you can leave the property empty)."', args=[d.arg(name='partition', type=d.T.integer)]), withPartition(partition): { awsElasticBlockStore+: { partition: partition } }, - '#withReadOnly':: d.fn(help='"Specify \\"true\\" to force and set the ReadOnly property in VolumeMounts to \\"true\\". If omitted, the default is \\"false\\". More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore"', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly value true will force the readOnly setting in VolumeMounts. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore"', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { awsElasticBlockStore+: { readOnly: readOnly } }, - '#withVolumeID':: d.fn(help='"Unique ID of the persistent disk resource in AWS (Amazon EBS volume). More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore"', args=[d.arg(name='volumeID', type=d.T.string)]), + '#withVolumeID':: d.fn(help='"volumeID is unique ID of the persistent disk resource in AWS (Amazon EBS volume). 
More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore"', args=[d.arg(name='volumeID', type=d.T.string)]), withVolumeID(volumeID): { awsElasticBlockStore+: { volumeID: volumeID } }, }, '#azureDisk':: d.obj(help='"AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod."'), azureDisk: { - '#withCachingMode':: d.fn(help='"Host Caching mode: None, Read Only, Read Write."', args=[d.arg(name='cachingMode', type=d.T.string)]), + '#withCachingMode':: d.fn(help='"cachingMode is the Host Caching mode: None, Read Only, Read Write."', args=[d.arg(name='cachingMode', type=d.T.string)]), withCachingMode(cachingMode): { azureDisk+: { cachingMode: cachingMode } }, - '#withDiskName':: d.fn(help='"The Name of the data disk in the blob storage"', args=[d.arg(name='diskName', type=d.T.string)]), + '#withDiskName':: d.fn(help='"diskName is the Name of the data disk in the blob storage"', args=[d.arg(name='diskName', type=d.T.string)]), withDiskName(diskName): { azureDisk+: { diskName: diskName } }, - '#withDiskURI':: d.fn(help='"The URI the data disk in the blob storage"', args=[d.arg(name='diskURI', type=d.T.string)]), + '#withDiskURI':: d.fn(help='"diskURI is the URI of data disk in the blob storage"', args=[d.arg(name='diskURI', type=d.T.string)]), withDiskURI(diskURI): { azureDisk+: { diskURI: diskURI } }, - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType is Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { azureDisk+: { fsType: fsType } }, - '#withKind':: d.fn(help='"Expected values Shared: multiple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared"', args=[d.arg(name='kind', type=d.T.string)]), + '#withKind':: d.fn(help='"kind expected values are Shared: multiple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared"', args=[d.arg(name='kind', type=d.T.string)]), withKind(kind): { azureDisk+: { kind: kind } }, - '#withReadOnly':: d.fn(help='"Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { azureDisk+: { readOnly: readOnly } }, }, '#azureFile':: d.obj(help='"AzureFile represents an Azure File Service mount on the host and bind mount to the pod."'), azureFile: { - '#withReadOnly':: d.fn(help='"Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly defaults to false (read/write). 
ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { azureFile+: { readOnly: readOnly } }, - '#withSecretName':: d.fn(help='"the name of secret that contains Azure Storage Account Name and Key"', args=[d.arg(name='secretName', type=d.T.string)]), + '#withSecretName':: d.fn(help='"secretName is the name of secret that contains Azure Storage Account Name and Key"', args=[d.arg(name='secretName', type=d.T.string)]), withSecretName(secretName): { azureFile+: { secretName: secretName } }, - '#withShareName':: d.fn(help='"Share Name"', args=[d.arg(name='shareName', type=d.T.string)]), + '#withShareName':: d.fn(help='"shareName is the azure share Name"', args=[d.arg(name='shareName', type=d.T.string)]), withShareName(shareName): { azureFile+: { shareName: shareName } }, }, '#cephfs':: d.obj(help='"Represents a Ceph Filesystem mount that lasts the lifetime of a pod Cephfs volumes do not support ownership management or SELinux relabeling."'), @@ -43,17 +43,17 @@ '#withName':: d.fn(help='"Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { cephfs+: { secretRef+: { name: name } } }, }, - '#withMonitors':: d.fn(help='"Required: Monitors is a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='monitors', type=d.T.array)]), + '#withMonitors':: d.fn(help='"monitors is Required: Monitors is a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='monitors', type=d.T.array)]), withMonitors(monitors): { cephfs+: { monitors: if std.isArray(v=monitors) then monitors else [monitors] } }, - '#withMonitorsMixin':: d.fn(help='"Required: Monitors is a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='monitors', type=d.T.array)]), + '#withMonitorsMixin':: d.fn(help='"monitors is Required: Monitors is a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='monitors', type=d.T.array)]), withMonitorsMixin(monitors): { cephfs+: { monitors+: if std.isArray(v=monitors) then monitors else [monitors] } }, - '#withPath':: d.fn(help='"Optional: Used as the mounted root, rather than the full Ceph tree, default is /"', args=[d.arg(name='path', type=d.T.string)]), + '#withPath':: d.fn(help='"path is Optional: Used as the mounted root, rather than the full Ceph tree, default is /"', args=[d.arg(name='path', type=d.T.string)]), withPath(path): { cephfs+: { path: path } }, - '#withReadOnly':: d.fn(help='"Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly is Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. 
More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { cephfs+: { readOnly: readOnly } }, - '#withSecretFile':: d.fn(help='"Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='secretFile', type=d.T.string)]), + '#withSecretFile':: d.fn(help='"secretFile is Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='secretFile', type=d.T.string)]), withSecretFile(secretFile): { cephfs+: { secretFile: secretFile } }, - '#withUser':: d.fn(help='"Optional: User is the rados user name, default is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='user', type=d.T.string)]), + '#withUser':: d.fn(help='"user is optional: User is the rados user name, default is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='user', type=d.T.string)]), withUser(user): { cephfs+: { user: user } }, }, '#cinder':: d.obj(help='"Represents a cinder volume resource in Openstack. A Cinder volume must exist before mounting to a container. The volume must also be in the same region as the kubelet. Cinder volumes support ownership management and SELinux relabeling."'), @@ -63,24 +63,24 @@ '#withName':: d.fn(help='"Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { cinder+: { secretRef+: { name: name } } }, }, - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://examples.k8s.io/mysql-cinder-pd/README.md"', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://examples.k8s.io/mysql-cinder-pd/README.md"', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { cinder+: { fsType: fsType } }, - '#withReadOnly':: d.fn(help='"Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/mysql-cinder-pd/README.md"', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/mysql-cinder-pd/README.md"', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { cinder+: { readOnly: readOnly } }, - '#withVolumeID':: d.fn(help='"volume id used to identify the volume in cinder. More info: https://examples.k8s.io/mysql-cinder-pd/README.md"', args=[d.arg(name='volumeID', type=d.T.string)]), + '#withVolumeID':: d.fn(help='"volumeID used to identify the volume in cinder. 
More info: https://examples.k8s.io/mysql-cinder-pd/README.md"', args=[d.arg(name='volumeID', type=d.T.string)]), withVolumeID(volumeID): { cinder+: { volumeID: volumeID } }, }, '#configMap':: d.obj(help="\"Adapts a ConfigMap into a volume.\\n\\nThe contents of the target ConfigMap's Data field will be presented in a volume as files using the keys in the Data field as the file names, unless the items element is populated with specific mappings of keys to paths. ConfigMap volumes support ownership management and SELinux relabeling.\""), configMap: { - '#withDefaultMode':: d.fn(help='"Optional: mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set."', args=[d.arg(name='defaultMode', type=d.T.integer)]), + '#withDefaultMode':: d.fn(help='"defaultMode is optional: mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set."', args=[d.arg(name='defaultMode', type=d.T.integer)]), withDefaultMode(defaultMode): { configMap+: { defaultMode: defaultMode } }, - '#withItems':: d.fn(help="\"If unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.\"", args=[d.arg(name='items', type=d.T.array)]), + '#withItems':: d.fn(help="\"items if unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.\"", args=[d.arg(name='items', type=d.T.array)]), withItems(items): { configMap+: { items: if std.isArray(v=items) then items else [items] } }, - '#withItemsMixin':: d.fn(help="\"If unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' 
path or start with '..'.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='items', type=d.T.array)]), + '#withItemsMixin':: d.fn(help="\"items if unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='items', type=d.T.array)]), withItemsMixin(items): { configMap+: { items+: if std.isArray(v=items) then items else [items] } }, '#withName':: d.fn(help='"Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { configMap+: { name: name } }, - '#withOptional':: d.fn(help='"Specify whether the ConfigMap or its keys must be defined"', args=[d.arg(name='optional', type=d.T.boolean)]), + '#withOptional':: d.fn(help='"optional specify whether the ConfigMap or its keys must be defined"', args=[d.arg(name='optional', type=d.T.boolean)]), withOptional(optional): { configMap+: { optional: optional } }, }, '#csi':: d.obj(help='"Represents a source location of a volume to mount, managed by an external CSI driver"'), @@ -90,15 +90,15 @@ '#withName':: d.fn(help='"Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { csi+: { nodePublishSecretRef+: { name: name } } }, }, - '#withDriver':: d.fn(help='"Driver is the name of the CSI driver that handles this volume. Consult with your admin for the correct name as registered in the cluster."', args=[d.arg(name='driver', type=d.T.string)]), + '#withDriver':: d.fn(help='"driver is the name of the CSI driver that handles this volume. Consult with your admin for the correct name as registered in the cluster."', args=[d.arg(name='driver', type=d.T.string)]), withDriver(driver): { csi+: { driver: driver } }, - '#withFsType':: d.fn(help='"Filesystem type to mount. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". If not provided, the empty value is passed to the associated CSI driver which will determine the default filesystem to apply."', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType to mount. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". If not provided, the empty value is passed to the associated CSI driver which will determine the default filesystem to apply."', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { csi+: { fsType: fsType } }, - '#withReadOnly':: d.fn(help='"Specifies a read-only configuration for the volume. Defaults to false (read/write)."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly specifies a read-only configuration for the volume. Defaults to false (read/write)."', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { csi+: { readOnly: readOnly } }, - '#withVolumeAttributes':: d.fn(help="\"VolumeAttributes stores driver-specific properties that are passed to the CSI driver. 
Consult your driver's documentation for supported values.\"", args=[d.arg(name='volumeAttributes', type=d.T.object)]), + '#withVolumeAttributes':: d.fn(help="\"volumeAttributes stores driver-specific properties that are passed to the CSI driver. Consult your driver's documentation for supported values.\"", args=[d.arg(name='volumeAttributes', type=d.T.object)]), withVolumeAttributes(volumeAttributes): { csi+: { volumeAttributes: volumeAttributes } }, - '#withVolumeAttributesMixin':: d.fn(help="\"VolumeAttributes stores driver-specific properties that are passed to the CSI driver. Consult your driver's documentation for supported values.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='volumeAttributes', type=d.T.object)]), + '#withVolumeAttributesMixin':: d.fn(help="\"volumeAttributes stores driver-specific properties that are passed to the CSI driver. Consult your driver's documentation for supported values.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='volumeAttributes', type=d.T.object)]), withVolumeAttributesMixin(volumeAttributes): { csi+: { volumeAttributes+: volumeAttributes } }, }, '#downwardAPI':: d.obj(help='"DownwardAPIVolumeSource represents a volume containing downward API info. Downward API volumes support ownership management and SELinux relabeling."'), @@ -112,9 +112,9 @@ }, '#emptyDir':: d.obj(help='"Represents an empty directory for a pod. Empty directory volumes support ownership management and SELinux relabeling."'), emptyDir: { - '#withMedium':: d.fn(help="\"What type of storage medium should back this directory. The default is \\\"\\\" which means to use the node's default medium. Must be an empty string (default) or Memory. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir\"", args=[d.arg(name='medium', type=d.T.string)]), + '#withMedium':: d.fn(help="\"medium represents what type of storage medium should back this directory. The default is \\\"\\\" which means to use the node's default medium. Must be an empty string (default) or Memory. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir\"", args=[d.arg(name='medium', type=d.T.string)]), withMedium(medium): { emptyDir+: { medium: medium } }, - '#withSizeLimit':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. 
| .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='sizeLimit', type=d.T.string)]), + '#withSizeLimit':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n``` \u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n\\n\\t(Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. 
| .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n\\n\\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n\\n\\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e ```\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n\\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\\n\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n\\n- 1.5 will be serialized as \\\"1500m\\\" - 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='sizeLimit', type=d.T.string)]), withSizeLimit(sizeLimit): { emptyDir+: { sizeLimit: sizeLimit } }, }, '#ephemeral':: d.obj(help='"Represents an ephemeral volume that is handled by a normal storage driver."'), @@ -123,12 +123,10 @@ volumeClaimTemplate: { '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { ephemeral+: { volumeClaimTemplate+: { metadata+: { annotations: annotations } } } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { ephemeral+: { volumeClaimTemplate+: { metadata+: { annotations+: annotations } } } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { ephemeral+: { volumeClaimTemplate+: { metadata+: { clusterName: clusterName } } } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { ephemeral+: { volumeClaimTemplate+: { metadata+: { creationTimestamp: creationTimestamp } } } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -139,21 +137,21 @@ withFinalizers(finalizers): { ephemeral+: { volumeClaimTemplate+: { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } } } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. 
Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { ephemeral+: { volumeClaimTemplate+: { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } } } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { ephemeral+: { volumeClaimTemplate+: { metadata+: { generateName: generateName } } } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { ephemeral+: { volumeClaimTemplate+: { metadata+: { generation: generation } } } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { ephemeral+: { volumeClaimTemplate+: { metadata+: { labels: labels } } } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. 
May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { ephemeral+: { volumeClaimTemplate+: { metadata+: { labels+: labels } } } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { ephemeral+: { volumeClaimTemplate+: { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } } } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { ephemeral+: { volumeClaimTemplate+: { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } } } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { ephemeral+: { volumeClaimTemplate+: { metadata+: { name: name } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. 
More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { ephemeral+: { volumeClaimTemplate+: { metadata+: { namespace: namespace } } } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { ephemeral+: { volumeClaimTemplate+: { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } } } }, @@ -161,9 +159,9 @@ withOwnerReferencesMixin(ownerReferences): { ephemeral+: { volumeClaimTemplate+: { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } } } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { ephemeral+: { volumeClaimTemplate+: { metadata+: { resourceVersion: resourceVersion } } } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { ephemeral+: { volumeClaimTemplate+: { metadata+: { selfLink: selfLink } } } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. 
It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { ephemeral+: { volumeClaimTemplate+: { metadata+: { uid: uid } } } }, }, '#spec':: d.obj(help='"PersistentVolumeClaimSpec describes the common attributes of storage devices and allows a Source for provider-specific attributes"'), @@ -177,15 +175,26 @@ '#withName':: d.fn(help='"Name is the name of resource being referenced"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { ephemeral+: { volumeClaimTemplate+: { spec+: { dataSource+: { name: name } } } } }, }, - '#resources':: d.obj(help='"ResourceRequirements describes the compute resource requirements."'), + '#dataSourceRef':: d.obj(help=''), + dataSourceRef: { + '#withApiGroup':: d.fn(help='"APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required."', args=[d.arg(name='apiGroup', type=d.T.string)]), + withApiGroup(apiGroup): { ephemeral+: { volumeClaimTemplate+: { spec+: { dataSourceRef+: { apiGroup: apiGroup } } } } }, + '#withKind':: d.fn(help='"Kind is the type of resource being referenced"', args=[d.arg(name='kind', type=d.T.string)]), + withKind(kind): { ephemeral+: { volumeClaimTemplate+: { spec+: { dataSourceRef+: { kind: kind } } } } }, + '#withName':: d.fn(help='"Name is the name of resource being referenced"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { ephemeral+: { volumeClaimTemplate+: { spec+: { dataSourceRef+: { name: name } } } } }, + '#withNamespace':: d.fn(help="\"Namespace is the namespace of resource being referenced Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled.\"", args=[d.arg(name='namespace', type=d.T.string)]), + withNamespace(namespace): { ephemeral+: { volumeClaimTemplate+: { spec+: { dataSourceRef+: { namespace: namespace } } } } }, + }, + '#resources':: d.obj(help='"VolumeResourceRequirements describes the storage resource requirements for a volume."'), resources: { '#withLimits':: d.fn(help='"Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/"', args=[d.arg(name='limits', type=d.T.object)]), withLimits(limits): { ephemeral+: { volumeClaimTemplate+: { spec+: { resources+: { limits: limits } } } } }, '#withLimitsMixin':: d.fn(help='"Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='limits', type=d.T.object)]), withLimitsMixin(limits): { ephemeral+: { volumeClaimTemplate+: { spec+: { resources+: { limits+: limits } } } } }, - '#withRequests':: d.fn(help='"Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/"', args=[d.arg(name='requests', type=d.T.object)]), + '#withRequests':: d.fn(help='"Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/"', args=[d.arg(name='requests', type=d.T.object)]), withRequests(requests): { ephemeral+: { volumeClaimTemplate+: { spec+: { resources+: { requests: requests } } } } }, - '#withRequestsMixin':: d.fn(help='"Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='requests', type=d.T.object)]), + '#withRequestsMixin':: d.fn(help='"Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='requests', type=d.T.object)]), withRequestsMixin(requests): { ephemeral+: { volumeClaimTemplate+: { spec+: { resources+: { requests+: requests } } } } }, }, '#selector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), @@ -199,34 +208,36 @@ '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), withMatchLabelsMixin(matchLabels): { ephemeral+: { volumeClaimTemplate+: { spec+: { selector+: { matchLabels+: matchLabels } } } } }, }, - '#withAccessModes':: d.fn(help='"AccessModes contains the desired access modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1"', args=[d.arg(name='accessModes', type=d.T.array)]), + '#withAccessModes':: d.fn(help='"accessModes contains the desired access modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1"', args=[d.arg(name='accessModes', type=d.T.array)]), withAccessModes(accessModes): { ephemeral+: { volumeClaimTemplate+: { spec+: { accessModes: if std.isArray(v=accessModes) then accessModes else [accessModes] } } } }, - '#withAccessModesMixin':: d.fn(help='"AccessModes contains the desired access modes the volume should have. 
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='accessModes', type=d.T.array)]), + '#withAccessModesMixin':: d.fn(help='"accessModes contains the desired access modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='accessModes', type=d.T.array)]), withAccessModesMixin(accessModes): { ephemeral+: { volumeClaimTemplate+: { spec+: { accessModes+: if std.isArray(v=accessModes) then accessModes else [accessModes] } } } }, - '#withStorageClassName':: d.fn(help='"Name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1"', args=[d.arg(name='storageClassName', type=d.T.string)]), + '#withStorageClassName':: d.fn(help='"storageClassName is the name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1"', args=[d.arg(name='storageClassName', type=d.T.string)]), withStorageClassName(storageClassName): { ephemeral+: { volumeClaimTemplate+: { spec+: { storageClassName: storageClassName } } } }, + '#withVolumeAttributesClassName':: d.fn(help="\"volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. If specified, the CSI driver will create or update the volume with the attributes defined in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass will be applied to the claim but it's not allowed to reset this field to empty string once it is set. If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass will be set by the persistentvolume controller if it exists. If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource exists. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#volumeattributesclass (Alpha) Using this field requires the VolumeAttributesClass feature gate to be enabled.\"", args=[d.arg(name='volumeAttributesClassName', type=d.T.string)]), + withVolumeAttributesClassName(volumeAttributesClassName): { ephemeral+: { volumeClaimTemplate+: { spec+: { volumeAttributesClassName: volumeAttributesClassName } } } }, '#withVolumeMode':: d.fn(help='"volumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec."', args=[d.arg(name='volumeMode', type=d.T.string)]), withVolumeMode(volumeMode): { ephemeral+: { volumeClaimTemplate+: { spec+: { volumeMode: volumeMode } } } }, - '#withVolumeName':: d.fn(help='"VolumeName is the binding reference to the PersistentVolume backing this claim."', args=[d.arg(name='volumeName', type=d.T.string)]), + '#withVolumeName':: d.fn(help='"volumeName is the binding reference to the PersistentVolume backing this claim."', args=[d.arg(name='volumeName', type=d.T.string)]), withVolumeName(volumeName): { ephemeral+: { volumeClaimTemplate+: { spec+: { volumeName: volumeName } } } }, }, }, }, '#fc':: d.obj(help='"Represents a Fibre Channel volume. Fibre Channel volumes can only be mounted as read/write once. 
Fibre Channel volumes support ownership management and SELinux relabeling."'), fc: { - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { fc+: { fsType: fsType } }, - '#withLun':: d.fn(help='"Optional: FC target lun number"', args=[d.arg(name='lun', type=d.T.integer)]), + '#withLun':: d.fn(help='"lun is Optional: FC target lun number"', args=[d.arg(name='lun', type=d.T.integer)]), withLun(lun): { fc+: { lun: lun } }, - '#withReadOnly':: d.fn(help='"Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly is Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { fc+: { readOnly: readOnly } }, - '#withTargetWWNs':: d.fn(help='"Optional: FC target worldwide names (WWNs)"', args=[d.arg(name='targetWWNs', type=d.T.array)]), + '#withTargetWWNs':: d.fn(help='"targetWWNs is Optional: FC target worldwide names (WWNs)"', args=[d.arg(name='targetWWNs', type=d.T.array)]), withTargetWWNs(targetWWNs): { fc+: { targetWWNs: if std.isArray(v=targetWWNs) then targetWWNs else [targetWWNs] } }, - '#withTargetWWNsMixin':: d.fn(help='"Optional: FC target worldwide names (WWNs)"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='targetWWNs', type=d.T.array)]), + '#withTargetWWNsMixin':: d.fn(help='"targetWWNs is Optional: FC target worldwide names (WWNs)"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='targetWWNs', type=d.T.array)]), withTargetWWNsMixin(targetWWNs): { fc+: { targetWWNs+: if std.isArray(v=targetWWNs) then targetWWNs else [targetWWNs] } }, - '#withWwids':: d.fn(help='"Optional: FC volume world wide identifiers (wwids) Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously."', args=[d.arg(name='wwids', type=d.T.array)]), + '#withWwids':: d.fn(help='"wwids Optional: FC volume world wide identifiers (wwids) Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously."', args=[d.arg(name='wwids', type=d.T.array)]), withWwids(wwids): { fc+: { wwids: if std.isArray(v=wwids) then wwids else [wwids] } }, - '#withWwidsMixin':: d.fn(help='"Optional: FC volume world wide identifiers (wwids) Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='wwids', type=d.T.array)]), + '#withWwidsMixin':: d.fn(help='"wwids Optional: FC volume world wide identifiers (wwids) Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='wwids', type=d.T.array)]), withWwidsMixin(wwids): { fc+: { wwids+: if std.isArray(v=wwids) then wwids else [wwids] } }, }, '#flexVolume':: 
d.obj(help='"FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin."'), @@ -236,58 +247,58 @@ '#withName':: d.fn(help='"Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { flexVolume+: { secretRef+: { name: name } } }, }, - '#withDriver':: d.fn(help='"Driver is the name of the driver to use for this volume."', args=[d.arg(name='driver', type=d.T.string)]), + '#withDriver':: d.fn(help='"driver is the name of the driver to use for this volume."', args=[d.arg(name='driver', type=d.T.string)]), withDriver(driver): { flexVolume+: { driver: driver } }, - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". The default filesystem depends on FlexVolume script."', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". The default filesystem depends on FlexVolume script."', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { flexVolume+: { fsType: fsType } }, - '#withOptions':: d.fn(help='"Optional: Extra command options if any."', args=[d.arg(name='options', type=d.T.object)]), + '#withOptions':: d.fn(help='"options is Optional: this field holds extra command options if any."', args=[d.arg(name='options', type=d.T.object)]), withOptions(options): { flexVolume+: { options: options } }, - '#withOptionsMixin':: d.fn(help='"Optional: Extra command options if any."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='options', type=d.T.object)]), + '#withOptionsMixin':: d.fn(help='"options is Optional: this field holds extra command options if any."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='options', type=d.T.object)]), withOptionsMixin(options): { flexVolume+: { options+: options } }, - '#withReadOnly':: d.fn(help='"Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly is Optional: defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { flexVolume+: { readOnly: readOnly } }, }, '#flocker':: d.obj(help='"Represents a Flocker volume mounted by the Flocker agent. One and only one of datasetName and datasetUUID should be set. Flocker volumes do not support ownership management or SELinux relabeling."'), flocker: { - '#withDatasetName':: d.fn(help='"Name of the dataset stored as metadata -> name on the dataset for Flocker should be considered as deprecated"', args=[d.arg(name='datasetName', type=d.T.string)]), + '#withDatasetName':: d.fn(help='"datasetName is Name of the dataset stored as metadata -> name on the dataset for Flocker should be considered as deprecated"', args=[d.arg(name='datasetName', type=d.T.string)]), withDatasetName(datasetName): { flocker+: { datasetName: datasetName } }, - '#withDatasetUUID':: d.fn(help='"UUID of the dataset. 
This is unique identifier of a Flocker dataset"', args=[d.arg(name='datasetUUID', type=d.T.string)]), + '#withDatasetUUID':: d.fn(help='"datasetUUID is the UUID of the dataset. This is unique identifier of a Flocker dataset"', args=[d.arg(name='datasetUUID', type=d.T.string)]), withDatasetUUID(datasetUUID): { flocker+: { datasetUUID: datasetUUID } }, }, '#gcePersistentDisk':: d.obj(help='"Represents a Persistent Disk resource in Google Compute Engine.\\n\\nA GCE PD must exist before mounting to a container. The disk must also be in the same GCE project and zone as the kubelet. A GCE PD can only be mounted as read/write once or read-only many times. GCE PDs support ownership management and SELinux relabeling."'), gcePersistentDisk: { - '#withFsType':: d.fn(help='"Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType is filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { gcePersistentDisk+: { fsType: fsType } }, - '#withPartition':: d.fn(help='"The partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \\"1\\". Similarly, the volume partition for /dev/sda is \\"0\\" (or you can leave the property empty). More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='partition', type=d.T.integer)]), + '#withPartition':: d.fn(help='"partition is the partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \\"1\\". Similarly, the volume partition for /dev/sda is \\"0\\" (or you can leave the property empty). More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='partition', type=d.T.integer)]), withPartition(partition): { gcePersistentDisk+: { partition: partition } }, - '#withPdName':: d.fn(help='"Unique name of the PD resource in GCE. Used to identify the disk in GCE. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='pdName', type=d.T.string)]), + '#withPdName':: d.fn(help='"pdName is unique name of the PD resource in GCE. Used to identify the disk in GCE. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='pdName', type=d.T.string)]), withPdName(pdName): { gcePersistentDisk+: { pdName: pdName } }, - '#withReadOnly':: d.fn(help='"ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { gcePersistentDisk+: { readOnly: readOnly } }, }, '#gitRepo':: d.obj(help="\"Represents a volume that is populated with the contents of a git repository. Git repo volumes do not support ownership management. Git repo volumes support SELinux relabeling.\\n\\nDEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod's container.\""), gitRepo: { - '#withDirectory':: d.fn(help="\"Target directory name. Must not contain or start with '..'. If '.' is supplied, the volume directory will be the git repository. Otherwise, if specified, the volume will contain the git repository in the subdirectory with the given name.\"", args=[d.arg(name='directory', type=d.T.string)]), + '#withDirectory':: d.fn(help="\"directory is the target directory name. Must not contain or start with '..'. If '.' is supplied, the volume directory will be the git repository. Otherwise, if specified, the volume will contain the git repository in the subdirectory with the given name.\"", args=[d.arg(name='directory', type=d.T.string)]), withDirectory(directory): { gitRepo+: { directory: directory } }, - '#withRepository':: d.fn(help='"Repository URL"', args=[d.arg(name='repository', type=d.T.string)]), + '#withRepository':: d.fn(help='"repository is the URL"', args=[d.arg(name='repository', type=d.T.string)]), withRepository(repository): { gitRepo+: { repository: repository } }, - '#withRevision':: d.fn(help='"Commit hash for the specified revision."', args=[d.arg(name='revision', type=d.T.string)]), + '#withRevision':: d.fn(help='"revision is the commit hash for the specified revision."', args=[d.arg(name='revision', type=d.T.string)]), withRevision(revision): { gitRepo+: { revision: revision } }, }, '#glusterfs':: d.obj(help='"Represents a Glusterfs mount that lasts the lifetime of a pod. Glusterfs volumes do not support ownership management or SELinux relabeling."'), glusterfs: { - '#withEndpoints':: d.fn(help='"EndpointsName is the endpoint name that details Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='endpoints', type=d.T.string)]), + '#withEndpoints':: d.fn(help='"endpoints is the endpoint name that details Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='endpoints', type=d.T.string)]), withEndpoints(endpoints): { glusterfs+: { endpoints: endpoints } }, - '#withPath':: d.fn(help='"Path is the Glusterfs volume path. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='path', type=d.T.string)]), + '#withPath':: d.fn(help='"path is the Glusterfs volume path. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='path', type=d.T.string)]), withPath(path): { glusterfs+: { path: path } }, - '#withReadOnly':: d.fn(help='"ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions. Defaults to false. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly here will force the Glusterfs volume to be mounted with read-only permissions. Defaults to false. 
More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { glusterfs+: { readOnly: readOnly } }, }, '#hostPath':: d.obj(help='"Represents a host path mapped into a pod. Host path volumes do not support ownership management or SELinux relabeling."'), hostPath: { - '#withPath':: d.fn(help='"Path of the directory on the host. If the path is a symlink, it will follow the link to the real path. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath"', args=[d.arg(name='path', type=d.T.string)]), + '#withPath':: d.fn(help='"path of the directory on the host. If the path is a symlink, it will follow the link to the real path. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath"', args=[d.arg(name='path', type=d.T.string)]), withPath(path): { hostPath+: { path: path } }, - '#withType':: d.fn(help='"Type for HostPath Volume Defaults to \\"\\" More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath"', args=[d.arg(name='type', type=d.T.string)]), + '#withType':: d.fn(help='"type for HostPath Volume Defaults to \\"\\" More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath"', args=[d.arg(name='type', type=d.T.string)]), withType(type): { hostPath+: { type: type } }, }, '#iscsi':: d.obj(help='"Represents an ISCSI disk. ISCSI volumes can only be mounted as read/write once. ISCSI volumes support ownership management and SELinux relabeling."'), @@ -297,83 +308,83 @@ '#withName':: d.fn(help='"Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { iscsi+: { secretRef+: { name: name } } }, }, - '#withChapAuthDiscovery':: d.fn(help='"whether support iSCSI Discovery CHAP authentication"', args=[d.arg(name='chapAuthDiscovery', type=d.T.boolean)]), + '#withChapAuthDiscovery':: d.fn(help='"chapAuthDiscovery defines whether support iSCSI Discovery CHAP authentication"', args=[d.arg(name='chapAuthDiscovery', type=d.T.boolean)]), withChapAuthDiscovery(chapAuthDiscovery): { iscsi+: { chapAuthDiscovery: chapAuthDiscovery } }, - '#withChapAuthSession':: d.fn(help='"whether support iSCSI Session CHAP authentication"', args=[d.arg(name='chapAuthSession', type=d.T.boolean)]), + '#withChapAuthSession':: d.fn(help='"chapAuthSession defines whether support iSCSI Session CHAP authentication"', args=[d.arg(name='chapAuthSession', type=d.T.boolean)]), withChapAuthSession(chapAuthSession): { iscsi+: { chapAuthSession: chapAuthSession } }, - '#withFsType':: d.fn(help='"Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi"', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType is the filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi"', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { iscsi+: { fsType: fsType } }, - '#withInitiatorName':: d.fn(help='"Custom iSCSI Initiator Name. 
If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface : will be created for the connection."', args=[d.arg(name='initiatorName', type=d.T.string)]), + '#withInitiatorName':: d.fn(help='"initiatorName is the custom iSCSI Initiator Name. If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface : will be created for the connection."', args=[d.arg(name='initiatorName', type=d.T.string)]), withInitiatorName(initiatorName): { iscsi+: { initiatorName: initiatorName } }, - '#withIqn':: d.fn(help='"Target iSCSI Qualified Name."', args=[d.arg(name='iqn', type=d.T.string)]), + '#withIqn':: d.fn(help='"iqn is the target iSCSI Qualified Name."', args=[d.arg(name='iqn', type=d.T.string)]), withIqn(iqn): { iscsi+: { iqn: iqn } }, - '#withIscsiInterface':: d.fn(help="\"iSCSI Interface Name that uses an iSCSI transport. Defaults to 'default' (tcp).\"", args=[d.arg(name='iscsiInterface', type=d.T.string)]), + '#withIscsiInterface':: d.fn(help="\"iscsiInterface is the interface Name that uses an iSCSI transport. Defaults to 'default' (tcp).\"", args=[d.arg(name='iscsiInterface', type=d.T.string)]), withIscsiInterface(iscsiInterface): { iscsi+: { iscsiInterface: iscsiInterface } }, - '#withLun':: d.fn(help='"iSCSI Target Lun number."', args=[d.arg(name='lun', type=d.T.integer)]), + '#withLun':: d.fn(help='"lun represents iSCSI Target Lun number."', args=[d.arg(name='lun', type=d.T.integer)]), withLun(lun): { iscsi+: { lun: lun } }, - '#withPortals':: d.fn(help='"iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260)."', args=[d.arg(name='portals', type=d.T.array)]), + '#withPortals':: d.fn(help='"portals is the iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260)."', args=[d.arg(name='portals', type=d.T.array)]), withPortals(portals): { iscsi+: { portals: if std.isArray(v=portals) then portals else [portals] } }, - '#withPortalsMixin':: d.fn(help='"iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260)."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='portals', type=d.T.array)]), + '#withPortalsMixin':: d.fn(help='"portals is the iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260)."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='portals', type=d.T.array)]), withPortalsMixin(portals): { iscsi+: { portals+: if std.isArray(v=portals) then portals else [portals] } }, - '#withReadOnly':: d.fn(help='"ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false."', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { iscsi+: { readOnly: readOnly } }, - '#withTargetPortal':: d.fn(help='"iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260)."', args=[d.arg(name='targetPortal', type=d.T.string)]), + '#withTargetPortal':: d.fn(help='"targetPortal is iSCSI Target Portal. 
The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260)."', args=[d.arg(name='targetPortal', type=d.T.string)]), withTargetPortal(targetPortal): { iscsi+: { targetPortal: targetPortal } }, }, '#nfs':: d.obj(help='"Represents an NFS mount that lasts the lifetime of a pod. NFS volumes do not support ownership management or SELinux relabeling."'), nfs: { - '#withPath':: d.fn(help='"Path that is exported by the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs"', args=[d.arg(name='path', type=d.T.string)]), + '#withPath':: d.fn(help='"path that is exported by the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs"', args=[d.arg(name='path', type=d.T.string)]), withPath(path): { nfs+: { path: path } }, - '#withReadOnly':: d.fn(help='"ReadOnly here will force the NFS export to be mounted with read-only permissions. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs"', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly here will force the NFS export to be mounted with read-only permissions. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs"', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { nfs+: { readOnly: readOnly } }, - '#withServer':: d.fn(help='"Server is the hostname or IP address of the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs"', args=[d.arg(name='server', type=d.T.string)]), + '#withServer':: d.fn(help='"server is the hostname or IP address of the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs"', args=[d.arg(name='server', type=d.T.string)]), withServer(server): { nfs+: { server: server } }, }, '#persistentVolumeClaim':: d.obj(help="\"PersistentVolumeClaimVolumeSource references the user's PVC in the same namespace. This volume finds the bound PV and mounts that volume for the pod. A PersistentVolumeClaimVolumeSource is, essentially, a wrapper around another type of volume that is owned by someone else (the system).\""), persistentVolumeClaim: { - '#withClaimName':: d.fn(help='"ClaimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims"', args=[d.arg(name='claimName', type=d.T.string)]), + '#withClaimName':: d.fn(help='"claimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims"', args=[d.arg(name='claimName', type=d.T.string)]), withClaimName(claimName): { persistentVolumeClaim+: { claimName: claimName } }, - '#withReadOnly':: d.fn(help='"Will force the ReadOnly setting in VolumeMounts. Default false."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly Will force the ReadOnly setting in VolumeMounts. Default false."', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { persistentVolumeClaim+: { readOnly: readOnly } }, }, '#photonPersistentDisk':: d.obj(help='"Represents a Photon Controller persistent disk resource."'), photonPersistentDisk: { - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". 
Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { photonPersistentDisk+: { fsType: fsType } }, - '#withPdID':: d.fn(help='"ID that identifies Photon Controller persistent disk"', args=[d.arg(name='pdID', type=d.T.string)]), + '#withPdID':: d.fn(help='"pdID is the ID that identifies Photon Controller persistent disk"', args=[d.arg(name='pdID', type=d.T.string)]), withPdID(pdID): { photonPersistentDisk+: { pdID: pdID } }, }, '#portworxVolume':: d.obj(help='"PortworxVolumeSource represents a Portworx volume resource."'), portworxVolume: { - '#withFsType':: d.fn(help='"FSType represents the filesystem type to mount Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fSType represents the filesystem type to mount Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { portworxVolume+: { fsType: fsType } }, - '#withReadOnly':: d.fn(help='"Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { portworxVolume+: { readOnly: readOnly } }, - '#withVolumeID':: d.fn(help='"VolumeID uniquely identifies a Portworx volume"', args=[d.arg(name='volumeID', type=d.T.string)]), + '#withVolumeID':: d.fn(help='"volumeID uniquely identifies a Portworx volume"', args=[d.arg(name='volumeID', type=d.T.string)]), withVolumeID(volumeID): { portworxVolume+: { volumeID: volumeID } }, }, '#projected':: d.obj(help='"Represents a projected volume source"'), projected: { - '#withDefaultMode':: d.fn(help='"Mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set."', args=[d.arg(name='defaultMode', type=d.T.integer)]), + '#withDefaultMode':: d.fn(help='"defaultMode are the mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Directories within the path are not affected by this setting. 
This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set."', args=[d.arg(name='defaultMode', type=d.T.integer)]), withDefaultMode(defaultMode): { projected+: { defaultMode: defaultMode } }, - '#withSources':: d.fn(help='"list of volume projections"', args=[d.arg(name='sources', type=d.T.array)]), + '#withSources':: d.fn(help='"sources is the list of volume projections"', args=[d.arg(name='sources', type=d.T.array)]), withSources(sources): { projected+: { sources: if std.isArray(v=sources) then sources else [sources] } }, - '#withSourcesMixin':: d.fn(help='"list of volume projections"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='sources', type=d.T.array)]), + '#withSourcesMixin':: d.fn(help='"sources is the list of volume projections"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='sources', type=d.T.array)]), withSourcesMixin(sources): { projected+: { sources+: if std.isArray(v=sources) then sources else [sources] } }, }, '#quobyte':: d.obj(help='"Represents a Quobyte mount that lasts the lifetime of a pod. Quobyte volumes do not support ownership management or SELinux relabeling."'), quobyte: { - '#withGroup':: d.fn(help='"Group to map volume access to Default is no group"', args=[d.arg(name='group', type=d.T.string)]), + '#withGroup':: d.fn(help='"group to map volume access to Default is no group"', args=[d.arg(name='group', type=d.T.string)]), withGroup(group): { quobyte+: { group: group } }, - '#withReadOnly':: d.fn(help='"ReadOnly here will force the Quobyte volume to be mounted with read-only permissions. Defaults to false."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly here will force the Quobyte volume to be mounted with read-only permissions. 
Defaults to false."', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { quobyte+: { readOnly: readOnly } }, - '#withRegistry':: d.fn(help='"Registry represents a single or multiple Quobyte Registry services specified as a string as host:port pair (multiple entries are separated with commas) which acts as the central registry for volumes"', args=[d.arg(name='registry', type=d.T.string)]), + '#withRegistry':: d.fn(help='"registry represents a single or multiple Quobyte Registry services specified as a string as host:port pair (multiple entries are separated with commas) which acts as the central registry for volumes"', args=[d.arg(name='registry', type=d.T.string)]), withRegistry(registry): { quobyte+: { registry: registry } }, - '#withTenant':: d.fn(help='"Tenant owning the given Quobyte volume in the Backend Used with dynamically provisioned Quobyte volumes, value is set by the plugin"', args=[d.arg(name='tenant', type=d.T.string)]), + '#withTenant':: d.fn(help='"tenant owning the given Quobyte volume in the Backend Used with dynamically provisioned Quobyte volumes, value is set by the plugin"', args=[d.arg(name='tenant', type=d.T.string)]), withTenant(tenant): { quobyte+: { tenant: tenant } }, - '#withUser':: d.fn(help='"User to map volume access to Defaults to serivceaccount user"', args=[d.arg(name='user', type=d.T.string)]), + '#withUser':: d.fn(help='"user to map volume access to Defaults to serivceaccount user"', args=[d.arg(name='user', type=d.T.string)]), withUser(user): { quobyte+: { user: user } }, - '#withVolume':: d.fn(help='"Volume is a string that references an already created Quobyte volume by name."', args=[d.arg(name='volume', type=d.T.string)]), + '#withVolume':: d.fn(help='"volume is a string that references an already created Quobyte volume by name."', args=[d.arg(name='volume', type=d.T.string)]), withVolume(volume): { quobyte+: { volume: volume } }, }, '#rbd':: d.obj(help='"Represents a Rados Block Device mount that lasts the lifetime of a pod. RBD volumes support ownership management and SELinux relabeling."'), @@ -383,21 +394,21 @@ '#withName':: d.fn(help='"Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { rbd+: { secretRef+: { name: name } } }, }, - '#withFsType':: d.fn(help='"Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd"', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType is the filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd"', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { rbd+: { fsType: fsType } }, - '#withImage':: d.fn(help='"The rados image name. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='image', type=d.T.string)]), + '#withImage':: d.fn(help='"image is the rados image name. 
More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='image', type=d.T.string)]), withImage(image): { rbd+: { image: image } }, - '#withKeyring':: d.fn(help='"Keyring is the path to key ring for RBDUser. Default is /etc/ceph/keyring. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='keyring', type=d.T.string)]), + '#withKeyring':: d.fn(help='"keyring is the path to key ring for RBDUser. Default is /etc/ceph/keyring. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='keyring', type=d.T.string)]), withKeyring(keyring): { rbd+: { keyring: keyring } }, - '#withMonitors':: d.fn(help='"A collection of Ceph monitors. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='monitors', type=d.T.array)]), + '#withMonitors':: d.fn(help='"monitors is a collection of Ceph monitors. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='monitors', type=d.T.array)]), withMonitors(monitors): { rbd+: { monitors: if std.isArray(v=monitors) then monitors else [monitors] } }, - '#withMonitorsMixin':: d.fn(help='"A collection of Ceph monitors. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='monitors', type=d.T.array)]), + '#withMonitorsMixin':: d.fn(help='"monitors is a collection of Ceph monitors. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='monitors', type=d.T.array)]), withMonitorsMixin(monitors): { rbd+: { monitors+: if std.isArray(v=monitors) then monitors else [monitors] } }, - '#withPool':: d.fn(help='"The rados pool name. Default is rbd. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='pool', type=d.T.string)]), + '#withPool':: d.fn(help='"pool is the rados pool name. Default is rbd. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='pool', type=d.T.string)]), withPool(pool): { rbd+: { pool: pool } }, - '#withReadOnly':: d.fn(help='"ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { rbd+: { readOnly: readOnly } }, - '#withUser':: d.fn(help='"The rados user name. Default is admin. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='user', type=d.T.string)]), + '#withUser':: d.fn(help='"user is the rados user name. Default is admin. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='user', type=d.T.string)]), withUser(user): { rbd+: { user: user } }, }, '#scaleIO':: d.obj(help='"ScaleIOVolumeSource represents a persistent ScaleIO volume"'), @@ -407,36 +418,36 @@ '#withName':: d.fn(help='"Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { scaleIO+: { secretRef+: { name: name } } }, }, - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Default is \\"xfs\\"."', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Default is \\"xfs\\"."', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { scaleIO+: { fsType: fsType } }, - '#withGateway':: d.fn(help='"The host address of the ScaleIO API Gateway."', args=[d.arg(name='gateway', type=d.T.string)]), + '#withGateway':: d.fn(help='"gateway is the host address of the ScaleIO API Gateway."', args=[d.arg(name='gateway', type=d.T.string)]), withGateway(gateway): { scaleIO+: { gateway: gateway } }, - '#withProtectionDomain':: d.fn(help='"The name of the ScaleIO Protection Domain for the configured storage."', args=[d.arg(name='protectionDomain', type=d.T.string)]), + '#withProtectionDomain':: d.fn(help='"protectionDomain is the name of the ScaleIO Protection Domain for the configured storage."', args=[d.arg(name='protectionDomain', type=d.T.string)]), withProtectionDomain(protectionDomain): { scaleIO+: { protectionDomain: protectionDomain } }, - '#withReadOnly':: d.fn(help='"Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { scaleIO+: { readOnly: readOnly } }, - '#withSslEnabled':: d.fn(help='"Flag to enable/disable SSL communication with Gateway, default false"', args=[d.arg(name='sslEnabled', type=d.T.boolean)]), + '#withSslEnabled':: d.fn(help='"sslEnabled Flag enable/disable SSL communication with Gateway, default false"', args=[d.arg(name='sslEnabled', type=d.T.boolean)]), withSslEnabled(sslEnabled): { scaleIO+: { sslEnabled: sslEnabled } }, - '#withStorageMode':: d.fn(help='"Indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. Default is ThinProvisioned."', args=[d.arg(name='storageMode', type=d.T.string)]), + '#withStorageMode':: d.fn(help='"storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. 
Default is ThinProvisioned."', args=[d.arg(name='storageMode', type=d.T.string)]), withStorageMode(storageMode): { scaleIO+: { storageMode: storageMode } }, - '#withStoragePool':: d.fn(help='"The ScaleIO Storage Pool associated with the protection domain."', args=[d.arg(name='storagePool', type=d.T.string)]), + '#withStoragePool':: d.fn(help='"storagePool is the ScaleIO Storage Pool associated with the protection domain."', args=[d.arg(name='storagePool', type=d.T.string)]), withStoragePool(storagePool): { scaleIO+: { storagePool: storagePool } }, - '#withSystem':: d.fn(help='"The name of the storage system as configured in ScaleIO."', args=[d.arg(name='system', type=d.T.string)]), + '#withSystem':: d.fn(help='"system is the name of the storage system as configured in ScaleIO."', args=[d.arg(name='system', type=d.T.string)]), withSystem(system): { scaleIO+: { system: system } }, - '#withVolumeName':: d.fn(help='"The name of a volume already created in the ScaleIO system that is associated with this volume source."', args=[d.arg(name='volumeName', type=d.T.string)]), + '#withVolumeName':: d.fn(help='"volumeName is the name of a volume already created in the ScaleIO system that is associated with this volume source."', args=[d.arg(name='volumeName', type=d.T.string)]), withVolumeName(volumeName): { scaleIO+: { volumeName: volumeName } }, }, '#secret':: d.obj(help="\"Adapts a Secret into a volume.\\n\\nThe contents of the target Secret's Data field will be presented in a volume as files using the keys in the Data field as the file names. Secret volumes support ownership management and SELinux relabeling.\""), secret: { - '#withDefaultMode':: d.fn(help='"Optional: mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set."', args=[d.arg(name='defaultMode', type=d.T.integer)]), + '#withDefaultMode':: d.fn(help='"defaultMode is Optional: mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set."', args=[d.arg(name='defaultMode', type=d.T.integer)]), withDefaultMode(defaultMode): { secret+: { defaultMode: defaultMode } }, - '#withItems':: d.fn(help="\"If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' 
path or start with '..'.\"", args=[d.arg(name='items', type=d.T.array)]), + '#withItems':: d.fn(help="\"items If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.\"", args=[d.arg(name='items', type=d.T.array)]), withItems(items): { secret+: { items: if std.isArray(v=items) then items else [items] } }, - '#withItemsMixin':: d.fn(help="\"If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='items', type=d.T.array)]), + '#withItemsMixin':: d.fn(help="\"items If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='items', type=d.T.array)]), withItemsMixin(items): { secret+: { items+: if std.isArray(v=items) then items else [items] } }, - '#withOptional':: d.fn(help='"Specify whether the Secret or its keys must be defined"', args=[d.arg(name='optional', type=d.T.boolean)]), + '#withOptional':: d.fn(help='"optional field specify whether the Secret or its keys must be defined"', args=[d.arg(name='optional', type=d.T.boolean)]), withOptional(optional): { secret+: { optional: optional } }, - '#withSecretName':: d.fn(help="\"Name of the secret in the pod's namespace to use. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret\"", args=[d.arg(name='secretName', type=d.T.string)]), + '#withSecretName':: d.fn(help="\"secretName is the name of the secret in the pod's namespace to use. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret\"", args=[d.arg(name='secretName', type=d.T.string)]), withSecretName(secretName): { secret+: { secretName: secretName } }, }, '#storageos':: d.obj(help='"Represents a StorageOS persistent volume resource."'), @@ -446,27 +457,27 @@ '#withName':: d.fn(help='"Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { storageos+: { secretRef+: { name: name } } }, }, - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". 
Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { storageos+: { fsType: fsType } }, - '#withReadOnly':: d.fn(help='"Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { storageos+: { readOnly: readOnly } }, - '#withVolumeName':: d.fn(help='"VolumeName is the human-readable name of the StorageOS volume. Volume names are only unique within a namespace."', args=[d.arg(name='volumeName', type=d.T.string)]), + '#withVolumeName':: d.fn(help='"volumeName is the human-readable name of the StorageOS volume. Volume names are only unique within a namespace."', args=[d.arg(name='volumeName', type=d.T.string)]), withVolumeName(volumeName): { storageos+: { volumeName: volumeName } }, - '#withVolumeNamespace':: d.fn(help="\"VolumeNamespace specifies the scope of the volume within StorageOS. If no namespace is specified then the Pod's namespace will be used. This allows the Kubernetes name scoping to be mirrored within StorageOS for tighter integration. Set VolumeName to any name to override the default behaviour. Set to \\\"default\\\" if you are not using namespaces within StorageOS. Namespaces that do not pre-exist within StorageOS will be created.\"", args=[d.arg(name='volumeNamespace', type=d.T.string)]), + '#withVolumeNamespace':: d.fn(help="\"volumeNamespace specifies the scope of the volume within StorageOS. If no namespace is specified then the Pod's namespace will be used. This allows the Kubernetes name scoping to be mirrored within StorageOS for tighter integration. Set VolumeName to any name to override the default behaviour. Set to \\\"default\\\" if you are not using namespaces within StorageOS. Namespaces that do not pre-exist within StorageOS will be created.\"", args=[d.arg(name='volumeNamespace', type=d.T.string)]), withVolumeNamespace(volumeNamespace): { storageos+: { volumeNamespace: volumeNamespace } }, }, '#vsphereVolume':: d.obj(help='"Represents a vSphere volume resource."'), vsphereVolume: { - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType is filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". 
Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { vsphereVolume+: { fsType: fsType } }, - '#withStoragePolicyID':: d.fn(help='"Storage Policy Based Management (SPBM) profile ID associated with the StoragePolicyName."', args=[d.arg(name='storagePolicyID', type=d.T.string)]), + '#withStoragePolicyID':: d.fn(help='"storagePolicyID is the storage Policy Based Management (SPBM) profile ID associated with the StoragePolicyName."', args=[d.arg(name='storagePolicyID', type=d.T.string)]), withStoragePolicyID(storagePolicyID): { vsphereVolume+: { storagePolicyID: storagePolicyID } }, - '#withStoragePolicyName':: d.fn(help='"Storage Policy Based Management (SPBM) profile name."', args=[d.arg(name='storagePolicyName', type=d.T.string)]), + '#withStoragePolicyName':: d.fn(help='"storagePolicyName is the storage Policy Based Management (SPBM) profile name."', args=[d.arg(name='storagePolicyName', type=d.T.string)]), withStoragePolicyName(storagePolicyName): { vsphereVolume+: { storagePolicyName: storagePolicyName } }, - '#withVolumePath':: d.fn(help='"Path that identifies vSphere volume vmdk"', args=[d.arg(name='volumePath', type=d.T.string)]), + '#withVolumePath':: d.fn(help='"volumePath is the path that identifies vSphere volume vmdk"', args=[d.arg(name='volumePath', type=d.T.string)]), withVolumePath(volumePath): { vsphereVolume+: { volumePath: volumePath } }, }, - '#withName':: d.fn(help="\"Volume's name. Must be a DNS_LABEL and unique within the pod. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\"", args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"name of the volume. Must be a DNS_LABEL and unique within the pod. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { name: name }, '#mixin': 'ignore', mixin: self, diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/volumeDevice.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/volumeDevice.libsonnet similarity index 100% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/volumeDevice.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/volumeDevice.libsonnet diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/volumeMount.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/volumeMount.libsonnet similarity index 100% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/volumeMount.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/volumeMount.libsonnet diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/volumeNodeAffinity.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/volumeNodeAffinity.libsonnet similarity index 100% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/volumeNodeAffinity.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/volumeNodeAffinity.libsonnet diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/volumeProjection.libsonnet 
b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/volumeProjection.libsonnet new file mode 100644 index 00000000000..80ce06af152 --- /dev/null +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/volumeProjection.libsonnet @@ -0,0 +1,66 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='volumeProjection', url='', help='"Projection that may be projected along with other supported volume types"'), + '#clusterTrustBundle':: d.obj(help='"ClusterTrustBundleProjection describes how to select a set of ClusterTrustBundle objects and project their contents into the pod filesystem."'), + clusterTrustBundle: { + '#labelSelector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), + labelSelector: { + '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressions(matchExpressions): { clusterTrustBundle+: { labelSelector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } }, + '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressionsMixin(matchExpressions): { clusterTrustBundle+: { labelSelector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } }, + '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabels(matchLabels): { clusterTrustBundle+: { labelSelector+: { matchLabels: matchLabels } } }, + '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabelsMixin(matchLabels): { clusterTrustBundle+: { labelSelector+: { matchLabels+: matchLabels } } }, + }, + '#withName':: d.fn(help='"Select a single ClusterTrustBundle by object name. Mutually-exclusive with signerName and labelSelector."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { clusterTrustBundle+: { name: name } }, + '#withOptional':: d.fn(help="\"If true, don't block pod startup if the referenced ClusterTrustBundle(s) aren't available. If using name, then the named ClusterTrustBundle is allowed not to exist. 
If using signerName, then the combination of signerName and labelSelector is allowed to match zero ClusterTrustBundles.\"", args=[d.arg(name='optional', type=d.T.boolean)]), + withOptional(optional): { clusterTrustBundle+: { optional: optional } }, + '#withPath':: d.fn(help='"Relative path from the volume root to write the bundle."', args=[d.arg(name='path', type=d.T.string)]), + withPath(path): { clusterTrustBundle+: { path: path } }, + '#withSignerName':: d.fn(help='"Select all ClusterTrustBundles that match this signer name. Mutually-exclusive with name. The contents of all selected ClusterTrustBundles will be unified and deduplicated."', args=[d.arg(name='signerName', type=d.T.string)]), + withSignerName(signerName): { clusterTrustBundle+: { signerName: signerName } }, + }, + '#configMap':: d.obj(help="\"Adapts a ConfigMap into a projected volume.\\n\\nThe contents of the target ConfigMap's Data field will be presented in a projected volume as files using the keys in the Data field as the file names, unless the items element is populated with specific mappings of keys to paths. Note that this is identical to a configmap volume source without the default mode.\""), + configMap: { + '#withItems':: d.fn(help="\"items if unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.\"", args=[d.arg(name='items', type=d.T.array)]), + withItems(items): { configMap+: { items: if std.isArray(v=items) then items else [items] } }, + '#withItemsMixin':: d.fn(help="\"items if unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='items', type=d.T.array)]), + withItemsMixin(items): { configMap+: { items+: if std.isArray(v=items) then items else [items] } }, + '#withName':: d.fn(help='"Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { configMap+: { name: name } }, + '#withOptional':: d.fn(help='"optional specify whether the ConfigMap or its keys must be defined"', args=[d.arg(name='optional', type=d.T.boolean)]), + withOptional(optional): { configMap+: { optional: optional } }, + }, + '#downwardAPI':: d.obj(help='"Represents downward API info for projecting into a projected volume. 
Note that this is identical to a downwardAPI volume source without the default mode."'), + downwardAPI: { + '#withItems':: d.fn(help='"Items is a list of DownwardAPIVolume file"', args=[d.arg(name='items', type=d.T.array)]), + withItems(items): { downwardAPI+: { items: if std.isArray(v=items) then items else [items] } }, + '#withItemsMixin':: d.fn(help='"Items is a list of DownwardAPIVolume file"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='items', type=d.T.array)]), + withItemsMixin(items): { downwardAPI+: { items+: if std.isArray(v=items) then items else [items] } }, + }, + '#secret':: d.obj(help="\"Adapts a secret into a projected volume.\\n\\nThe contents of the target Secret's Data field will be presented in a projected volume as files using the keys in the Data field as the file names. Note that this is identical to a secret volume source without the default mode.\""), + secret: { + '#withItems':: d.fn(help="\"items if unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.\"", args=[d.arg(name='items', type=d.T.array)]), + withItems(items): { secret+: { items: if std.isArray(v=items) then items else [items] } }, + '#withItemsMixin':: d.fn(help="\"items if unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='items', type=d.T.array)]), + withItemsMixin(items): { secret+: { items+: if std.isArray(v=items) then items else [items] } }, + '#withName':: d.fn(help='"Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { secret+: { name: name } }, + '#withOptional':: d.fn(help='"optional field specify whether the Secret or its key must be defined"', args=[d.arg(name='optional', type=d.T.boolean)]), + withOptional(optional): { secret+: { optional: optional } }, + }, + '#serviceAccountToken':: d.obj(help='"ServiceAccountTokenProjection represents a projected service account token volume. This projection can be used to insert a service account token into the pods runtime filesystem for use against APIs (Kubernetes API Server or otherwise)."'), + serviceAccountToken: { + '#withAudience':: d.fn(help='"audience is the intended audience of the token. A recipient of a token must identify itself with an identifier specified in the audience of the token, and otherwise should reject the token. 
The audience defaults to the identifier of the apiserver."', args=[d.arg(name='audience', type=d.T.string)]), + withAudience(audience): { serviceAccountToken+: { audience: audience } }, + '#withExpirationSeconds':: d.fn(help='"expirationSeconds is the requested duration of validity of the service account token. As the token approaches expiration, the kubelet volume plugin will proactively rotate the service account token. The kubelet will start trying to rotate the token if the token is older than 80 percent of its time to live or if the token is older than 24 hours.Defaults to 1 hour and must be at least 10 minutes."', args=[d.arg(name='expirationSeconds', type=d.T.integer)]), + withExpirationSeconds(expirationSeconds): { serviceAccountToken+: { expirationSeconds: expirationSeconds } }, + '#withPath':: d.fn(help='"path is the path relative to the mount point of the file to project the token into."', args=[d.arg(name='path', type=d.T.string)]), + withPath(path): { serviceAccountToken+: { path: path } }, + }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/resourceRequirements.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/volumeResourceRequirements.libsonnet similarity index 64% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/resourceRequirements.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/volumeResourceRequirements.libsonnet index 4a585a66da2..63bede95a8c 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/resourceRequirements.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/volumeResourceRequirements.libsonnet @@ -1,13 +1,13 @@ { local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='resourceRequirements', url='', help='"ResourceRequirements describes the compute resource requirements."'), + '#':: d.pkg(name='volumeResourceRequirements', url='', help='"VolumeResourceRequirements describes the storage resource requirements for a volume."'), '#withLimits':: d.fn(help='"Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/"', args=[d.arg(name='limits', type=d.T.object)]), withLimits(limits): { limits: limits }, '#withLimitsMixin':: d.fn(help='"Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='limits', type=d.T.object)]), withLimitsMixin(limits): { limits+: limits }, - '#withRequests':: d.fn(help='"Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/"', args=[d.arg(name='requests', type=d.T.object)]), + '#withRequests':: d.fn(help='"Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/"', args=[d.arg(name='requests', type=d.T.object)]), withRequests(requests): { requests: requests }, - '#withRequestsMixin':: d.fn(help='"Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='requests', type=d.T.object)]), + '#withRequestsMixin':: d.fn(help='"Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='requests', type=d.T.object)]), withRequestsMixin(requests): { requests+: requests }, '#mixin': 'ignore', mixin: self, diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/vsphereVirtualDiskVolumeSource.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/vsphereVirtualDiskVolumeSource.libsonnet new file mode 100644 index 00000000000..61cb5cfde2b --- /dev/null +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/vsphereVirtualDiskVolumeSource.libsonnet @@ -0,0 +1,14 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='vsphereVirtualDiskVolumeSource', url='', help='"Represents a vSphere volume resource."'), + '#withFsType':: d.fn(help='"fsType is filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". 
Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), + withFsType(fsType): { fsType: fsType }, + '#withStoragePolicyID':: d.fn(help='"storagePolicyID is the storage Policy Based Management (SPBM) profile ID associated with the StoragePolicyName."', args=[d.arg(name='storagePolicyID', type=d.T.string)]), + withStoragePolicyID(storagePolicyID): { storagePolicyID: storagePolicyID }, + '#withStoragePolicyName':: d.fn(help='"storagePolicyName is the storage Policy Based Management (SPBM) profile name."', args=[d.arg(name='storagePolicyName', type=d.T.string)]), + withStoragePolicyName(storagePolicyName): { storagePolicyName: storagePolicyName }, + '#withVolumePath':: d.fn(help='"volumePath is the path that identifies vSphere volume vmdk"', args=[d.arg(name='volumePath', type=d.T.string)]), + withVolumePath(volumePath): { volumePath: volumePath }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/weightedPodAffinityTerm.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/weightedPodAffinityTerm.libsonnet similarity index 61% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/weightedPodAffinityTerm.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/weightedPodAffinityTerm.libsonnet index 3e01af44be1..f1bc1092cd2 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/weightedPodAffinityTerm.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/weightedPodAffinityTerm.libsonnet @@ -25,9 +25,17 @@ '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), withMatchLabelsMixin(matchLabels): { podAffinityTerm+: { namespaceSelector+: { matchLabels+: matchLabels } } }, }, - '#withNamespaces':: d.fn(help="\"namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means \\\"this pod's namespace\\", args=[d.arg(name='namespaces', type=d.T.array)]), + '#withMatchLabelKeys':: d.fn(help="\"MatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. Also, MatchLabelKeys cannot be set when LabelSelector isn't set. 
This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.\"", args=[d.arg(name='matchLabelKeys', type=d.T.array)]), + withMatchLabelKeys(matchLabelKeys): { podAffinityTerm+: { matchLabelKeys: if std.isArray(v=matchLabelKeys) then matchLabelKeys else [matchLabelKeys] } }, + '#withMatchLabelKeysMixin':: d.fn(help="\"MatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. Also, MatchLabelKeys cannot be set when LabelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='matchLabelKeys', type=d.T.array)]), + withMatchLabelKeysMixin(matchLabelKeys): { podAffinityTerm+: { matchLabelKeys+: if std.isArray(v=matchLabelKeys) then matchLabelKeys else [matchLabelKeys] } }, + '#withMismatchLabelKeys':: d.fn(help="\"MismatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.\"", args=[d.arg(name='mismatchLabelKeys', type=d.T.array)]), + withMismatchLabelKeys(mismatchLabelKeys): { podAffinityTerm+: { mismatchLabelKeys: if std.isArray(v=mismatchLabelKeys) then mismatchLabelKeys else [mismatchLabelKeys] } }, + '#withMismatchLabelKeysMixin':: d.fn(help="\"MismatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='mismatchLabelKeys', type=d.T.array)]), + withMismatchLabelKeysMixin(mismatchLabelKeys): { podAffinityTerm+: { mismatchLabelKeys+: if std.isArray(v=mismatchLabelKeys) then mismatchLabelKeys else [mismatchLabelKeys] } }, + '#withNamespaces':: d.fn(help="\"namespaces specifies a static list of namespace names that the term applies to. 
The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means \\\"this pod's namespace\\\".\"", args=[d.arg(name='namespaces', type=d.T.array)]), withNamespaces(namespaces): { podAffinityTerm+: { namespaces: if std.isArray(v=namespaces) then namespaces else [namespaces] } }, - '#withNamespacesMixin':: d.fn(help="\"namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means \\\"this pod's namespace\\\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='namespaces', type=d.T.array)]), + '#withNamespacesMixin':: d.fn(help="\"namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means \\\"this pod's namespace\\\".\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='namespaces', type=d.T.array)]), withNamespacesMixin(namespaces): { podAffinityTerm+: { namespaces+: if std.isArray(v=namespaces) then namespaces else [namespaces] } }, '#withTopologyKey':: d.fn(help='"This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed."', args=[d.arg(name='topologyKey', type=d.T.string)]), withTopologyKey(topologyKey): { podAffinityTerm+: { topologyKey: topologyKey } }, diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/windowsSecurityContextOptions.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/windowsSecurityContextOptions.libsonnet similarity index 74% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/windowsSecurityContextOptions.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/windowsSecurityContextOptions.libsonnet index b828906f099..431c79aa82b 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/windowsSecurityContextOptions.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/windowsSecurityContextOptions.libsonnet @@ -5,6 +5,8 @@ withGmsaCredentialSpec(gmsaCredentialSpec): { gmsaCredentialSpec: gmsaCredentialSpec }, '#withGmsaCredentialSpecName':: d.fn(help='"GMSACredentialSpecName is the name of the GMSA credential spec to use."', args=[d.arg(name='gmsaCredentialSpecName', type=d.T.string)]), withGmsaCredentialSpecName(gmsaCredentialSpecName): { gmsaCredentialSpecName: gmsaCredentialSpecName }, + '#withHostProcess':: d.fn(help="\"HostProcess determines if a container should be run as a 'Host Process' container. All of a Pod's containers must have the same effective HostProcess value (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). 
In addition, if HostProcess is true then HostNetwork must also be set to true.\"", args=[d.arg(name='hostProcess', type=d.T.boolean)]), + withHostProcess(hostProcess): { hostProcess: hostProcess }, '#withRunAsUserName':: d.fn(help='"The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence."', args=[d.arg(name='runAsUserName', type=d.T.string)]), withRunAsUserName(runAsUserName): { runAsUserName: runAsUserName }, '#mixin': 'ignore', diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/discovery/main.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/discovery/main.libsonnet similarity index 75% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/discovery/main.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/discovery/main.libsonnet index 1b7f8b0952e..166970c3673 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/discovery/main.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/discovery/main.libsonnet @@ -2,5 +2,4 @@ local d = (import 'doc-util/main.libsonnet'), '#':: d.pkg(name='discovery', url='', help=''), v1: (import 'v1/main.libsonnet'), - v1beta1: (import 'v1beta1/main.libsonnet'), } diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/discovery/v1/endpoint.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/discovery/v1/endpoint.libsonnet similarity index 89% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/discovery/v1/endpoint.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/discovery/v1/endpoint.libsonnet index 0eff672d68a..7b977662d75 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/discovery/v1/endpoint.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/discovery/v1/endpoint.libsonnet @@ -3,11 +3,11 @@ '#':: d.pkg(name='endpoint', url='', help='"Endpoint represents a single logical \\"backend\\" implementing a service."'), '#conditions':: d.obj(help='"EndpointConditions represents the current condition of an endpoint."'), conditions: { - '#withReady':: d.fn(help='"ready indicates that this endpoint is prepared to receive traffic, according to whatever system is managing the endpoint. A nil value indicates an unknown state. In most cases consumers should interpret this unknown state as ready. For compatibility reasons, ready should never be \\"true\\" for terminating endpoints."', args=[d.arg(name='ready', type=d.T.boolean)]), + '#withReady':: d.fn(help='"ready indicates that this endpoint is prepared to receive traffic, according to whatever system is managing the endpoint. A nil value indicates an unknown state. In most cases consumers should interpret this unknown state as ready. 
For compatibility reasons, ready should never be \\"true\\" for terminating endpoints, except when the normal readiness behavior is being explicitly overridden, for example when the associated Service has set the publishNotReadyAddresses flag."', args=[d.arg(name='ready', type=d.T.boolean)]), withReady(ready): { conditions+: { ready: ready } }, - '#withServing':: d.fn(help='"serving is identical to ready except that it is set regardless of the terminating state of endpoints. This condition should be set to true for a ready endpoint that is terminating. If nil, consumers should defer to the ready condition. This field can be enabled with the EndpointSliceTerminatingCondition feature gate."', args=[d.arg(name='serving', type=d.T.boolean)]), + '#withServing':: d.fn(help='"serving is identical to ready except that it is set regardless of the terminating state of endpoints. This condition should be set to true for a ready endpoint that is terminating. If nil, consumers should defer to the ready condition."', args=[d.arg(name='serving', type=d.T.boolean)]), withServing(serving): { conditions+: { serving: serving } }, - '#withTerminating':: d.fn(help='"terminating indicates that this endpoint is terminating. A nil value indicates an unknown state. Consumers should interpret this unknown state to mean that the endpoint is not terminating. This field can be enabled with the EndpointSliceTerminatingCondition feature gate."', args=[d.arg(name='terminating', type=d.T.boolean)]), + '#withTerminating':: d.fn(help='"terminating indicates that this endpoint is terminating. A nil value indicates an unknown state. Consumers should interpret this unknown state to mean that the endpoint is not terminating."', args=[d.arg(name='terminating', type=d.T.boolean)]), withTerminating(terminating): { conditions+: { terminating: terminating } }, }, '#hints':: d.obj(help='"EndpointHints provides hints describing how an endpoint should be consumed."'), @@ -34,9 +34,9 @@ '#withUid':: d.fn(help='"UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { targetRef+: { uid: uid } }, }, - '#withAddresses':: d.fn(help='"addresses of this endpoint. The contents of this field are interpreted according to the corresponding EndpointSlice addressType field. Consumers must handle different types of addresses in the context of their own capabilities. This must contain at least one address but no more than 100."', args=[d.arg(name='addresses', type=d.T.array)]), + '#withAddresses':: d.fn(help='"addresses of this endpoint. The contents of this field are interpreted according to the corresponding EndpointSlice addressType field. Consumers must handle different types of addresses in the context of their own capabilities. This must contain at least one address but no more than 100. These are all assumed to be fungible and clients may choose to only use the first element. Refer to: https://issue.k8s.io/106267"', args=[d.arg(name='addresses', type=d.T.array)]), withAddresses(addresses): { addresses: if std.isArray(v=addresses) then addresses else [addresses] }, - '#withAddressesMixin':: d.fn(help='"addresses of this endpoint. The contents of this field are interpreted according to the corresponding EndpointSlice addressType field. Consumers must handle different types of addresses in the context of their own capabilities. 
This must contain at least one address but no more than 100."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='addresses', type=d.T.array)]), + '#withAddressesMixin':: d.fn(help='"addresses of this endpoint. The contents of this field are interpreted according to the corresponding EndpointSlice addressType field. Consumers must handle different types of addresses in the context of their own capabilities. This must contain at least one address but no more than 100. These are all assumed to be fungible and clients may choose to only use the first element. Refer to: https://issue.k8s.io/106267"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='addresses', type=d.T.array)]), withAddressesMixin(addresses): { addresses+: if std.isArray(v=addresses) then addresses else [addresses] }, '#withDeprecatedTopology':: d.fn(help='"deprecatedTopology contains topology information part of the v1beta1 API. This field is deprecated, and will be removed when the v1beta1 API is removed (no sooner than kubernetes v1.24). While this field can hold values, it is not writable through the v1 API, and any attempts to write to it will be silently ignored. Topology information can be found in the zone and nodeName fields instead."', args=[d.arg(name='deprecatedTopology', type=d.T.object)]), withDeprecatedTopology(deprecatedTopology): { deprecatedTopology: deprecatedTopology }, @@ -44,7 +44,7 @@ withDeprecatedTopologyMixin(deprecatedTopology): { deprecatedTopology+: deprecatedTopology }, '#withHostname':: d.fn(help='"hostname of this endpoint. This field may be used by consumers of endpoints to distinguish endpoints from each other (e.g. in DNS names). Multiple endpoints which use the same hostname should be considered fungible (e.g. multiple A values in DNS). Must be lowercase and pass DNS Label (RFC 1123) validation."', args=[d.arg(name='hostname', type=d.T.string)]), withHostname(hostname): { hostname: hostname }, - '#withNodeName':: d.fn(help='"nodeName represents the name of the Node hosting this endpoint. This can be used to determine endpoints local to a Node. This field can be enabled with the EndpointSliceNodeName feature gate."', args=[d.arg(name='nodeName', type=d.T.string)]), + '#withNodeName':: d.fn(help='"nodeName represents the name of the Node hosting this endpoint. 
This can be used to determine endpoints local to a Node."', args=[d.arg(name='nodeName', type=d.T.string)]), withNodeName(nodeName): { nodeName: nodeName }, '#withZone':: d.fn(help='"zone is the name of the Zone this endpoint exists in."', args=[d.arg(name='zone', type=d.T.string)]), withZone(zone): { zone: zone }, diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/discovery/v1/endpointConditions.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/discovery/v1/endpointConditions.libsonnet similarity index 74% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/discovery/v1/endpointConditions.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/discovery/v1/endpointConditions.libsonnet index 9a99830684d..fc7c80f2663 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/discovery/v1/endpointConditions.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/discovery/v1/endpointConditions.libsonnet @@ -1,11 +1,11 @@ { local d = (import 'doc-util/main.libsonnet'), '#':: d.pkg(name='endpointConditions', url='', help='"EndpointConditions represents the current condition of an endpoint."'), - '#withReady':: d.fn(help='"ready indicates that this endpoint is prepared to receive traffic, according to whatever system is managing the endpoint. A nil value indicates an unknown state. In most cases consumers should interpret this unknown state as ready. For compatibility reasons, ready should never be \\"true\\" for terminating endpoints."', args=[d.arg(name='ready', type=d.T.boolean)]), + '#withReady':: d.fn(help='"ready indicates that this endpoint is prepared to receive traffic, according to whatever system is managing the endpoint. A nil value indicates an unknown state. In most cases consumers should interpret this unknown state as ready. For compatibility reasons, ready should never be \\"true\\" for terminating endpoints, except when the normal readiness behavior is being explicitly overridden, for example when the associated Service has set the publishNotReadyAddresses flag."', args=[d.arg(name='ready', type=d.T.boolean)]), withReady(ready): { ready: ready }, - '#withServing':: d.fn(help='"serving is identical to ready except that it is set regardless of the terminating state of endpoints. This condition should be set to true for a ready endpoint that is terminating. If nil, consumers should defer to the ready condition. This field can be enabled with the EndpointSliceTerminatingCondition feature gate."', args=[d.arg(name='serving', type=d.T.boolean)]), + '#withServing':: d.fn(help='"serving is identical to ready except that it is set regardless of the terminating state of endpoints. This condition should be set to true for a ready endpoint that is terminating. If nil, consumers should defer to the ready condition."', args=[d.arg(name='serving', type=d.T.boolean)]), withServing(serving): { serving: serving }, - '#withTerminating':: d.fn(help='"terminating indicates that this endpoint is terminating. A nil value indicates an unknown state. Consumers should interpret this unknown state to mean that the endpoint is not terminating. This field can be enabled with the EndpointSliceTerminatingCondition feature gate."', args=[d.arg(name='terminating', type=d.T.boolean)]), + '#withTerminating':: d.fn(help='"terminating indicates that this endpoint is terminating. A nil value indicates an unknown state. 
Consumers should interpret this unknown state to mean that the endpoint is not terminating."', args=[d.arg(name='terminating', type=d.T.boolean)]), withTerminating(terminating): { terminating: terminating }, '#mixin': 'ignore', mixin: self, diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/discovery/v1/endpointHints.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/discovery/v1/endpointHints.libsonnet similarity index 100% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/discovery/v1/endpointHints.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/discovery/v1/endpointHints.libsonnet diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/discovery/v1/endpointPort.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/discovery/v1/endpointPort.libsonnet new file mode 100644 index 00000000000..a3d80f6fee4 --- /dev/null +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/discovery/v1/endpointPort.libsonnet @@ -0,0 +1,14 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='endpointPort', url='', help='"EndpointPort represents a Port used by an EndpointSlice"'), + '#withAppProtocol':: d.fn(help="\"The application protocol for this port. This is used as a hint for implementations to offer richer behavior for protocols that they understand. This field follows standard Kubernetes label syntax. Valid values are either:\\n\\n* Un-prefixed protocol names - reserved for IANA standard service names (as per RFC-6335 and https://www.iana.org/assignments/service-names).\\n\\n* Kubernetes-defined prefixed names:\\n * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior-\\n * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455\\n * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455\\n\\n* Other protocols should use implementation-defined prefixed names such as mycompany.com/my-custom-protocol.\"", args=[d.arg(name='appProtocol', type=d.T.string)]), + withAppProtocol(appProtocol): { appProtocol: appProtocol }, + '#withName':: d.fn(help="\"name represents the name of this port. All ports in an EndpointSlice must have a unique name. If the EndpointSlice is derived from a Kubernetes service, this corresponds to the Service.ports[].name. Name must either be an empty string or pass DNS_LABEL validation: * must be no more than 63 characters long. * must consist of lower case alphanumeric characters or '-'. * must start and end with an alphanumeric character. Default is empty string.\"", args=[d.arg(name='name', type=d.T.string)]), + withName(name): { name: name }, + '#withPort':: d.fn(help='"port represents the port number of the endpoint. If this is not specified, ports are not restricted and must be interpreted in the context of the specific consumer."', args=[d.arg(name='port', type=d.T.integer)]), + withPort(port): { port: port }, + '#withProtocol':: d.fn(help='"protocol represents the IP protocol for this port. Must be UDP, TCP, or SCTP. 
Default is TCP."', args=[d.arg(name='protocol', type=d.T.string)]), + withProtocol(protocol): { protocol: protocol }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/discovery/v1/endpointSlice.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/discovery/v1/endpointSlice.libsonnet similarity index 85% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/discovery/v1/endpointSlice.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/discovery/v1/endpointSlice.libsonnet index 95f0b2c3018..2b74dbfcf47 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/discovery/v1/endpointSlice.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/discovery/v1/endpointSlice.libsonnet @@ -3,12 +3,10 @@ '#':: d.pkg(name='endpointSlice', url='', help='"EndpointSlice represents a subset of the endpoints that implement a service. For a given service there may be multiple EndpointSlice objects, selected by labels, which must be joined to produce the full set of endpoints."'), '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. 
This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -19,21 +17,21 @@ withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. 
This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { metadata+: { generateName: generateName } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { metadata+: { labels+: labels } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". 
The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { metadata+: { namespace: namespace } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, @@ -41,9 +39,9 @@ withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. 
They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { metadata+: { uid: uid } }, }, '#new':: d.fn(help='new returns an instance of EndpointSlice', args=[d.arg(name='name', type=d.T.string)]), diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/discovery/v1/forZone.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/discovery/v1/forZone.libsonnet similarity index 100% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/discovery/v1/forZone.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/discovery/v1/forZone.libsonnet diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/discovery/v1/main.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/discovery/v1/main.libsonnet similarity index 100% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/discovery/v1/main.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/discovery/v1/main.libsonnet diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/events/main.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/events/main.libsonnet similarity index 74% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/events/main.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/events/main.libsonnet index c60a9f4d15d..656164c451e 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/events/main.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/events/main.libsonnet @@ -2,5 +2,4 @@ local d = (import 'doc-util/main.libsonnet'), '#':: d.pkg(name='events', url='', help=''), v1: (import 'v1/main.libsonnet'), - v1beta1: 
(import 'v1beta1/main.libsonnet'), } diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/events/v1/event.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/events/v1/event.libsonnet similarity index 90% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/events/v1/event.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/events/v1/event.libsonnet index bcab9dec0b9..44db17976a6 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/events/v1/event.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/events/v1/event.libsonnet @@ -10,12 +10,10 @@ }, '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. 
Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -26,21 +24,21 @@ withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { metadata+: { generateName: generateName } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { metadata+: { labels+: labels } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. 
Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { metadata+: { namespace: namespace } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, @@ -48,9 +46,9 @@ withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { metadata+: { uid: uid } }, }, '#new':: d.fn(help='new returns an instance of Event', args=[d.arg(name='name', type=d.T.string)]), diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/events/v1/eventSeries.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/events/v1/eventSeries.libsonnet similarity index 100% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/events/v1/eventSeries.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/events/v1/eventSeries.libsonnet diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/events/v1/main.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/events/v1/main.libsonnet similarity index 100% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/events/v1/main.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/events/v1/main.libsonnet diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/flowcontrol/main.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/main.libsonnet similarity index 55% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/flowcontrol/main.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/main.libsonnet index f20b724e04c..c3d969d1ef2 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/flowcontrol/main.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/main.libsonnet @@ -1,5 +1,6 @@ { local d = (import 'doc-util/main.libsonnet'), '#':: d.pkg(name='flowcontrol', url='', help=''), - v1beta1: (import 'v1beta1/main.libsonnet'), + v1: (import 'v1/main.libsonnet'), + v1beta3: (import 
'v1beta3/main.libsonnet'), } diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1/exemptPriorityLevelConfiguration.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1/exemptPriorityLevelConfiguration.libsonnet new file mode 100644 index 00000000000..7ee3000ff9a --- /dev/null +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1/exemptPriorityLevelConfiguration.libsonnet @@ -0,0 +1,10 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='exemptPriorityLevelConfiguration', url='', help='"ExemptPriorityLevelConfiguration describes the configurable aspects of the handling of exempt requests. In the mandatory exempt configuration object the values in the fields here can be modified by authorized users, unlike the rest of the `spec`."'), + '#withLendablePercent':: d.fn(help="\"`lendablePercent` prescribes the fraction of the level's NominalCL that can be borrowed by other priority levels. This value of this field must be between 0 and 100, inclusive, and it defaults to 0. The number of seats that other levels can borrow from this level, known as this level's LendableConcurrencyLimit (LendableCL), is defined as follows.\\n\\nLendableCL(i) = round( NominalCL(i) * lendablePercent(i)/100.0 )\"", args=[d.arg(name='lendablePercent', type=d.T.integer)]), + withLendablePercent(lendablePercent): { lendablePercent: lendablePercent }, + '#withNominalConcurrencyShares':: d.fn(help="\"`nominalConcurrencyShares` (NCS) contributes to the computation of the NominalConcurrencyLimit (NominalCL) of this level. This is the number of execution seats nominally reserved for this priority level. This DOES NOT limit the dispatching from this priority level but affects the other priority levels through the borrowing mechanism. The server's concurrency limit (ServerCL) is divided among all the priority levels in proportion to their NCS values:\\n\\nNominalCL(i) = ceil( ServerCL * NCS(i) / sum_ncs ) sum_ncs = sum[priority level k] NCS(k)\\n\\nBigger numbers mean a larger nominal concurrency limit, at the expense of every other priority level. 
This field has a default value of zero.\"", args=[d.arg(name='nominalConcurrencyShares', type=d.T.integer)]), + withNominalConcurrencyShares(nominalConcurrencyShares): { nominalConcurrencyShares: nominalConcurrencyShares }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/flowcontrol/v1beta1/flowDistinguisherMethod.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1/flowDistinguisherMethod.libsonnet similarity index 100% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/flowcontrol/v1beta1/flowDistinguisherMethod.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1/flowDistinguisherMethod.libsonnet diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/flowcontrol/v1beta1/flowSchema.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1/flowSchema.libsonnet similarity index 85% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/flowcontrol/v1beta1/flowSchema.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1/flowSchema.libsonnet index 1e5df6febec..d27ead1fd4b 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/flowcontrol/v1beta1/flowSchema.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1/flowSchema.libsonnet @@ -3,12 +3,10 @@ '#':: d.pkg(name='flowSchema', url='', help='"FlowSchema defines the schema of a group of flows. Note that a flow is made up of a set of inbound API requests with similar attributes and is identified by a pair of strings: the name of the FlowSchema and a \\"flow distinguisher\\"."'), '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -19,21 +17,21 @@ withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. 
The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { metadata+: { generateName: generateName } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { metadata+: { labels+: labels } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. 
This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { metadata+: { namespace: namespace } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. 
There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, @@ -41,14 +39,14 @@ withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { metadata+: { uid: uid } }, }, '#new':: d.fn(help='new returns an instance of FlowSchema', args=[d.arg(name='name', type=d.T.string)]), new(name): { - apiVersion: 'flowcontrol.apiserver.k8s.io/v1beta1', + apiVersion: 'flowcontrol.apiserver.k8s.io/v1', kind: 'FlowSchema', } + self.metadata.withName(name=name), '#spec':: d.obj(help="\"FlowSchemaSpec describes how the FlowSchema's specification looks like.\""), diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/flowcontrol/v1beta1/flowSchemaCondition.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1/flowSchemaCondition.libsonnet similarity index 100% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/flowcontrol/v1beta1/flowSchemaCondition.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1/flowSchemaCondition.libsonnet diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/flowcontrol/v1beta1/flowSchemaSpec.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1/flowSchemaSpec.libsonnet similarity index 100% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/flowcontrol/v1beta1/flowSchemaSpec.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1/flowSchemaSpec.libsonnet diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/flowcontrol/v1beta1/flowSchemaStatus.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1/flowSchemaStatus.libsonnet similarity index 100% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/flowcontrol/v1beta1/flowSchemaStatus.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1/flowSchemaStatus.libsonnet diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/flowcontrol/v1beta1/groupSubject.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1/groupSubject.libsonnet similarity index 100% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/flowcontrol/v1beta1/groupSubject.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1/groupSubject.libsonnet diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/flowcontrol/v1beta1/limitResponse.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1/limitResponse.libsonnet similarity index 100% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/flowcontrol/v1beta1/limitResponse.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1/limitResponse.libsonnet diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1/limitedPriorityLevelConfiguration.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1/limitedPriorityLevelConfiguration.libsonnet new file mode 100644 index 00000000000..20776090efc --- /dev/null +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1/limitedPriorityLevelConfiguration.libsonnet @@ -0,0 +1,26 @@ +{ + local d = (import 
'doc-util/main.libsonnet'), + '#':: d.pkg(name='limitedPriorityLevelConfiguration', url='', help='"LimitedPriorityLevelConfiguration specifies how to handle requests that are subject to limits. It addresses two issues:\\n - How are requests for this priority level limited?\\n - What should be done with requests that exceed the limit?"'), + '#limitResponse':: d.obj(help='"LimitResponse defines how to handle requests that can not be executed right now."'), + limitResponse: { + '#queuing':: d.obj(help='"QueuingConfiguration holds the configuration parameters for queuing"'), + queuing: { + '#withHandSize':: d.fn(help="\"`handSize` is a small positive number that configures the shuffle sharding of requests into queues. When enqueuing a request at this priority level the request's flow identifier (a string pair) is hashed and the hash value is used to shuffle the list of queues and deal a hand of the size specified here. The request is put into one of the shortest queues in that hand. `handSize` must be no larger than `queues`, and should be significantly smaller (so that a few heavy flows do not saturate most of the queues). See the user-facing documentation for more extensive guidance on setting this field. This field has a default value of 8.\"", args=[d.arg(name='handSize', type=d.T.integer)]), + withHandSize(handSize): { limitResponse+: { queuing+: { handSize: handSize } } }, + '#withQueueLengthLimit':: d.fn(help='"`queueLengthLimit` is the maximum number of requests allowed to be waiting in a given queue of this priority level at a time; excess requests are rejected. This value must be positive. If not specified, it will be defaulted to 50."', args=[d.arg(name='queueLengthLimit', type=d.T.integer)]), + withQueueLengthLimit(queueLengthLimit): { limitResponse+: { queuing+: { queueLengthLimit: queueLengthLimit } } }, + '#withQueues':: d.fn(help='"`queues` is the number of queues for this priority level. The queues exist independently at each apiserver. The value must be positive. Setting it to 1 effectively precludes shufflesharding and thus makes the distinguisher method of associated flow schemas irrelevant. This field has a default value of 64."', args=[d.arg(name='queues', type=d.T.integer)]), + withQueues(queues): { limitResponse+: { queuing+: { queues: queues } } }, + }, + '#withType':: d.fn(help='"`type` is \\"Queue\\" or \\"Reject\\". \\"Queue\\" means that requests that can not be executed upon arrival are held in a queue until they can be executed or a queuing limit is reached. \\"Reject\\" means that requests that can not be executed upon arrival are rejected. Required."', args=[d.arg(name='type', type=d.T.string)]), + withType(type): { limitResponse+: { type: type } }, + }, + '#withBorrowingLimitPercent':: d.fn(help="\"`borrowingLimitPercent`, if present, configures a limit on how many seats this priority level can borrow from other priority levels. The limit is known as this level's BorrowingConcurrencyLimit (BorrowingCL) and is a limit on the total number of seats that this level may borrow at any one time. This field holds the ratio of that limit to the level's nominal concurrency limit. When this field is non-nil, it must hold a non-negative integer and the limit is calculated as follows.\\n\\nBorrowingCL(i) = round( NominalCL(i) * borrowingLimitPercent(i)/100.0 )\\n\\nThe value of this field can be more than 100, implying that this priority level can borrow a number of seats that is greater than its own nominal concurrency limit (NominalCL). 
When this field is left `nil`, the limit is effectively infinite.\"", args=[d.arg(name='borrowingLimitPercent', type=d.T.integer)]), + withBorrowingLimitPercent(borrowingLimitPercent): { borrowingLimitPercent: borrowingLimitPercent }, + '#withLendablePercent':: d.fn(help="\"`lendablePercent` prescribes the fraction of the level's NominalCL that can be borrowed by other priority levels. The value of this field must be between 0 and 100, inclusive, and it defaults to 0. The number of seats that other levels can borrow from this level, known as this level's LendableConcurrencyLimit (LendableCL), is defined as follows.\\n\\nLendableCL(i) = round( NominalCL(i) * lendablePercent(i)/100.0 )\"", args=[d.arg(name='lendablePercent', type=d.T.integer)]), + withLendablePercent(lendablePercent): { lendablePercent: lendablePercent }, + '#withNominalConcurrencyShares':: d.fn(help="\"`nominalConcurrencyShares` (NCS) contributes to the computation of the NominalConcurrencyLimit (NominalCL) of this level. This is the number of execution seats available at this priority level. This is used both for requests dispatched from this priority level as well as requests dispatched from other priority levels borrowing seats from this level. The server's concurrency limit (ServerCL) is divided among the Limited priority levels in proportion to their NCS values:\\n\\nNominalCL(i) = ceil( ServerCL * NCS(i) / sum_ncs ) sum_ncs = sum[priority level k] NCS(k)\\n\\nBigger numbers mean a larger nominal concurrency limit, at the expense of every other priority level.\\n\\nIf not specified, this field defaults to a value of 30.\\n\\nSetting this field to zero supports the construction of a \\\"jail\\\" for this priority level that is used to hold some request(s)\"", args=[d.arg(name='nominalConcurrencyShares', type=d.T.integer)]), + withNominalConcurrencyShares(nominalConcurrencyShares): { nominalConcurrencyShares: nominalConcurrencyShares }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/flowcontrol/v1beta1/main.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1/main.libsonnet similarity index 91% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/flowcontrol/v1beta1/main.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1/main.libsonnet index c386b72b841..1a114f03e60 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/flowcontrol/v1beta1/main.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1/main.libsonnet @@ -1,6 +1,7 @@ { local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='v1beta1', url='', help=''), + '#':: d.pkg(name='v1', url='', help=''), + exemptPriorityLevelConfiguration: (import 'exemptPriorityLevelConfiguration.libsonnet'), flowDistinguisherMethod: (import 'flowDistinguisherMethod.libsonnet'), flowSchema: (import 'flowSchema.libsonnet'), flowSchemaCondition: (import 'flowSchemaCondition.libsonnet'), diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/flowcontrol/v1beta1/nonResourcePolicyRule.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1/nonResourcePolicyRule.libsonnet similarity index 100% rename from 
example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/flowcontrol/v1beta1/nonResourcePolicyRule.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1/nonResourcePolicyRule.libsonnet diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/flowcontrol/v1beta1/policyRulesWithSubjects.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1/policyRulesWithSubjects.libsonnet similarity index 100% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/flowcontrol/v1beta1/policyRulesWithSubjects.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1/policyRulesWithSubjects.libsonnet diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1/priorityLevelConfiguration.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1/priorityLevelConfiguration.libsonnet new file mode 100644 index 00000000000..5d93abb5070 --- /dev/null +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1/priorityLevelConfiguration.libsonnet @@ -0,0 +1,89 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='priorityLevelConfiguration', url='', help='"PriorityLevelConfiguration represents the configuration of a priority level."'), + '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), + metadata: { + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + withAnnotations(annotations): { metadata+: { annotations: annotations } }, + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, + '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), + withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, + '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), + withDeletionGracePeriodSeconds(deletionGracePeriodSeconds): { metadata+: { deletionGracePeriodSeconds: deletionGracePeriodSeconds } }, + '#withDeletionTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. 
Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='deletionTimestamp', type=d.T.string)]), + withDeletionTimestamp(deletionTimestamp): { metadata+: { deletionTimestamp: deletionTimestamp } }, + '#withFinalizers':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."', args=[d.arg(name='finalizers', type=d.T.array)]), + withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, + '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), + withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + withGenerateName(generateName): { metadata+: { generateName: generateName } }, + '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. 
Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), + withGeneration(generation): { metadata+: { generation: generation } }, + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), + withLabels(labels): { metadata+: { labels: labels } }, + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + withLabelsMixin(labels): { metadata+: { labels+: labels } }, + '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), + withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, + '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), + withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { metadata+: { name: name } }, + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + withNamespace(namespace): { metadata+: { namespace: namespace } }, + '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. 
If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), + withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, + '#withOwnerReferencesMixin':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ownerReferences', type=d.T.array)]), + withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, + '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), + withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), + withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), + withUid(uid): { metadata+: { uid: uid } }, + }, + '#new':: d.fn(help='new returns an instance of PriorityLevelConfiguration', args=[d.arg(name='name', type=d.T.string)]), + new(name): { + apiVersion: 'flowcontrol.apiserver.k8s.io/v1', + kind: 'PriorityLevelConfiguration', + } + self.metadata.withName(name=name), + '#spec':: d.obj(help='"PriorityLevelConfigurationSpec specifies the configuration of a priority level."'), + spec: { + '#exempt':: d.obj(help='"ExemptPriorityLevelConfiguration describes the configurable aspects of the handling of exempt requests. In the mandatory exempt configuration object the values in the fields here can be modified by authorized users, unlike the rest of the `spec`."'), + exempt: { + '#withLendablePercent':: d.fn(help="\"`lendablePercent` prescribes the fraction of the level's NominalCL that can be borrowed by other priority levels. This value of this field must be between 0 and 100, inclusive, and it defaults to 0. 
The number of seats that other levels can borrow from this level, known as this level's LendableConcurrencyLimit (LendableCL), is defined as follows.\\n\\nLendableCL(i) = round( NominalCL(i) * lendablePercent(i)/100.0 )\"", args=[d.arg(name='lendablePercent', type=d.T.integer)]), + withLendablePercent(lendablePercent): { spec+: { exempt+: { lendablePercent: lendablePercent } } }, + '#withNominalConcurrencyShares':: d.fn(help="\"`nominalConcurrencyShares` (NCS) contributes to the computation of the NominalConcurrencyLimit (NominalCL) of this level. This is the number of execution seats nominally reserved for this priority level. This DOES NOT limit the dispatching from this priority level but affects the other priority levels through the borrowing mechanism. The server's concurrency limit (ServerCL) is divided among all the priority levels in proportion to their NCS values:\\n\\nNominalCL(i) = ceil( ServerCL * NCS(i) / sum_ncs ) sum_ncs = sum[priority level k] NCS(k)\\n\\nBigger numbers mean a larger nominal concurrency limit, at the expense of every other priority level. This field has a default value of zero.\"", args=[d.arg(name='nominalConcurrencyShares', type=d.T.integer)]), + withNominalConcurrencyShares(nominalConcurrencyShares): { spec+: { exempt+: { nominalConcurrencyShares: nominalConcurrencyShares } } }, + }, + '#limited':: d.obj(help='"LimitedPriorityLevelConfiguration specifies how to handle requests that are subject to limits. It addresses two issues:\\n - How are requests for this priority level limited?\\n - What should be done with requests that exceed the limit?"'), + limited: { + '#limitResponse':: d.obj(help='"LimitResponse defines how to handle requests that can not be executed right now."'), + limitResponse: { + '#queuing':: d.obj(help='"QueuingConfiguration holds the configuration parameters for queuing"'), + queuing: { + '#withHandSize':: d.fn(help="\"`handSize` is a small positive number that configures the shuffle sharding of requests into queues. When enqueuing a request at this priority level the request's flow identifier (a string pair) is hashed and the hash value is used to shuffle the list of queues and deal a hand of the size specified here. The request is put into one of the shortest queues in that hand. `handSize` must be no larger than `queues`, and should be significantly smaller (so that a few heavy flows do not saturate most of the queues). See the user-facing documentation for more extensive guidance on setting this field. This field has a default value of 8.\"", args=[d.arg(name='handSize', type=d.T.integer)]), + withHandSize(handSize): { spec+: { limited+: { limitResponse+: { queuing+: { handSize: handSize } } } } }, + '#withQueueLengthLimit':: d.fn(help='"`queueLengthLimit` is the maximum number of requests allowed to be waiting in a given queue of this priority level at a time; excess requests are rejected. This value must be positive. If not specified, it will be defaulted to 50."', args=[d.arg(name='queueLengthLimit', type=d.T.integer)]), + withQueueLengthLimit(queueLengthLimit): { spec+: { limited+: { limitResponse+: { queuing+: { queueLengthLimit: queueLengthLimit } } } } }, + '#withQueues':: d.fn(help='"`queues` is the number of queues for this priority level. The queues exist independently at each apiserver. The value must be positive. Setting it to 1 effectively precludes shufflesharding and thus makes the distinguisher method of associated flow schemas irrelevant. 
This field has a default value of 64."', args=[d.arg(name='queues', type=d.T.integer)]), + withQueues(queues): { spec+: { limited+: { limitResponse+: { queuing+: { queues: queues } } } } }, + }, + '#withType':: d.fn(help='"`type` is \\"Queue\\" or \\"Reject\\". \\"Queue\\" means that requests that can not be executed upon arrival are held in a queue until they can be executed or a queuing limit is reached. \\"Reject\\" means that requests that can not be executed upon arrival are rejected. Required."', args=[d.arg(name='type', type=d.T.string)]), + withType(type): { spec+: { limited+: { limitResponse+: { type: type } } } }, + }, + '#withBorrowingLimitPercent':: d.fn(help="\"`borrowingLimitPercent`, if present, configures a limit on how many seats this priority level can borrow from other priority levels. The limit is known as this level's BorrowingConcurrencyLimit (BorrowingCL) and is a limit on the total number of seats that this level may borrow at any one time. This field holds the ratio of that limit to the level's nominal concurrency limit. When this field is non-nil, it must hold a non-negative integer and the limit is calculated as follows.\\n\\nBorrowingCL(i) = round( NominalCL(i) * borrowingLimitPercent(i)/100.0 )\\n\\nThe value of this field can be more than 100, implying that this priority level can borrow a number of seats that is greater than its own nominal concurrency limit (NominalCL). When this field is left `nil`, the limit is effectively infinite.\"", args=[d.arg(name='borrowingLimitPercent', type=d.T.integer)]), + withBorrowingLimitPercent(borrowingLimitPercent): { spec+: { limited+: { borrowingLimitPercent: borrowingLimitPercent } } }, + '#withLendablePercent':: d.fn(help="\"`lendablePercent` prescribes the fraction of the level's NominalCL that can be borrowed by other priority levels. The value of this field must be between 0 and 100, inclusive, and it defaults to 0. The number of seats that other levels can borrow from this level, known as this level's LendableConcurrencyLimit (LendableCL), is defined as follows.\\n\\nLendableCL(i) = round( NominalCL(i) * lendablePercent(i)/100.0 )\"", args=[d.arg(name='lendablePercent', type=d.T.integer)]), + withLendablePercent(lendablePercent): { spec+: { limited+: { lendablePercent: lendablePercent } } }, + '#withNominalConcurrencyShares':: d.fn(help="\"`nominalConcurrencyShares` (NCS) contributes to the computation of the NominalConcurrencyLimit (NominalCL) of this level. This is the number of execution seats available at this priority level. This is used both for requests dispatched from this priority level as well as requests dispatched from other priority levels borrowing seats from this level. 
The server's concurrency limit (ServerCL) is divided among the Limited priority levels in proportion to their NCS values:\\n\\nNominalCL(i) = ceil( ServerCL * NCS(i) / sum_ncs ) sum_ncs = sum[priority level k] NCS(k)\\n\\nBigger numbers mean a larger nominal concurrency limit, at the expense of every other priority level.\\n\\nIf not specified, this field defaults to a value of 30.\\n\\nSetting this field to zero supports the construction of a \\\"jail\\\" for this priority level that is used to hold some request(s)\"", args=[d.arg(name='nominalConcurrencyShares', type=d.T.integer)]), + withNominalConcurrencyShares(nominalConcurrencyShares): { spec+: { limited+: { nominalConcurrencyShares: nominalConcurrencyShares } } }, + }, + '#withType':: d.fn(help="\"`type` indicates whether this priority level is subject to limitation on request execution. A value of `\\\"Exempt\\\"` means that requests of this priority level are not subject to a limit (and thus are never queued) and do not detract from the capacity made available to other priority levels. A value of `\\\"Limited\\\"` means that (a) requests of this priority level _are_ subject to limits and (b) some of the server's limited capacity is made available exclusively to this priority level. Required.\"", args=[d.arg(name='type', type=d.T.string)]), + withType(type): { spec+: { type: type } }, + }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/flowcontrol/v1beta1/priorityLevelConfigurationCondition.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1/priorityLevelConfigurationCondition.libsonnet similarity index 100% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/flowcontrol/v1beta1/priorityLevelConfigurationCondition.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1/priorityLevelConfigurationCondition.libsonnet diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/flowcontrol/v1beta1/priorityLevelConfigurationReference.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1/priorityLevelConfigurationReference.libsonnet similarity index 100% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/flowcontrol/v1beta1/priorityLevelConfigurationReference.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1/priorityLevelConfigurationReference.libsonnet diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1/priorityLevelConfigurationSpec.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1/priorityLevelConfigurationSpec.libsonnet new file mode 100644 index 00000000000..cab1ca91362 --- /dev/null +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1/priorityLevelConfigurationSpec.libsonnet @@ -0,0 +1,38 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='priorityLevelConfigurationSpec', url='', help='"PriorityLevelConfigurationSpec specifies the configuration of a priority level."'), + '#exempt':: d.obj(help='"ExemptPriorityLevelConfiguration describes the configurable aspects of the handling of exempt requests. 
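A minimal, hypothetical sketch of how the new flowcontrol/v1 helpers above compose in the usual k8s-libsonnet way (it assumes the vendored 1.29 tree is importable as 'k.libsonnet', which is project-specific; the function paths follow the generated code shown here):

local k = import 'k.libsonnet';  // assumption: alias for the vendored 1.29 k8s-libsonnet tree
local plc = k.flowcontrol.v1.priorityLevelConfiguration;

plc.new('example-level')                                          // emits apiVersion: flowcontrol.apiserver.k8s.io/v1
+ plc.spec.withType('Limited')
+ plc.spec.limited.withNominalConcurrencyShares(30)
+ plc.spec.limited.withLendablePercent(50)
+ plc.spec.limited.limitResponse.withType('Queue')
+ plc.spec.limited.limitResponse.queuing.withQueues(64)
+ plc.spec.limited.limitResponse.queuing.withHandSize(8)          // handSize must not exceed queues
+ plc.spec.limited.limitResponse.queuing.withQueueLengthLimit(50)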
In the mandatory exempt configuration object the values in the fields here can be modified by authorized users, unlike the rest of the `spec`."'), + exempt: { + '#withLendablePercent':: d.fn(help="\"`lendablePercent` prescribes the fraction of the level's NominalCL that can be borrowed by other priority levels. This value of this field must be between 0 and 100, inclusive, and it defaults to 0. The number of seats that other levels can borrow from this level, known as this level's LendableConcurrencyLimit (LendableCL), is defined as follows.\\n\\nLendableCL(i) = round( NominalCL(i) * lendablePercent(i)/100.0 )\"", args=[d.arg(name='lendablePercent', type=d.T.integer)]), + withLendablePercent(lendablePercent): { exempt+: { lendablePercent: lendablePercent } }, + '#withNominalConcurrencyShares':: d.fn(help="\"`nominalConcurrencyShares` (NCS) contributes to the computation of the NominalConcurrencyLimit (NominalCL) of this level. This is the number of execution seats nominally reserved for this priority level. This DOES NOT limit the dispatching from this priority level but affects the other priority levels through the borrowing mechanism. The server's concurrency limit (ServerCL) is divided among all the priority levels in proportion to their NCS values:\\n\\nNominalCL(i) = ceil( ServerCL * NCS(i) / sum_ncs ) sum_ncs = sum[priority level k] NCS(k)\\n\\nBigger numbers mean a larger nominal concurrency limit, at the expense of every other priority level. This field has a default value of zero.\"", args=[d.arg(name='nominalConcurrencyShares', type=d.T.integer)]), + withNominalConcurrencyShares(nominalConcurrencyShares): { exempt+: { nominalConcurrencyShares: nominalConcurrencyShares } }, + }, + '#limited':: d.obj(help='"LimitedPriorityLevelConfiguration specifies how to handle requests that are subject to limits. It addresses two issues:\\n - How are requests for this priority level limited?\\n - What should be done with requests that exceed the limit?"'), + limited: { + '#limitResponse':: d.obj(help='"LimitResponse defines how to handle requests that can not be executed right now."'), + limitResponse: { + '#queuing':: d.obj(help='"QueuingConfiguration holds the configuration parameters for queuing"'), + queuing: { + '#withHandSize':: d.fn(help="\"`handSize` is a small positive number that configures the shuffle sharding of requests into queues. When enqueuing a request at this priority level the request's flow identifier (a string pair) is hashed and the hash value is used to shuffle the list of queues and deal a hand of the size specified here. The request is put into one of the shortest queues in that hand. `handSize` must be no larger than `queues`, and should be significantly smaller (so that a few heavy flows do not saturate most of the queues). See the user-facing documentation for more extensive guidance on setting this field. This field has a default value of 8.\"", args=[d.arg(name='handSize', type=d.T.integer)]), + withHandSize(handSize): { limited+: { limitResponse+: { queuing+: { handSize: handSize } } } }, + '#withQueueLengthLimit':: d.fn(help='"`queueLengthLimit` is the maximum number of requests allowed to be waiting in a given queue of this priority level at a time; excess requests are rejected. This value must be positive. 
If not specified, it will be defaulted to 50."', args=[d.arg(name='queueLengthLimit', type=d.T.integer)]), + withQueueLengthLimit(queueLengthLimit): { limited+: { limitResponse+: { queuing+: { queueLengthLimit: queueLengthLimit } } } }, + '#withQueues':: d.fn(help='"`queues` is the number of queues for this priority level. The queues exist independently at each apiserver. The value must be positive. Setting it to 1 effectively precludes shufflesharding and thus makes the distinguisher method of associated flow schemas irrelevant. This field has a default value of 64."', args=[d.arg(name='queues', type=d.T.integer)]), + withQueues(queues): { limited+: { limitResponse+: { queuing+: { queues: queues } } } }, + }, + '#withType':: d.fn(help='"`type` is \\"Queue\\" or \\"Reject\\". \\"Queue\\" means that requests that can not be executed upon arrival are held in a queue until they can be executed or a queuing limit is reached. \\"Reject\\" means that requests that can not be executed upon arrival are rejected. Required."', args=[d.arg(name='type', type=d.T.string)]), + withType(type): { limited+: { limitResponse+: { type: type } } }, + }, + '#withBorrowingLimitPercent':: d.fn(help="\"`borrowingLimitPercent`, if present, configures a limit on how many seats this priority level can borrow from other priority levels. The limit is known as this level's BorrowingConcurrencyLimit (BorrowingCL) and is a limit on the total number of seats that this level may borrow at any one time. This field holds the ratio of that limit to the level's nominal concurrency limit. When this field is non-nil, it must hold a non-negative integer and the limit is calculated as follows.\\n\\nBorrowingCL(i) = round( NominalCL(i) * borrowingLimitPercent(i)/100.0 )\\n\\nThe value of this field can be more than 100, implying that this priority level can borrow a number of seats that is greater than its own nominal concurrency limit (NominalCL). When this field is left `nil`, the limit is effectively infinite.\"", args=[d.arg(name='borrowingLimitPercent', type=d.T.integer)]), + withBorrowingLimitPercent(borrowingLimitPercent): { limited+: { borrowingLimitPercent: borrowingLimitPercent } }, + '#withLendablePercent':: d.fn(help="\"`lendablePercent` prescribes the fraction of the level's NominalCL that can be borrowed by other priority levels. The value of this field must be between 0 and 100, inclusive, and it defaults to 0. The number of seats that other levels can borrow from this level, known as this level's LendableConcurrencyLimit (LendableCL), is defined as follows.\\n\\nLendableCL(i) = round( NominalCL(i) * lendablePercent(i)/100.0 )\"", args=[d.arg(name='lendablePercent', type=d.T.integer)]), + withLendablePercent(lendablePercent): { limited+: { lendablePercent: lendablePercent } }, + '#withNominalConcurrencyShares':: d.fn(help="\"`nominalConcurrencyShares` (NCS) contributes to the computation of the NominalConcurrencyLimit (NominalCL) of this level. This is the number of execution seats available at this priority level. This is used both for requests dispatched from this priority level as well as requests dispatched from other priority levels borrowing seats from this level. 
The server's concurrency limit (ServerCL) is divided among the Limited priority levels in proportion to their NCS values:\\n\\nNominalCL(i) = ceil( ServerCL * NCS(i) / sum_ncs ) sum_ncs = sum[priority level k] NCS(k)\\n\\nBigger numbers mean a larger nominal concurrency limit, at the expense of every other priority level.\\n\\nIf not specified, this field defaults to a value of 30.\\n\\nSetting this field to zero supports the construction of a \\\"jail\\\" for this priority level that is used to hold some request(s)\"", args=[d.arg(name='nominalConcurrencyShares', type=d.T.integer)]), + withNominalConcurrencyShares(nominalConcurrencyShares): { limited+: { nominalConcurrencyShares: nominalConcurrencyShares } }, + }, + '#withType':: d.fn(help="\"`type` indicates whether this priority level is subject to limitation on request execution. A value of `\\\"Exempt\\\"` means that requests of this priority level are not subject to a limit (and thus are never queued) and do not detract from the capacity made available to other priority levels. A value of `\\\"Limited\\\"` means that (a) requests of this priority level _are_ subject to limits and (b) some of the server's limited capacity is made available exclusively to this priority level. Required.\"", args=[d.arg(name='type', type=d.T.string)]), + withType(type): { type: type }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/flowcontrol/v1beta1/priorityLevelConfigurationStatus.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1/priorityLevelConfigurationStatus.libsonnet similarity index 100% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/flowcontrol/v1beta1/priorityLevelConfigurationStatus.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1/priorityLevelConfigurationStatus.libsonnet diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/flowcontrol/v1beta1/queuingConfiguration.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1/queuingConfiguration.libsonnet similarity index 100% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/flowcontrol/v1beta1/queuingConfiguration.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1/queuingConfiguration.libsonnet diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/flowcontrol/v1beta1/resourcePolicyRule.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1/resourcePolicyRule.libsonnet similarity index 94% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/flowcontrol/v1beta1/resourcePolicyRule.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1/resourcePolicyRule.libsonnet index 6f0930299df..4b74e679f01 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/flowcontrol/v1beta1/resourcePolicyRule.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1/resourcePolicyRule.libsonnet @@ -1,6 +1,6 @@ { local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='resourcePolicyRule', url='', help="\"ResourcePolicyRule is a predicate that matches some resource requests, testing the request's verb and the target resource. 
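As a rough, invented illustration of the NominalCL / LendableCL / BorrowingCL formulas quoted in the help strings above (every number below is made up, and std.floor(x + 0.5) stands in for the round() used in the API docs):

local serverCL = 600;              // hypothetical overall server concurrency limit
local sumNCS = 300;                // hypothetical sum of NCS over all Limited priority levels
local ncs = 30;                    // this level's nominalConcurrencyShares (the documented default)
local lendablePercent = 50;
local borrowingLimitPercent = 200;

local nominalCL = std.ceil(serverCL * ncs / sumNCS);                            // 60
local lendableCL = std.floor(nominalCL * lendablePercent / 100 + 0.5);          // 30 seats other levels may borrow from this one
local borrowingCL = std.floor(nominalCL * borrowingLimitPercent / 100 + 0.5);   // 120 seats this level may borrow at most

{ nominalCL: nominalCL, lendableCL: lendableCL, borrowingCL: borrowingCL }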
A ResourcePolicyRule matches a resource request if and only if: (a) at least one member of verbs matches the request, (b) at least one member of apiGroups matches the request, (c) at least one member of resources matches the request, and (d) least one member of namespaces matches the request.\""), + '#':: d.pkg(name='resourcePolicyRule', url='', help="\"ResourcePolicyRule is a predicate that matches some resource requests, testing the request's verb and the target resource. A ResourcePolicyRule matches a resource request if and only if: (a) at least one member of verbs matches the request, (b) at least one member of apiGroups matches the request, (c) at least one member of resources matches the request, and (d) either (d1) the request does not specify a namespace (i.e., `Namespace==\\\"\\\"`) and clusterScope is true or (d2) the request specifies a namespace and least one member of namespaces matches the request's namespace.\""), '#withApiGroups':: d.fn(help='"`apiGroups` is a list of matching API groups and may not be empty. \\"*\\" matches all API groups and, if present, must be the only entry. Required."', args=[d.arg(name='apiGroups', type=d.T.array)]), withApiGroups(apiGroups): { apiGroups: if std.isArray(v=apiGroups) then apiGroups else [apiGroups] }, '#withApiGroupsMixin':: d.fn(help='"`apiGroups` is a list of matching API groups and may not be empty. \\"*\\" matches all API groups and, if present, must be the only entry. Required."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='apiGroups', type=d.T.array)]), diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/flowcontrol/v1beta1/serviceAccountSubject.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1/serviceAccountSubject.libsonnet similarity index 100% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/flowcontrol/v1beta1/serviceAccountSubject.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1/serviceAccountSubject.libsonnet diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/flowcontrol/v1beta1/subject.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1/subject.libsonnet similarity index 92% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/flowcontrol/v1beta1/subject.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1/subject.libsonnet index 120fe3b49f2..16f120b33b5 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/flowcontrol/v1beta1/subject.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1/subject.libsonnet @@ -18,7 +18,7 @@ '#withName':: d.fn(help='"`name` is the username that matches, or \\"*\\" to match all usernames. Required."', args=[d.arg(name='name', type=d.T.string)]), withName(name): { user+: { name: name } }, }, - '#withKind':: d.fn(help='"Required"', args=[d.arg(name='kind', type=d.T.string)]), + '#withKind':: d.fn(help='"`kind` indicates which one of the other fields is non-empty. 
Required"', args=[d.arg(name='kind', type=d.T.string)]), withKind(kind): { kind: kind }, '#mixin': 'ignore', mixin: self, diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/flowcontrol/v1beta1/userSubject.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1/userSubject.libsonnet similarity index 100% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/flowcontrol/v1beta1/userSubject.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1/userSubject.libsonnet diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1beta3/exemptPriorityLevelConfiguration.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1beta3/exemptPriorityLevelConfiguration.libsonnet new file mode 100644 index 00000000000..7ee3000ff9a --- /dev/null +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1beta3/exemptPriorityLevelConfiguration.libsonnet @@ -0,0 +1,10 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='exemptPriorityLevelConfiguration', url='', help='"ExemptPriorityLevelConfiguration describes the configurable aspects of the handling of exempt requests. In the mandatory exempt configuration object the values in the fields here can be modified by authorized users, unlike the rest of the `spec`."'), + '#withLendablePercent':: d.fn(help="\"`lendablePercent` prescribes the fraction of the level's NominalCL that can be borrowed by other priority levels. This value of this field must be between 0 and 100, inclusive, and it defaults to 0. The number of seats that other levels can borrow from this level, known as this level's LendableConcurrencyLimit (LendableCL), is defined as follows.\\n\\nLendableCL(i) = round( NominalCL(i) * lendablePercent(i)/100.0 )\"", args=[d.arg(name='lendablePercent', type=d.T.integer)]), + withLendablePercent(lendablePercent): { lendablePercent: lendablePercent }, + '#withNominalConcurrencyShares':: d.fn(help="\"`nominalConcurrencyShares` (NCS) contributes to the computation of the NominalConcurrencyLimit (NominalCL) of this level. This is the number of execution seats nominally reserved for this priority level. This DOES NOT limit the dispatching from this priority level but affects the other priority levels through the borrowing mechanism. The server's concurrency limit (ServerCL) is divided among all the priority levels in proportion to their NCS values:\\n\\nNominalCL(i) = ceil( ServerCL * NCS(i) / sum_ncs ) sum_ncs = sum[priority level k] NCS(k)\\n\\nBigger numbers mean a larger nominal concurrency limit, at the expense of every other priority level. 
This field has a default value of zero.\"", args=[d.arg(name='nominalConcurrencyShares', type=d.T.integer)]), + withNominalConcurrencyShares(nominalConcurrencyShares): { nominalConcurrencyShares: nominalConcurrencyShares }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/flowcontrol/v1beta1/flowDistinguisherMethod.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1beta3/flowDistinguisherMethod.libsonnet similarity index 100% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/flowcontrol/v1beta1/flowDistinguisherMethod.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1beta3/flowDistinguisherMethod.libsonnet diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/flowcontrol/v1beta1/flowSchema.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1beta3/flowSchema.libsonnet similarity index 85% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/flowcontrol/v1beta1/flowSchema.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1beta3/flowSchema.libsonnet index 1e5df6febec..70fdc485952 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/flowcontrol/v1beta1/flowSchema.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1beta3/flowSchema.libsonnet @@ -3,12 +3,10 @@ '#':: d.pkg(name='flowSchema', url='', help='"FlowSchema defines the schema of a group of flows. Note that a flow is made up of a set of inbound API requests with similar attributes and is identified by a pair of strings: the name of the FlowSchema and a \\"flow distinguisher\\"."'), '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. 
More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -19,21 +17,21 @@ withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. 
The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { metadata+: { generateName: generateName } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { metadata+: { labels+: labels } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. 
This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { metadata+: { namespace: namespace } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. 
There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, @@ -41,14 +39,14 @@ withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { metadata+: { uid: uid } }, }, '#new':: d.fn(help='new returns an instance of FlowSchema', args=[d.arg(name='name', type=d.T.string)]), new(name): { - apiVersion: 'flowcontrol.apiserver.k8s.io/v1beta1', + apiVersion: 'flowcontrol.apiserver.k8s.io/v1beta3', kind: 'FlowSchema', } + self.metadata.withName(name=name), '#spec':: d.obj(help="\"FlowSchemaSpec describes how the FlowSchema's specification looks like.\""), diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/flowcontrol/v1beta1/flowSchemaCondition.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1beta3/flowSchemaCondition.libsonnet similarity index 100% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/flowcontrol/v1beta1/flowSchemaCondition.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1beta3/flowSchemaCondition.libsonnet diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/flowcontrol/v1beta1/flowSchemaSpec.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1beta3/flowSchemaSpec.libsonnet similarity index 100% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/flowcontrol/v1beta1/flowSchemaSpec.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1beta3/flowSchemaSpec.libsonnet diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/flowcontrol/v1beta1/flowSchemaStatus.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1beta3/flowSchemaStatus.libsonnet similarity index 100% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/flowcontrol/v1beta1/flowSchemaStatus.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1beta3/flowSchemaStatus.libsonnet diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/flowcontrol/v1beta1/groupSubject.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1beta3/groupSubject.libsonnet similarity index 100% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/flowcontrol/v1beta1/groupSubject.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1beta3/groupSubject.libsonnet diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/flowcontrol/v1beta1/limitResponse.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1beta3/limitResponse.libsonnet similarity index 100% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/flowcontrol/v1beta1/limitResponse.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1beta3/limitResponse.libsonnet diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/flowcontrol/v1beta1/limitedPriorityLevelConfiguration.libsonnet 
b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1beta3/limitedPriorityLevelConfiguration.libsonnet similarity index 50% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/flowcontrol/v1beta1/limitedPriorityLevelConfiguration.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1beta3/limitedPriorityLevelConfiguration.libsonnet index a9e81cd73e6..de52c78de37 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/flowcontrol/v1beta1/limitedPriorityLevelConfiguration.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1beta3/limitedPriorityLevelConfiguration.libsonnet @@ -1,6 +1,6 @@ { local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='limitedPriorityLevelConfiguration', url='', help='"LimitedPriorityLevelConfiguration specifies how to handle requests that are subject to limits. It addresses two issues:\\n * How are requests for this priority level limited?\\n * What should be done with requests that exceed the limit?"'), + '#':: d.pkg(name='limitedPriorityLevelConfiguration', url='', help='"LimitedPriorityLevelConfiguration specifies how to handle requests that are subject to limits. It addresses two issues:\\n - How are requests for this priority level limited?\\n - What should be done with requests that exceed the limit?"'), '#limitResponse':: d.obj(help='"LimitResponse defines how to handle requests that can not be executed right now."'), limitResponse: { '#queuing':: d.obj(help='"QueuingConfiguration holds the configuration parameters for queuing"'), @@ -15,8 +15,12 @@ '#withType':: d.fn(help='"`type` is \\"Queue\\" or \\"Reject\\". \\"Queue\\" means that requests that can not be executed upon arrival are held in a queue until they can be executed or a queuing limit is reached. \\"Reject\\" means that requests that can not be executed upon arrival are rejected. Required."', args=[d.arg(name='type', type=d.T.string)]), withType(type): { limitResponse+: { type: type } }, }, - '#withAssuredConcurrencyShares':: d.fn(help="\"`assuredConcurrencyShares` (ACS) configures the execution limit, which is a limit on the number of requests of this priority level that may be exeucting at a given time. ACS must be a positive number. The server's concurrency limit (SCL) is divided among the concurrency-controlled priority levels in proportion to their assured concurrency shares. This produces the assured concurrency value (ACV) --- the number of requests that may be executing at a time --- for each such priority level:\\n\\n ACV(l) = ceil( SCL * ACS(l) / ( sum[priority levels k] ACS(k) ) )\\n\\nbigger numbers of ACS mean more reserved concurrent requests (at the expense of every other PL). This field has a default value of 30.\"", args=[d.arg(name='assuredConcurrencyShares', type=d.T.integer)]), - withAssuredConcurrencyShares(assuredConcurrencyShares): { assuredConcurrencyShares: assuredConcurrencyShares }, + '#withBorrowingLimitPercent':: d.fn(help="\"`borrowingLimitPercent`, if present, configures a limit on how many seats this priority level can borrow from other priority levels. The limit is known as this level's BorrowingConcurrencyLimit (BorrowingCL) and is a limit on the total number of seats that this level may borrow at any one time. This field holds the ratio of that limit to the level's nominal concurrency limit. 
When this field is non-nil, it must hold a non-negative integer and the limit is calculated as follows.\\n\\nBorrowingCL(i) = round( NominalCL(i) * borrowingLimitPercent(i)/100.0 )\\n\\nThe value of this field can be more than 100, implying that this priority level can borrow a number of seats that is greater than its own nominal concurrency limit (NominalCL). When this field is left `nil`, the limit is effectively infinite.\"", args=[d.arg(name='borrowingLimitPercent', type=d.T.integer)]), + withBorrowingLimitPercent(borrowingLimitPercent): { borrowingLimitPercent: borrowingLimitPercent }, + '#withLendablePercent':: d.fn(help="\"`lendablePercent` prescribes the fraction of the level's NominalCL that can be borrowed by other priority levels. The value of this field must be between 0 and 100, inclusive, and it defaults to 0. The number of seats that other levels can borrow from this level, known as this level's LendableConcurrencyLimit (LendableCL), is defined as follows.\\n\\nLendableCL(i) = round( NominalCL(i) * lendablePercent(i)/100.0 )\"", args=[d.arg(name='lendablePercent', type=d.T.integer)]), + withLendablePercent(lendablePercent): { lendablePercent: lendablePercent }, + '#withNominalConcurrencyShares':: d.fn(help="\"`nominalConcurrencyShares` (NCS) contributes to the computation of the NominalConcurrencyLimit (NominalCL) of this level. This is the number of execution seats available at this priority level. This is used both for requests dispatched from this priority level as well as requests dispatched from other priority levels borrowing seats from this level. The server's concurrency limit (ServerCL) is divided among the Limited priority levels in proportion to their NCS values:\\n\\nNominalCL(i) = ceil( ServerCL * NCS(i) / sum_ncs ) sum_ncs = sum[priority level k] NCS(k)\\n\\nBigger numbers mean a larger nominal concurrency limit, at the expense of every other priority level. 
This field has a default value of 30.\"", args=[d.arg(name='nominalConcurrencyShares', type=d.T.integer)]), + withNominalConcurrencyShares(nominalConcurrencyShares): { nominalConcurrencyShares: nominalConcurrencyShares }, '#mixin': 'ignore', mixin: self, } diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1beta3/main.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1beta3/main.libsonnet new file mode 100644 index 00000000000..7085f0abbf6 --- /dev/null +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1beta3/main.libsonnet @@ -0,0 +1,25 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='v1beta3', url='', help=''), + exemptPriorityLevelConfiguration: (import 'exemptPriorityLevelConfiguration.libsonnet'), + flowDistinguisherMethod: (import 'flowDistinguisherMethod.libsonnet'), + flowSchema: (import 'flowSchema.libsonnet'), + flowSchemaCondition: (import 'flowSchemaCondition.libsonnet'), + flowSchemaSpec: (import 'flowSchemaSpec.libsonnet'), + flowSchemaStatus: (import 'flowSchemaStatus.libsonnet'), + groupSubject: (import 'groupSubject.libsonnet'), + limitResponse: (import 'limitResponse.libsonnet'), + limitedPriorityLevelConfiguration: (import 'limitedPriorityLevelConfiguration.libsonnet'), + nonResourcePolicyRule: (import 'nonResourcePolicyRule.libsonnet'), + policyRulesWithSubjects: (import 'policyRulesWithSubjects.libsonnet'), + priorityLevelConfiguration: (import 'priorityLevelConfiguration.libsonnet'), + priorityLevelConfigurationCondition: (import 'priorityLevelConfigurationCondition.libsonnet'), + priorityLevelConfigurationReference: (import 'priorityLevelConfigurationReference.libsonnet'), + priorityLevelConfigurationSpec: (import 'priorityLevelConfigurationSpec.libsonnet'), + priorityLevelConfigurationStatus: (import 'priorityLevelConfigurationStatus.libsonnet'), + queuingConfiguration: (import 'queuingConfiguration.libsonnet'), + resourcePolicyRule: (import 'resourcePolicyRule.libsonnet'), + serviceAccountSubject: (import 'serviceAccountSubject.libsonnet'), + subject: (import 'subject.libsonnet'), + userSubject: (import 'userSubject.libsonnet'), +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/flowcontrol/v1beta1/nonResourcePolicyRule.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1beta3/nonResourcePolicyRule.libsonnet similarity index 100% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/flowcontrol/v1beta1/nonResourcePolicyRule.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1beta3/nonResourcePolicyRule.libsonnet diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/flowcontrol/v1beta1/policyRulesWithSubjects.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1beta3/policyRulesWithSubjects.libsonnet similarity index 100% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/flowcontrol/v1beta1/policyRulesWithSubjects.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1beta3/policyRulesWithSubjects.libsonnet diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/flowcontrol/v1beta1/priorityLevelConfiguration.libsonnet 
b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1beta3/priorityLevelConfiguration.libsonnet similarity index 69% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/flowcontrol/v1beta1/priorityLevelConfiguration.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1beta3/priorityLevelConfiguration.libsonnet index 76f42703756..b13be03a4ea 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/flowcontrol/v1beta1/priorityLevelConfiguration.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1beta3/priorityLevelConfiguration.libsonnet @@ -3,12 +3,10 @@ '#':: d.pkg(name='priorityLevelConfiguration', url='', help='"PriorityLevelConfiguration represents the configuration of a priority level."'), '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. 
Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -19,21 +17,21 @@ withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { metadata+: { generateName: generateName } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { metadata+: { labels+: labels } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. 
Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { metadata+: { namespace: namespace } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, @@ -41,19 +39,26 @@ withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { metadata+: { uid: uid } }, }, '#new':: d.fn(help='new returns an instance of PriorityLevelConfiguration', args=[d.arg(name='name', type=d.T.string)]), new(name): { - apiVersion: 'flowcontrol.apiserver.k8s.io/v1beta1', + apiVersion: 'flowcontrol.apiserver.k8s.io/v1beta3', kind: 'PriorityLevelConfiguration', } + self.metadata.withName(name=name), '#spec':: d.obj(help='"PriorityLevelConfigurationSpec specifies the configuration of a priority level."'), spec: { - '#limited':: d.obj(help='"LimitedPriorityLevelConfiguration specifies how to handle requests that are subject to limits. It addresses two issues:\\n * How are requests for this priority level limited?\\n * What should be done with requests that exceed the limit?"'), + '#exempt':: d.obj(help='"ExemptPriorityLevelConfiguration describes the configurable aspects of the handling of exempt requests. In the mandatory exempt configuration object the values in the fields here can be modified by authorized users, unlike the rest of the `spec`."'), + exempt: { + '#withLendablePercent':: d.fn(help="\"`lendablePercent` prescribes the fraction of the level's NominalCL that can be borrowed by other priority levels. This value of this field must be between 0 and 100, inclusive, and it defaults to 0. The number of seats that other levels can borrow from this level, known as this level's LendableConcurrencyLimit (LendableCL), is defined as follows.\\n\\nLendableCL(i) = round( NominalCL(i) * lendablePercent(i)/100.0 )\"", args=[d.arg(name='lendablePercent', type=d.T.integer)]), + withLendablePercent(lendablePercent): { spec+: { exempt+: { lendablePercent: lendablePercent } } }, + '#withNominalConcurrencyShares':: d.fn(help="\"`nominalConcurrencyShares` (NCS) contributes to the computation of the NominalConcurrencyLimit (NominalCL) of this level. This is the number of execution seats nominally reserved for this priority level. 
This DOES NOT limit the dispatching from this priority level but affects the other priority levels through the borrowing mechanism. The server's concurrency limit (ServerCL) is divided among all the priority levels in proportion to their NCS values:\\n\\nNominalCL(i) = ceil( ServerCL * NCS(i) / sum_ncs ) sum_ncs = sum[priority level k] NCS(k)\\n\\nBigger numbers mean a larger nominal concurrency limit, at the expense of every other priority level. This field has a default value of zero.\"", args=[d.arg(name='nominalConcurrencyShares', type=d.T.integer)]), + withNominalConcurrencyShares(nominalConcurrencyShares): { spec+: { exempt+: { nominalConcurrencyShares: nominalConcurrencyShares } } }, + }, + '#limited':: d.obj(help='"LimitedPriorityLevelConfiguration specifies how to handle requests that are subject to limits. It addresses two issues:\\n - How are requests for this priority level limited?\\n - What should be done with requests that exceed the limit?"'), limited: { '#limitResponse':: d.obj(help='"LimitResponse defines how to handle requests that can not be executed right now."'), limitResponse: { @@ -69,8 +74,12 @@ '#withType':: d.fn(help='"`type` is \\"Queue\\" or \\"Reject\\". \\"Queue\\" means that requests that can not be executed upon arrival are held in a queue until they can be executed or a queuing limit is reached. \\"Reject\\" means that requests that can not be executed upon arrival are rejected. Required."', args=[d.arg(name='type', type=d.T.string)]), withType(type): { spec+: { limited+: { limitResponse+: { type: type } } } }, }, - '#withAssuredConcurrencyShares':: d.fn(help="\"`assuredConcurrencyShares` (ACS) configures the execution limit, which is a limit on the number of requests of this priority level that may be exeucting at a given time. ACS must be a positive number. The server's concurrency limit (SCL) is divided among the concurrency-controlled priority levels in proportion to their assured concurrency shares. This produces the assured concurrency value (ACV) --- the number of requests that may be executing at a time --- for each such priority level:\\n\\n ACV(l) = ceil( SCL * ACS(l) / ( sum[priority levels k] ACS(k) ) )\\n\\nbigger numbers of ACS mean more reserved concurrent requests (at the expense of every other PL). This field has a default value of 30.\"", args=[d.arg(name='assuredConcurrencyShares', type=d.T.integer)]), - withAssuredConcurrencyShares(assuredConcurrencyShares): { spec+: { limited+: { assuredConcurrencyShares: assuredConcurrencyShares } } }, + '#withBorrowingLimitPercent':: d.fn(help="\"`borrowingLimitPercent`, if present, configures a limit on how many seats this priority level can borrow from other priority levels. The limit is known as this level's BorrowingConcurrencyLimit (BorrowingCL) and is a limit on the total number of seats that this level may borrow at any one time. This field holds the ratio of that limit to the level's nominal concurrency limit. When this field is non-nil, it must hold a non-negative integer and the limit is calculated as follows.\\n\\nBorrowingCL(i) = round( NominalCL(i) * borrowingLimitPercent(i)/100.0 )\\n\\nThe value of this field can be more than 100, implying that this priority level can borrow a number of seats that is greater than its own nominal concurrency limit (NominalCL). 
When this field is left `nil`, the limit is effectively infinite.\"", args=[d.arg(name='borrowingLimitPercent', type=d.T.integer)]), + withBorrowingLimitPercent(borrowingLimitPercent): { spec+: { limited+: { borrowingLimitPercent: borrowingLimitPercent } } }, + '#withLendablePercent':: d.fn(help="\"`lendablePercent` prescribes the fraction of the level's NominalCL that can be borrowed by other priority levels. The value of this field must be between 0 and 100, inclusive, and it defaults to 0. The number of seats that other levels can borrow from this level, known as this level's LendableConcurrencyLimit (LendableCL), is defined as follows.\\n\\nLendableCL(i) = round( NominalCL(i) * lendablePercent(i)/100.0 )\"", args=[d.arg(name='lendablePercent', type=d.T.integer)]), + withLendablePercent(lendablePercent): { spec+: { limited+: { lendablePercent: lendablePercent } } }, + '#withNominalConcurrencyShares':: d.fn(help="\"`nominalConcurrencyShares` (NCS) contributes to the computation of the NominalConcurrencyLimit (NominalCL) of this level. This is the number of execution seats available at this priority level. This is used both for requests dispatched from this priority level as well as requests dispatched from other priority levels borrowing seats from this level. The server's concurrency limit (ServerCL) is divided among the Limited priority levels in proportion to their NCS values:\\n\\nNominalCL(i) = ceil( ServerCL * NCS(i) / sum_ncs ) sum_ncs = sum[priority level k] NCS(k)\\n\\nBigger numbers mean a larger nominal concurrency limit, at the expense of every other priority level. This field has a default value of 30.\"", args=[d.arg(name='nominalConcurrencyShares', type=d.T.integer)]), + withNominalConcurrencyShares(nominalConcurrencyShares): { spec+: { limited+: { nominalConcurrencyShares: nominalConcurrencyShares } } }, }, '#withType':: d.fn(help="\"`type` indicates whether this priority level is subject to limitation on request execution. A value of `\\\"Exempt\\\"` means that requests of this priority level are not subject to a limit (and thus are never queued) and do not detract from the capacity made available to other priority levels. A value of `\\\"Limited\\\"` means that (a) requests of this priority level _are_ subject to limits and (b) some of the server's limited capacity is made available exclusively to this priority level. 
Required.\"", args=[d.arg(name='type', type=d.T.string)]), withType(type): { spec+: { type: type } }, diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/flowcontrol/v1beta1/priorityLevelConfigurationCondition.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1beta3/priorityLevelConfigurationCondition.libsonnet similarity index 100% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/flowcontrol/v1beta1/priorityLevelConfigurationCondition.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1beta3/priorityLevelConfigurationCondition.libsonnet diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/flowcontrol/v1beta1/priorityLevelConfigurationReference.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1beta3/priorityLevelConfigurationReference.libsonnet similarity index 100% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/flowcontrol/v1beta1/priorityLevelConfigurationReference.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1beta3/priorityLevelConfigurationReference.libsonnet diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1beta3/priorityLevelConfigurationSpec.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1beta3/priorityLevelConfigurationSpec.libsonnet new file mode 100644 index 00000000000..e035cdb2d82 --- /dev/null +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1beta3/priorityLevelConfigurationSpec.libsonnet @@ -0,0 +1,38 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='priorityLevelConfigurationSpec', url='', help='"PriorityLevelConfigurationSpec specifies the configuration of a priority level."'), + '#exempt':: d.obj(help='"ExemptPriorityLevelConfiguration describes the configurable aspects of the handling of exempt requests. In the mandatory exempt configuration object the values in the fields here can be modified by authorized users, unlike the rest of the `spec`."'), + exempt: { + '#withLendablePercent':: d.fn(help="\"`lendablePercent` prescribes the fraction of the level's NominalCL that can be borrowed by other priority levels. This value of this field must be between 0 and 100, inclusive, and it defaults to 0. The number of seats that other levels can borrow from this level, known as this level's LendableConcurrencyLimit (LendableCL), is defined as follows.\\n\\nLendableCL(i) = round( NominalCL(i) * lendablePercent(i)/100.0 )\"", args=[d.arg(name='lendablePercent', type=d.T.integer)]), + withLendablePercent(lendablePercent): { exempt+: { lendablePercent: lendablePercent } }, + '#withNominalConcurrencyShares':: d.fn(help="\"`nominalConcurrencyShares` (NCS) contributes to the computation of the NominalConcurrencyLimit (NominalCL) of this level. This is the number of execution seats nominally reserved for this priority level. This DOES NOT limit the dispatching from this priority level but affects the other priority levels through the borrowing mechanism. 
The server's concurrency limit (ServerCL) is divided among all the priority levels in proportion to their NCS values:\\n\\nNominalCL(i) = ceil( ServerCL * NCS(i) / sum_ncs ) sum_ncs = sum[priority level k] NCS(k)\\n\\nBigger numbers mean a larger nominal concurrency limit, at the expense of every other priority level. This field has a default value of zero.\"", args=[d.arg(name='nominalConcurrencyShares', type=d.T.integer)]), + withNominalConcurrencyShares(nominalConcurrencyShares): { exempt+: { nominalConcurrencyShares: nominalConcurrencyShares } }, + }, + '#limited':: d.obj(help='"LimitedPriorityLevelConfiguration specifies how to handle requests that are subject to limits. It addresses two issues:\\n - How are requests for this priority level limited?\\n - What should be done with requests that exceed the limit?"'), + limited: { + '#limitResponse':: d.obj(help='"LimitResponse defines how to handle requests that can not be executed right now."'), + limitResponse: { + '#queuing':: d.obj(help='"QueuingConfiguration holds the configuration parameters for queuing"'), + queuing: { + '#withHandSize':: d.fn(help="\"`handSize` is a small positive number that configures the shuffle sharding of requests into queues. When enqueuing a request at this priority level the request's flow identifier (a string pair) is hashed and the hash value is used to shuffle the list of queues and deal a hand of the size specified here. The request is put into one of the shortest queues in that hand. `handSize` must be no larger than `queues`, and should be significantly smaller (so that a few heavy flows do not saturate most of the queues). See the user-facing documentation for more extensive guidance on setting this field. This field has a default value of 8.\"", args=[d.arg(name='handSize', type=d.T.integer)]), + withHandSize(handSize): { limited+: { limitResponse+: { queuing+: { handSize: handSize } } } }, + '#withQueueLengthLimit':: d.fn(help='"`queueLengthLimit` is the maximum number of requests allowed to be waiting in a given queue of this priority level at a time; excess requests are rejected. This value must be positive. If not specified, it will be defaulted to 50."', args=[d.arg(name='queueLengthLimit', type=d.T.integer)]), + withQueueLengthLimit(queueLengthLimit): { limited+: { limitResponse+: { queuing+: { queueLengthLimit: queueLengthLimit } } } }, + '#withQueues':: d.fn(help='"`queues` is the number of queues for this priority level. The queues exist independently at each apiserver. The value must be positive. Setting it to 1 effectively precludes shufflesharding and thus makes the distinguisher method of associated flow schemas irrelevant. This field has a default value of 64."', args=[d.arg(name='queues', type=d.T.integer)]), + withQueues(queues): { limited+: { limitResponse+: { queuing+: { queues: queues } } } }, + }, + '#withType':: d.fn(help='"`type` is \\"Queue\\" or \\"Reject\\". \\"Queue\\" means that requests that can not be executed upon arrival are held in a queue until they can be executed or a queuing limit is reached. \\"Reject\\" means that requests that can not be executed upon arrival are rejected. Required."', args=[d.arg(name='type', type=d.T.string)]), + withType(type): { limited+: { limitResponse+: { type: type } } }, + }, + '#withBorrowingLimitPercent':: d.fn(help="\"`borrowingLimitPercent`, if present, configures a limit on how many seats this priority level can borrow from other priority levels. 
The limit is known as this level's BorrowingConcurrencyLimit (BorrowingCL) and is a limit on the total number of seats that this level may borrow at any one time. This field holds the ratio of that limit to the level's nominal concurrency limit. When this field is non-nil, it must hold a non-negative integer and the limit is calculated as follows.\\n\\nBorrowingCL(i) = round( NominalCL(i) * borrowingLimitPercent(i)/100.0 )\\n\\nThe value of this field can be more than 100, implying that this priority level can borrow a number of seats that is greater than its own nominal concurrency limit (NominalCL). When this field is left `nil`, the limit is effectively infinite.\"", args=[d.arg(name='borrowingLimitPercent', type=d.T.integer)]), + withBorrowingLimitPercent(borrowingLimitPercent): { limited+: { borrowingLimitPercent: borrowingLimitPercent } }, + '#withLendablePercent':: d.fn(help="\"`lendablePercent` prescribes the fraction of the level's NominalCL that can be borrowed by other priority levels. The value of this field must be between 0 and 100, inclusive, and it defaults to 0. The number of seats that other levels can borrow from this level, known as this level's LendableConcurrencyLimit (LendableCL), is defined as follows.\\n\\nLendableCL(i) = round( NominalCL(i) * lendablePercent(i)/100.0 )\"", args=[d.arg(name='lendablePercent', type=d.T.integer)]), + withLendablePercent(lendablePercent): { limited+: { lendablePercent: lendablePercent } }, + '#withNominalConcurrencyShares':: d.fn(help="\"`nominalConcurrencyShares` (NCS) contributes to the computation of the NominalConcurrencyLimit (NominalCL) of this level. This is the number of execution seats available at this priority level. This is used both for requests dispatched from this priority level as well as requests dispatched from other priority levels borrowing seats from this level. The server's concurrency limit (ServerCL) is divided among the Limited priority levels in proportion to their NCS values:\\n\\nNominalCL(i) = ceil( ServerCL * NCS(i) / sum_ncs ) sum_ncs = sum[priority level k] NCS(k)\\n\\nBigger numbers mean a larger nominal concurrency limit, at the expense of every other priority level. This field has a default value of 30.\"", args=[d.arg(name='nominalConcurrencyShares', type=d.T.integer)]), + withNominalConcurrencyShares(nominalConcurrencyShares): { limited+: { nominalConcurrencyShares: nominalConcurrencyShares } }, + }, + '#withType':: d.fn(help="\"`type` indicates whether this priority level is subject to limitation on request execution. A value of `\\\"Exempt\\\"` means that requests of this priority level are not subject to a limit (and thus are never queued) and do not detract from the capacity made available to other priority levels. A value of `\\\"Limited\\\"` means that (a) requests of this priority level _are_ subject to limits and (b) some of the server's limited capacity is made available exclusively to this priority level. 
Required.\"", args=[d.arg(name='type', type=d.T.string)]), + withType(type): { type: type }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/flowcontrol/v1beta1/priorityLevelConfigurationStatus.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1beta3/priorityLevelConfigurationStatus.libsonnet similarity index 100% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/flowcontrol/v1beta1/priorityLevelConfigurationStatus.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1beta3/priorityLevelConfigurationStatus.libsonnet diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/flowcontrol/v1beta1/queuingConfiguration.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1beta3/queuingConfiguration.libsonnet similarity index 100% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/flowcontrol/v1beta1/queuingConfiguration.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1beta3/queuingConfiguration.libsonnet diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/flowcontrol/v1beta1/resourcePolicyRule.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1beta3/resourcePolicyRule.libsonnet similarity index 94% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/flowcontrol/v1beta1/resourcePolicyRule.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1beta3/resourcePolicyRule.libsonnet index 6f0930299df..4b74e679f01 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/flowcontrol/v1beta1/resourcePolicyRule.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1beta3/resourcePolicyRule.libsonnet @@ -1,6 +1,6 @@ { local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='resourcePolicyRule', url='', help="\"ResourcePolicyRule is a predicate that matches some resource requests, testing the request's verb and the target resource. A ResourcePolicyRule matches a resource request if and only if: (a) at least one member of verbs matches the request, (b) at least one member of apiGroups matches the request, (c) at least one member of resources matches the request, and (d) least one member of namespaces matches the request.\""), + '#':: d.pkg(name='resourcePolicyRule', url='', help="\"ResourcePolicyRule is a predicate that matches some resource requests, testing the request's verb and the target resource. A ResourcePolicyRule matches a resource request if and only if: (a) at least one member of verbs matches the request, (b) at least one member of apiGroups matches the request, (c) at least one member of resources matches the request, and (d) either (d1) the request does not specify a namespace (i.e., `Namespace==\\\"\\\"`) and clusterScope is true or (d2) the request specifies a namespace and least one member of namespaces matches the request's namespace.\""), '#withApiGroups':: d.fn(help='"`apiGroups` is a list of matching API groups and may not be empty. \\"*\\" matches all API groups and, if present, must be the only entry. 
Required."', args=[d.arg(name='apiGroups', type=d.T.array)]), withApiGroups(apiGroups): { apiGroups: if std.isArray(v=apiGroups) then apiGroups else [apiGroups] }, '#withApiGroupsMixin':: d.fn(help='"`apiGroups` is a list of matching API groups and may not be empty. \\"*\\" matches all API groups and, if present, must be the only entry. Required."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='apiGroups', type=d.T.array)]), diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/flowcontrol/v1beta1/serviceAccountSubject.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1beta3/serviceAccountSubject.libsonnet similarity index 100% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/flowcontrol/v1beta1/serviceAccountSubject.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1beta3/serviceAccountSubject.libsonnet diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/flowcontrol/v1beta1/subject.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1beta3/subject.libsonnet similarity index 92% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/flowcontrol/v1beta1/subject.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1beta3/subject.libsonnet index 120fe3b49f2..16f120b33b5 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/flowcontrol/v1beta1/subject.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1beta3/subject.libsonnet @@ -18,7 +18,7 @@ '#withName':: d.fn(help='"`name` is the username that matches, or \\"*\\" to match all usernames. Required."', args=[d.arg(name='name', type=d.T.string)]), withName(name): { user+: { name: name } }, }, - '#withKind':: d.fn(help='"Required"', args=[d.arg(name='kind', type=d.T.string)]), + '#withKind':: d.fn(help='"`kind` indicates which one of the other fields is non-empty. 
Required"', args=[d.arg(name='kind', type=d.T.string)]), withKind(kind): { kind: kind }, '#mixin': 'ignore', mixin: self, diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/flowcontrol/v1beta1/userSubject.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1beta3/userSubject.libsonnet similarity index 100% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/flowcontrol/v1beta1/userSubject.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1beta3/userSubject.libsonnet diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/meta/main.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/meta/main.libsonnet similarity index 100% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/meta/main.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/meta/main.libsonnet diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/meta/v1/apiGroup.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/meta/v1/apiGroup.libsonnet similarity index 100% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/meta/v1/apiGroup.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/meta/v1/apiGroup.libsonnet diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/meta/v1/apiGroupList.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/meta/v1/apiGroupList.libsonnet similarity index 100% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/meta/v1/apiGroupList.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/meta/v1/apiGroupList.libsonnet diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/meta/v1/apiResource.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/meta/v1/apiResource.libsonnet similarity index 100% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/meta/v1/apiResource.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/meta/v1/apiResource.libsonnet diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/meta/v1/apiResourceList.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/meta/v1/apiResourceList.libsonnet similarity index 100% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/meta/v1/apiResourceList.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/meta/v1/apiResourceList.libsonnet diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/meta/v1/apiVersions.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/meta/v1/apiVersions.libsonnet similarity index 100% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/meta/v1/apiVersions.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/meta/v1/apiVersions.libsonnet diff 
--git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/meta/v1/condition.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/meta/v1/condition.libsonnet similarity index 100% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/meta/v1/condition.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/meta/v1/condition.libsonnet diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/meta/v1/deleteOptions.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/meta/v1/deleteOptions.libsonnet similarity index 100% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/meta/v1/deleteOptions.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/meta/v1/deleteOptions.libsonnet diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/meta/v1/fieldsV1.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/meta/v1/fieldsV1.libsonnet similarity index 100% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/meta/v1/fieldsV1.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/meta/v1/fieldsV1.libsonnet diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/meta/v1/groupVersionForDiscovery.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/meta/v1/groupVersionForDiscovery.libsonnet similarity index 100% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/meta/v1/groupVersionForDiscovery.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/meta/v1/groupVersionForDiscovery.libsonnet diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/meta/v1/labelSelector.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/meta/v1/labelSelector.libsonnet similarity index 100% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/meta/v1/labelSelector.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/meta/v1/labelSelector.libsonnet diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/meta/v1/labelSelectorRequirement.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/meta/v1/labelSelectorRequirement.libsonnet similarity index 100% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/meta/v1/labelSelectorRequirement.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/meta/v1/labelSelectorRequirement.libsonnet diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/meta/v1/listMeta.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/meta/v1/listMeta.libsonnet similarity index 89% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/meta/v1/listMeta.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/meta/v1/listMeta.libsonnet index c3204e3b528..b5fba9a144d 100644 --- 
a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/meta/v1/listMeta.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/meta/v1/listMeta.libsonnet @@ -7,7 +7,7 @@ withRemainingItemCount(remainingItemCount): { remainingItemCount: remainingItemCount }, '#withResourceVersion':: d.fn(help="\"String that identifies the server's internal version of this object that can be used by clients to determine when objects have changed. Value must be treated as opaque by clients and passed unmodified back to the server. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency\"", args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { resourceVersion: resourceVersion }, - '#withSelfLink':: d.fn(help='"selfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { selfLink: selfLink }, '#mixin': 'ignore', mixin: self, diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/meta/v1/main.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/meta/v1/main.libsonnet similarity index 100% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/meta/v1/main.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/meta/v1/main.libsonnet diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/meta/v1/managedFieldsEntry.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/meta/v1/managedFieldsEntry.libsonnet similarity index 84% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/meta/v1/managedFieldsEntry.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/meta/v1/managedFieldsEntry.libsonnet index 41fb04f0826..4c8e5648ede 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/meta/v1/managedFieldsEntry.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/meta/v1/managedFieldsEntry.libsonnet @@ -11,6 +11,8 @@ withManager(manager): { manager: manager }, '#withOperation':: d.fn(help="\"Operation is the type of operation which lead to this ManagedFieldsEntry being created. The only valid values for this field are 'Apply' and 'Update'.\"", args=[d.arg(name='operation', type=d.T.string)]), withOperation(operation): { operation: operation }, + '#withSubresource':: d.fn(help='"Subresource is the name of the subresource used to update that object, or empty string if the object was updated through the main resource. The value of this field is used to distinguish between managers, even if they share the same name. For example, a status update will be distinct from a regular update using the same manager name. 
Note that the APIVersion field is not related to the Subresource field and it always corresponds to the version of the main resource."', args=[d.arg(name='subresource', type=d.T.string)]), + withSubresource(subresource): { subresource: subresource }, '#withTime':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='time', type=d.T.string)]), withTime(time): { time: time }, '#mixin': 'ignore', diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/meta/v1/microTime.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/meta/v1/microTime.libsonnet similarity index 100% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/meta/v1/microTime.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/meta/v1/microTime.libsonnet diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/meta/v1/objectMeta.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/meta/v1/objectMeta.libsonnet similarity index 82% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/meta/v1/objectMeta.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/meta/v1/objectMeta.libsonnet index 5eda60b5d70..eb0f0cae8ed 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/meta/v1/objectMeta.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/meta/v1/objectMeta.libsonnet @@ -1,12 +1,10 @@ { local d = (import 'doc-util/main.libsonnet'), '#':: d.pkg(name='objectMeta', url='', help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { annotations: annotations }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { annotations+: annotations }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { clusterName: clusterName }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { creationTimestamp: creationTimestamp }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -17,21 +15,21 @@ withFinalizers(finalizers): { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { generateName: generateName }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { generation: generation }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { labels: labels }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { labels+: labels }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. 
This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { name: name }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { namespace: namespace }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] }, @@ -39,9 +37,9 @@ withOwnerReferencesMixin(ownerReferences): { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. 
Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { resourceVersion: resourceVersion }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { selfLink: selfLink }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { uid: uid }, '#mixin': 'ignore', mixin: self, diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/meta/v1/ownerReference.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/meta/v1/ownerReference.libsonnet similarity index 62% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/meta/v1/ownerReference.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/meta/v1/ownerReference.libsonnet index c819c0715e0..66963fbd4e8 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/meta/v1/ownerReference.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/meta/v1/ownerReference.libsonnet @@ -1,15 +1,15 @@ { local d = (import 'doc-util/main.libsonnet'), '#':: d.pkg(name='ownerReference', url='', help='"OwnerReference contains enough information to let you identify an owning object. An owning object must be in the same namespace as the dependent, or be cluster-scoped, so there is no namespace field."'), - '#withBlockOwnerDeletion':: d.fn(help='"If true, AND if the owner has the \\"foregroundDeletion\\" finalizer, then the owner cannot be deleted from the key-value store until this reference is removed. Defaults to false. To set this field, a user needs \\"delete\\" permission of the owner, otherwise 422 (Unprocessable Entity) will be returned."', args=[d.arg(name='blockOwnerDeletion', type=d.T.boolean)]), + '#withBlockOwnerDeletion':: d.fn(help='"If true, AND if the owner has the \\"foregroundDeletion\\" finalizer, then the owner cannot be deleted from the key-value store until this reference is removed. 
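A note on the paired setters that these objectMeta hunks touch: the plain with* function replaces the field, while the *Mixin variant merges into it through Jsonnet's `+:` operator. A minimal sketch, using the two label functions taken verbatim from the hunk above (the import path is an assumption, adjust to your vendor layout):

  local objectMeta = (import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet').meta.v1.objectMeta;

  objectMeta.withLabels({ app: 'demo' })
  + objectMeta.withLabelsMixin({ team: 'platform' })
  // evaluates to { labels: { app: 'demo', team: 'platform' } }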
See https://kubernetes.io/docs/concepts/architecture/garbage-collection/#foreground-deletion for how the garbage collector interacts with this field and enforces the foreground deletion. Defaults to false. To set this field, a user needs \\"delete\\" permission of the owner, otherwise 422 (Unprocessable Entity) will be returned."', args=[d.arg(name='blockOwnerDeletion', type=d.T.boolean)]), withBlockOwnerDeletion(blockOwnerDeletion): { blockOwnerDeletion: blockOwnerDeletion }, '#withController':: d.fn(help='"If true, this reference points to the managing controller."', args=[d.arg(name='controller', type=d.T.boolean)]), withController(controller): { controller: controller }, '#withKind':: d.fn(help='"Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds"', args=[d.arg(name='kind', type=d.T.string)]), withKind(kind): { kind: kind }, - '#withName':: d.fn(help='"Name of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { name: name }, - '#withUid':: d.fn(help='"UID of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { uid: uid }, '#mixin': 'ignore', mixin: self, diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/meta/v1/patch.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/meta/v1/patch.libsonnet similarity index 100% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/meta/v1/patch.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/meta/v1/patch.libsonnet diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/meta/v1/preconditions.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/meta/v1/preconditions.libsonnet similarity index 100% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/meta/v1/preconditions.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/meta/v1/preconditions.libsonnet diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/meta/v1/serverAddressByClientCIDR.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/meta/v1/serverAddressByClientCIDR.libsonnet similarity index 100% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/meta/v1/serverAddressByClientCIDR.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/meta/v1/serverAddressByClientCIDR.libsonnet diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/meta/v1/statusCause.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/meta/v1/statusCause.libsonnet similarity index 100% rename from 
operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/meta/v1/statusCause.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/meta/v1/statusCause.libsonnet diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/meta/v1/statusDetails.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/meta/v1/statusDetails.libsonnet similarity index 93% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/meta/v1/statusDetails.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/meta/v1/statusDetails.libsonnet index 9997f779eed..31a77b432a2 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/meta/v1/statusDetails.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/meta/v1/statusDetails.libsonnet @@ -13,7 +13,7 @@ withName(name): { name: name }, '#withRetryAfterSeconds':: d.fn(help='"If specified, the time in seconds before the operation should be retried. Some errors may indicate the client must take an alternate action - for those errors this field may indicate how long to wait before taking the alternate action."', args=[d.arg(name='retryAfterSeconds', type=d.T.integer)]), withRetryAfterSeconds(retryAfterSeconds): { retryAfterSeconds: retryAfterSeconds }, - '#withUid':: d.fn(help='"UID of the resource. (when there is a single resource which can be described). More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID of the resource. (when there is a single resource which can be described). More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { uid: uid }, '#mixin': 'ignore', mixin: self, diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/meta/v1/time.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/meta/v1/time.libsonnet similarity index 100% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/meta/v1/time.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/meta/v1/time.libsonnet diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/meta/v1/watchEvent.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/meta/v1/watchEvent.libsonnet new file mode 100644 index 00000000000..0eda21e5050 --- /dev/null +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/meta/v1/watchEvent.libsonnet @@ -0,0 +1,17 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='watchEvent', url='', help='"Event represents a single event to a watched resource."'), + '#new':: d.fn(help='new returns an instance of WatchEvent', args=[d.arg(name='name', type=d.T.string)]), + new(name): { + apiVersion: 'storage.k8s.io/v1beta1', + kind: 'WatchEvent', + } + self.metadata.withName(name=name), + '#withObject':: d.fn(help="\"RawExtension is used to hold extensions in external versions.\\n\\nTo use this, make a field which has RawExtension as its type in your external, versioned struct, and Object in your internal struct. 
You also need to register your various plugin types.\\n\\n// Internal package:\\n\\n\\ttype MyAPIObject struct {\\n\\t\\truntime.TypeMeta `json:\\\",inline\\\"`\\n\\t\\tMyPlugin runtime.Object `json:\\\"myPlugin\\\"`\\n\\t}\\n\\n\\ttype PluginA struct {\\n\\t\\tAOption string `json:\\\"aOption\\\"`\\n\\t}\\n\\n// External package:\\n\\n\\ttype MyAPIObject struct {\\n\\t\\truntime.TypeMeta `json:\\\",inline\\\"`\\n\\t\\tMyPlugin runtime.RawExtension `json:\\\"myPlugin\\\"`\\n\\t}\\n\\n\\ttype PluginA struct {\\n\\t\\tAOption string `json:\\\"aOption\\\"`\\n\\t}\\n\\n// On the wire, the JSON will look something like this:\\n\\n\\t{\\n\\t\\t\\\"kind\\\":\\\"MyAPIObject\\\",\\n\\t\\t\\\"apiVersion\\\":\\\"v1\\\",\\n\\t\\t\\\"myPlugin\\\": {\\n\\t\\t\\t\\\"kind\\\":\\\"PluginA\\\",\\n\\t\\t\\t\\\"aOption\\\":\\\"foo\\\",\\n\\t\\t},\\n\\t}\\n\\nSo what happens? Decode first uses json or yaml to unmarshal the serialized data into your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked. The next step is to copy (using pkg/conversion) into the internal struct. The runtime package's DefaultScheme has conversion functions installed which will unpack the JSON stored in RawExtension, turning it into the correct object type, and storing it in the Object. (TODO: In the case where the object is of an unknown type, a runtime.Unknown object will be created and stored.)\"", args=[d.arg(name='object', type=d.T.object)]), + withObject(object): { object: object }, + '#withObjectMixin':: d.fn(help="\"RawExtension is used to hold extensions in external versions.\\n\\nTo use this, make a field which has RawExtension as its type in your external, versioned struct, and Object in your internal struct. You also need to register your various plugin types.\\n\\n// Internal package:\\n\\n\\ttype MyAPIObject struct {\\n\\t\\truntime.TypeMeta `json:\\\",inline\\\"`\\n\\t\\tMyPlugin runtime.Object `json:\\\"myPlugin\\\"`\\n\\t}\\n\\n\\ttype PluginA struct {\\n\\t\\tAOption string `json:\\\"aOption\\\"`\\n\\t}\\n\\n// External package:\\n\\n\\ttype MyAPIObject struct {\\n\\t\\truntime.TypeMeta `json:\\\",inline\\\"`\\n\\t\\tMyPlugin runtime.RawExtension `json:\\\"myPlugin\\\"`\\n\\t}\\n\\n\\ttype PluginA struct {\\n\\t\\tAOption string `json:\\\"aOption\\\"`\\n\\t}\\n\\n// On the wire, the JSON will look something like this:\\n\\n\\t{\\n\\t\\t\\\"kind\\\":\\\"MyAPIObject\\\",\\n\\t\\t\\\"apiVersion\\\":\\\"v1\\\",\\n\\t\\t\\\"myPlugin\\\": {\\n\\t\\t\\t\\\"kind\\\":\\\"PluginA\\\",\\n\\t\\t\\t\\\"aOption\\\":\\\"foo\\\",\\n\\t\\t},\\n\\t}\\n\\nSo what happens? Decode first uses json or yaml to unmarshal the serialized data into your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked. The next step is to copy (using pkg/conversion) into the internal struct. The runtime package's DefaultScheme has conversion functions installed which will unpack the JSON stored in RawExtension, turning it into the correct object type, and storing it in the Object. 
(TODO: In the case where the object is of an unknown type, a runtime.Unknown object will be created and stored.)\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='object', type=d.T.object)]), + withObjectMixin(object): { object+: object }, + '#withType':: d.fn(help='', args=[d.arg(name='type', type=d.T.string)]), + withType(type): { type: type }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/main.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/main.libsonnet similarity index 74% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/main.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/main.libsonnet index 48137d12473..5eded79427c 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/main.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/main.libsonnet @@ -2,5 +2,5 @@ local d = (import 'doc-util/main.libsonnet'), '#':: d.pkg(name='networking', url='', help=''), v1: (import 'v1/main.libsonnet'), - v1beta1: (import 'v1beta1/main.libsonnet'), + v1alpha1: (import 'v1alpha1/main.libsonnet'), } diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1/httpIngressPath.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/httpIngressPath.libsonnet similarity index 85% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1/httpIngressPath.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/httpIngressPath.libsonnet index a50ee2feb2d..47fbe973509 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1/httpIngressPath.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/httpIngressPath.libsonnet @@ -16,18 +16,18 @@ service: { '#port':: d.obj(help='"ServiceBackendPort is the service port being referenced."'), port: { - '#withName':: d.fn(help='"Name is the name of the port on the Service. This is a mutually exclusive setting with \\"Number\\"."', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"name is the name of the port on the Service. This is a mutually exclusive setting with \\"Number\\"."', args=[d.arg(name='name', type=d.T.string)]), withName(name): { backend+: { service+: { port+: { name: name } } } }, - '#withNumber':: d.fn(help='"Number is the numerical port number (e.g. 80) on the Service. This is a mutually exclusive setting with \\"Name\\"."', args=[d.arg(name='number', type=d.T.integer)]), + '#withNumber':: d.fn(help='"number is the numerical port number (e.g. 80) on the Service. This is a mutually exclusive setting with \\"Name\\"."', args=[d.arg(name='number', type=d.T.integer)]), withNumber(number): { backend+: { service+: { port+: { number: number } } } }, }, - '#withName':: d.fn(help='"Name is the referenced service. The service must exist in the same namespace as the Ingress object."', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"name is the referenced service. 
The service must exist in the same namespace as the Ingress object."', args=[d.arg(name='name', type=d.T.string)]), withName(name): { backend+: { service+: { name: name } } }, }, }, - '#withPath':: d.fn(help="\"Path is matched against the path of an incoming request. Currently it can contain characters disallowed from the conventional \\\"path\\\" part of a URL as defined by RFC 3986. Paths must begin with a '/'. When unspecified, all paths from incoming requests are matched.\"", args=[d.arg(name='path', type=d.T.string)]), + '#withPath':: d.fn(help="\"path is matched against the path of an incoming request. Currently it can contain characters disallowed from the conventional \\\"path\\\" part of a URL as defined by RFC 3986. Paths must begin with a '/' and must be present when using PathType with value \\\"Exact\\\" or \\\"Prefix\\\".\"", args=[d.arg(name='path', type=d.T.string)]), withPath(path): { path: path }, - '#withPathType':: d.fn(help="\"PathType determines the interpretation of the Path matching. PathType can be one of the following values: * Exact: Matches the URL path exactly. * Prefix: Matches based on a URL path prefix split by '/'. Matching is\\n done on a path element by element basis. A path element refers is the\\n list of labels in the path split by the '/' separator. A request is a\\n match for path p if every p is an element-wise prefix of p of the\\n request path. Note that if the last element of the path is a substring\\n of the last element in request path, it is not a match (e.g. /foo/bar\\n matches /foo/bar/baz, but does not match /foo/barbaz).\\n* ImplementationSpecific: Interpretation of the Path matching is up to\\n the IngressClass. Implementations can treat this as a separate PathType\\n or treat it identically to Prefix or Exact path types.\\nImplementations are required to support all path types.\"", args=[d.arg(name='pathType', type=d.T.string)]), + '#withPathType':: d.fn(help="\"pathType determines the interpretation of the path matching. PathType can be one of the following values: * Exact: Matches the URL path exactly. * Prefix: Matches based on a URL path prefix split by '/'. Matching is\\n done on a path element by element basis. A path element refers is the\\n list of labels in the path split by the '/' separator. A request is a\\n match for path p if every p is an element-wise prefix of p of the\\n request path. Note that if the last element of the path is a substring\\n of the last element in request path, it is not a match (e.g. /foo/bar\\n matches /foo/bar/baz, but does not match /foo/barbaz).\\n* ImplementationSpecific: Interpretation of the Path matching is up to\\n the IngressClass. 
Implementations can treat this as a separate PathType\\n or treat it identically to Prefix or Exact path types.\\nImplementations are required to support all path types.\"", args=[d.arg(name='pathType', type=d.T.string)]), withPathType(pathType): { pathType: pathType }, '#mixin': 'ignore', mixin: self, diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/extensions/v1beta1/httpIngressRuleValue.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/httpIngressRuleValue.libsonnet similarity index 64% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/extensions/v1beta1/httpIngressRuleValue.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/httpIngressRuleValue.libsonnet index a54d695b4fc..7d54756ae2e 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/extensions/v1beta1/httpIngressRuleValue.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/httpIngressRuleValue.libsonnet @@ -1,9 +1,9 @@ { local d = (import 'doc-util/main.libsonnet'), '#':: d.pkg(name='httpIngressRuleValue', url='', help="\"HTTPIngressRuleValue is a list of http selectors pointing to backends. In the example: http://\u003chost\u003e/\u003cpath\u003e?\u003csearchpart\u003e -\u003e backend where where parts of the url correspond to RFC 3986, this resource will be used to match against everything after the last '/' and before the first '?' or '#'.\""), - '#withPaths':: d.fn(help='"A collection of paths that map requests to backends."', args=[d.arg(name='paths', type=d.T.array)]), + '#withPaths':: d.fn(help='"paths is a collection of paths that map requests to backends."', args=[d.arg(name='paths', type=d.T.array)]), withPaths(paths): { paths: if std.isArray(v=paths) then paths else [paths] }, - '#withPathsMixin':: d.fn(help='"A collection of paths that map requests to backends."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='paths', type=d.T.array)]), + '#withPathsMixin':: d.fn(help='"paths is a collection of paths that map requests to backends."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='paths', type=d.T.array)]), withPathsMixin(paths): { paths+: if std.isArray(v=paths) then paths else [paths] }, '#mixin': 'ignore', mixin: self, diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1/ingress.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/ingress.libsonnet similarity index 75% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1/ingress.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/ingress.libsonnet index 422b2a630c2..15464bc5e6a 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1/ingress.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/ingress.libsonnet @@ -3,12 +3,10 @@ '#':: d.pkg(name='ingress', url='', help='"Ingress is a collection of rules that allow inbound connections to reach the endpoints defined by a backend. 
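A usage sketch for the path and pathType semantics documented in the httpIngressPath hunk above; the builder functions are the ones defined in that hunk, while the import path and the Service name are placeholders:

  local path = (import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet').networking.v1.httpIngressPath;

  path.withPath('/api')
  + path.withPathType('Prefix')                 // 'Exact', 'Prefix' or 'ImplementationSpecific'
  + path.backend.service.withName('demo-svc')   // hypothetical Service in the Ingress namespace
  + path.backend.service.port.withNumber(80)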
An Ingress can be configured to give services externally-reachable urls, load balance traffic, terminate SSL, offer name based virtual hosting etc."'), '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -19,21 +17,21 @@ withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. 
Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { metadata+: { generateName: generateName } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. 
More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { metadata+: { labels+: labels } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { metadata+: { namespace: namespace } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, @@ -41,9 +39,9 @@ withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. 
It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { metadata+: { uid: uid } }, }, '#new':: d.fn(help='new returns an instance of Ingress', args=[d.arg(name='name', type=d.T.string)]), @@ -68,24 +66,24 @@ service: { '#port':: d.obj(help='"ServiceBackendPort is the service port being referenced."'), port: { - '#withName':: d.fn(help='"Name is the name of the port on the Service. This is a mutually exclusive setting with \\"Number\\"."', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"name is the name of the port on the Service. This is a mutually exclusive setting with \\"Number\\"."', args=[d.arg(name='name', type=d.T.string)]), withName(name): { spec+: { defaultBackend+: { service+: { port+: { name: name } } } } }, - '#withNumber':: d.fn(help='"Number is the numerical port number (e.g. 80) on the Service. This is a mutually exclusive setting with \\"Name\\"."', args=[d.arg(name='number', type=d.T.integer)]), + '#withNumber':: d.fn(help='"number is the numerical port number (e.g. 80) on the Service. This is a mutually exclusive setting with \\"Name\\"."', args=[d.arg(name='number', type=d.T.integer)]), withNumber(number): { spec+: { defaultBackend+: { service+: { port+: { number: number } } } } }, }, - '#withName':: d.fn(help='"Name is the referenced service. The service must exist in the same namespace as the Ingress object."', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"name is the referenced service. The service must exist in the same namespace as the Ingress object."', args=[d.arg(name='name', type=d.T.string)]), withName(name): { spec+: { defaultBackend+: { service+: { name: name } } } }, }, }, - '#withIngressClassName':: d.fn(help='"IngressClassName is the name of the IngressClass cluster resource. The associated IngressClass defines which controller will implement the resource. This replaces the deprecated `kubernetes.io/ingress.class` annotation. For backwards compatibility, when that annotation is set, it must be given precedence over this field. The controller may emit a warning if the field and annotation have different values. Implementations of this API should ignore Ingresses without a class specified. An IngressClass resource may be marked as default, which can be used to set a default value for this field. For more information, refer to the IngressClass documentation."', args=[d.arg(name='ingressClassName', type=d.T.string)]), + '#withIngressClassName':: d.fn(help='"ingressClassName is the name of an IngressClass cluster resource. Ingress controller implementations use this field to know whether they should be serving this Ingress resource, by a transitive connection (controller -> IngressClass -> Ingress resource). 
Although the `kubernetes.io/ingress.class` annotation (simple constant name) was never formally defined, it was widely supported by Ingress controllers to create a direct binding between Ingress controller and Ingress resources. Newly created Ingress resources should prefer using the field. However, even though the annotation is officially deprecated, for backwards compatibility reasons, ingress controllers should still honor that annotation if present."', args=[d.arg(name='ingressClassName', type=d.T.string)]), withIngressClassName(ingressClassName): { spec+: { ingressClassName: ingressClassName } }, - '#withRules':: d.fn(help='"A list of host rules used to configure the Ingress. If unspecified, or no rule matches, all traffic is sent to the default backend."', args=[d.arg(name='rules', type=d.T.array)]), + '#withRules':: d.fn(help='"rules is a list of host rules used to configure the Ingress. If unspecified, or no rule matches, all traffic is sent to the default backend."', args=[d.arg(name='rules', type=d.T.array)]), withRules(rules): { spec+: { rules: if std.isArray(v=rules) then rules else [rules] } }, - '#withRulesMixin':: d.fn(help='"A list of host rules used to configure the Ingress. If unspecified, or no rule matches, all traffic is sent to the default backend."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='rules', type=d.T.array)]), + '#withRulesMixin':: d.fn(help='"rules is a list of host rules used to configure the Ingress. If unspecified, or no rule matches, all traffic is sent to the default backend."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='rules', type=d.T.array)]), withRulesMixin(rules): { spec+: { rules+: if std.isArray(v=rules) then rules else [rules] } }, - '#withTls':: d.fn(help='"TLS configuration. Currently the Ingress only supports a single TLS port, 443. If multiple members of this list specify different hosts, they will be multiplexed on the same port according to the hostname specified through the SNI TLS extension, if the ingress controller fulfilling the ingress supports SNI."', args=[d.arg(name='tls', type=d.T.array)]), + '#withTls':: d.fn(help='"tls represents the TLS configuration. Currently the Ingress only supports a single TLS port, 443. If multiple members of this list specify different hosts, they will be multiplexed on the same port according to the hostname specified through the SNI TLS extension, if the ingress controller fulfilling the ingress supports SNI."', args=[d.arg(name='tls', type=d.T.array)]), withTls(tls): { spec+: { tls: if std.isArray(v=tls) then tls else [tls] } }, - '#withTlsMixin':: d.fn(help='"TLS configuration. Currently the Ingress only supports a single TLS port, 443. If multiple members of this list specify different hosts, they will be multiplexed on the same port according to the hostname specified through the SNI TLS extension, if the ingress controller fulfilling the ingress supports SNI."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='tls', type=d.T.array)]), + '#withTlsMixin':: d.fn(help='"tls represents the TLS configuration. Currently the Ingress only supports a single TLS port, 443. 
If multiple members of this list specify different hosts, they will be multiplexed on the same port according to the hostname specified through the SNI TLS extension, if the ingress controller fulfilling the ingress supports SNI."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='tls', type=d.T.array)]), withTlsMixin(tls): { spec+: { tls+: if std.isArray(v=tls) then tls else [tls] } }, }, '#mixin': 'ignore', diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1/ingressBackend.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/ingressBackend.libsonnet similarity index 90% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1/ingressBackend.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/ingressBackend.libsonnet index c2a7000ac5a..803c12f7c33 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1/ingressBackend.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/ingressBackend.libsonnet @@ -14,12 +14,12 @@ service: { '#port':: d.obj(help='"ServiceBackendPort is the service port being referenced."'), port: { - '#withName':: d.fn(help='"Name is the name of the port on the Service. This is a mutually exclusive setting with \\"Number\\"."', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"name is the name of the port on the Service. This is a mutually exclusive setting with \\"Number\\"."', args=[d.arg(name='name', type=d.T.string)]), withName(name): { service+: { port+: { name: name } } }, - '#withNumber':: d.fn(help='"Number is the numerical port number (e.g. 80) on the Service. This is a mutually exclusive setting with \\"Name\\"."', args=[d.arg(name='number', type=d.T.integer)]), + '#withNumber':: d.fn(help='"number is the numerical port number (e.g. 80) on the Service. This is a mutually exclusive setting with \\"Name\\"."', args=[d.arg(name='number', type=d.T.integer)]), withNumber(number): { service+: { port+: { number: number } } }, }, - '#withName':: d.fn(help='"Name is the referenced service. The service must exist in the same namespace as the Ingress object."', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"name is the referenced service. The service must exist in the same namespace as the Ingress object."', args=[d.arg(name='name', type=d.T.string)]), withName(name): { service+: { name: name } }, }, '#mixin': 'ignore', diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1/ingressClass.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/ingressClass.libsonnet similarity index 82% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1/ingressClass.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/ingressClass.libsonnet index fbd68ac9f07..5e76306f213 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1/ingressClass.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/ingressClass.libsonnet @@ -3,12 +3,10 @@ '#':: d.pkg(name='ingressClass', url='', help='"IngressClass represents the class of the Ingress, referenced by the Ingress Spec. 
The `ingressclass.kubernetes.io/is-default-class` annotation can be used to indicate that an IngressClass should be considered default. When a single IngressClass resource has this annotation set to true, new Ingress resources without a class specified will be assigned this default class."'), '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. 
Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -19,21 +17,21 @@ withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { metadata+: { generateName: generateName } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. 
Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { metadata+: { labels+: labels } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. 
Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { metadata+: { namespace: namespace } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, @@ -41,9 +39,9 @@ withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. 
Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { metadata+: { uid: uid } }, }, '#new':: d.fn(help='new returns an instance of IngressClass', args=[d.arg(name='name', type=d.T.string)]), @@ -55,18 +53,18 @@ spec: { '#parameters':: d.obj(help='"IngressClassParametersReference identifies an API object. This can be used to specify a cluster or namespace-scoped resource."'), parameters: { - '#withApiGroup':: d.fn(help='"APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required."', args=[d.arg(name='apiGroup', type=d.T.string)]), + '#withApiGroup':: d.fn(help='"apiGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required."', args=[d.arg(name='apiGroup', type=d.T.string)]), withApiGroup(apiGroup): { spec+: { parameters+: { apiGroup: apiGroup } } }, - '#withKind':: d.fn(help='"Kind is the type of resource being referenced."', args=[d.arg(name='kind', type=d.T.string)]), + '#withKind':: d.fn(help='"kind is the type of resource being referenced."', args=[d.arg(name='kind', type=d.T.string)]), withKind(kind): { spec+: { parameters+: { kind: kind } } }, - '#withName':: d.fn(help='"Name is the name of resource being referenced."', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"name is the name of resource being referenced."', args=[d.arg(name='name', type=d.T.string)]), withName(name): { spec+: { parameters+: { name: name } } }, - '#withNamespace':: d.fn(help='"Namespace is the namespace of the resource being referenced. This field is required when scope is set to \\"Namespace\\" and must be unset when scope is set to \\"Cluster\\"."', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"namespace is the namespace of the resource being referenced. This field is required when scope is set to \\"Namespace\\" and must be unset when scope is set to \\"Cluster\\"."', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { spec+: { parameters+: { namespace: namespace } } }, - '#withScope':: d.fn(help='"Scope represents if this refers to a cluster or namespace scoped resource. This may be set to \\"Cluster\\" (default) or \\"Namespace\\". 
Field can be enabled with IngressClassNamespacedParams feature gate."', args=[d.arg(name='scope', type=d.T.string)]), + '#withScope':: d.fn(help='"scope represents if this refers to a cluster or namespace scoped resource. This may be set to \\"Cluster\\" (default) or \\"Namespace\\"."', args=[d.arg(name='scope', type=d.T.string)]), withScope(scope): { spec+: { parameters+: { scope: scope } } }, }, - '#withController':: d.fn(help='"Controller refers to the name of the controller that should handle this class. This allows for different \\"flavors\\" that are controlled by the same controller. For example, you may have different Parameters for the same implementing controller. This should be specified as a domain-prefixed path no more than 250 characters in length, e.g. \\"acme.io/ingress-controller\\". This field is immutable."', args=[d.arg(name='controller', type=d.T.string)]), + '#withController':: d.fn(help='"controller refers to the name of the controller that should handle this class. This allows for different \\"flavors\\" that are controlled by the same controller. For example, you may have different parameters for the same implementing controller. This should be specified as a domain-prefixed path no more than 250 characters in length, e.g. \\"acme.io/ingress-controller\\". This field is immutable."', args=[d.arg(name='controller', type=d.T.string)]), withController(controller): { spec+: { controller: controller } }, }, '#mixin': 'ignore', diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1/ingressClassParametersReference.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/ingressClassParametersReference.libsonnet similarity index 69% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1/ingressClassParametersReference.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/ingressClassParametersReference.libsonnet index b1ed8b58500..1c17567d48b 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1/ingressClassParametersReference.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/ingressClassParametersReference.libsonnet @@ -1,15 +1,15 @@ { local d = (import 'doc-util/main.libsonnet'), '#':: d.pkg(name='ingressClassParametersReference', url='', help='"IngressClassParametersReference identifies an API object. This can be used to specify a cluster or namespace-scoped resource."'), - '#withApiGroup':: d.fn(help='"APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required."', args=[d.arg(name='apiGroup', type=d.T.string)]), + '#withApiGroup':: d.fn(help='"apiGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. 
For any other third-party types, APIGroup is required."', args=[d.arg(name='apiGroup', type=d.T.string)]), withApiGroup(apiGroup): { apiGroup: apiGroup }, - '#withKind':: d.fn(help='"Kind is the type of resource being referenced."', args=[d.arg(name='kind', type=d.T.string)]), + '#withKind':: d.fn(help='"kind is the type of resource being referenced."', args=[d.arg(name='kind', type=d.T.string)]), withKind(kind): { kind: kind }, - '#withName':: d.fn(help='"Name is the name of resource being referenced."', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"name is the name of resource being referenced."', args=[d.arg(name='name', type=d.T.string)]), withName(name): { name: name }, - '#withNamespace':: d.fn(help='"Namespace is the namespace of the resource being referenced. This field is required when scope is set to \\"Namespace\\" and must be unset when scope is set to \\"Cluster\\"."', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"namespace is the namespace of the resource being referenced. This field is required when scope is set to \\"Namespace\\" and must be unset when scope is set to \\"Cluster\\"."', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { namespace: namespace }, - '#withScope':: d.fn(help='"Scope represents if this refers to a cluster or namespace scoped resource. This may be set to \\"Cluster\\" (default) or \\"Namespace\\". Field can be enabled with IngressClassNamespacedParams feature gate."', args=[d.arg(name='scope', type=d.T.string)]), + '#withScope':: d.fn(help='"scope represents if this refers to a cluster or namespace scoped resource. This may be set to \\"Cluster\\" (default) or \\"Namespace\\"."', args=[d.arg(name='scope', type=d.T.string)]), withScope(scope): { scope: scope }, '#mixin': 'ignore', mixin: self, diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1beta1/ingressClassSpec.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/ingressClassSpec.libsonnet similarity index 73% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1beta1/ingressClassSpec.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/ingressClassSpec.libsonnet index ebbc86576cb..16a9f053825 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1beta1/ingressClassSpec.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/ingressClassSpec.libsonnet @@ -3,18 +3,18 @@ '#':: d.pkg(name='ingressClassSpec', url='', help='"IngressClassSpec provides information about the class of an Ingress."'), '#parameters':: d.obj(help='"IngressClassParametersReference identifies an API object. This can be used to specify a cluster or namespace-scoped resource."'), parameters: { - '#withApiGroup':: d.fn(help='"APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required."', args=[d.arg(name='apiGroup', type=d.T.string)]), + '#withApiGroup':: d.fn(help='"apiGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. 
For any other third-party types, APIGroup is required."', args=[d.arg(name='apiGroup', type=d.T.string)]), withApiGroup(apiGroup): { parameters+: { apiGroup: apiGroup } }, - '#withKind':: d.fn(help='"Kind is the type of resource being referenced."', args=[d.arg(name='kind', type=d.T.string)]), + '#withKind':: d.fn(help='"kind is the type of resource being referenced."', args=[d.arg(name='kind', type=d.T.string)]), withKind(kind): { parameters+: { kind: kind } }, - '#withName':: d.fn(help='"Name is the name of resource being referenced."', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"name is the name of resource being referenced."', args=[d.arg(name='name', type=d.T.string)]), withName(name): { parameters+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace is the namespace of the resource being referenced. This field is required when scope is set to \\"Namespace\\" and must be unset when scope is set to \\"Cluster\\"."', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"namespace is the namespace of the resource being referenced. This field is required when scope is set to \\"Namespace\\" and must be unset when scope is set to \\"Cluster\\"."', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { parameters+: { namespace: namespace } }, - '#withScope':: d.fn(help='"Scope represents if this refers to a cluster or namespace scoped resource. This may be set to \\"Cluster\\" (default) or \\"Namespace\\". Field can be enabled with IngressClassNamespacedParams feature gate."', args=[d.arg(name='scope', type=d.T.string)]), + '#withScope':: d.fn(help='"scope represents if this refers to a cluster or namespace scoped resource. This may be set to \\"Cluster\\" (default) or \\"Namespace\\"."', args=[d.arg(name='scope', type=d.T.string)]), withScope(scope): { parameters+: { scope: scope } }, }, - '#withController':: d.fn(help='"Controller refers to the name of the controller that should handle this class. This allows for different \\"flavors\\" that are controlled by the same controller. For example, you may have different Parameters for the same implementing controller. This should be specified as a domain-prefixed path no more than 250 characters in length, e.g. \\"acme.io/ingress-controller\\". This field is immutable."', args=[d.arg(name='controller', type=d.T.string)]), + '#withController':: d.fn(help='"controller refers to the name of the controller that should handle this class. This allows for different \\"flavors\\" that are controlled by the same controller. For example, you may have different parameters for the same implementing controller. This should be specified as a domain-prefixed path no more than 250 characters in length, e.g. \\"acme.io/ingress-controller\\". 
This field is immutable."', args=[d.arg(name='controller', type=d.T.string)]), withController(controller): { controller: controller }, '#mixin': 'ignore', mixin: self, diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/ingressLoadBalancerIngress.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/ingressLoadBalancerIngress.libsonnet new file mode 100644 index 00000000000..ee9e9abddbc --- /dev/null +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/ingressLoadBalancerIngress.libsonnet @@ -0,0 +1,14 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='ingressLoadBalancerIngress', url='', help='"IngressLoadBalancerIngress represents the status of a load-balancer ingress point."'), + '#withHostname':: d.fn(help='"hostname is set for load-balancer ingress points that are DNS based."', args=[d.arg(name='hostname', type=d.T.string)]), + withHostname(hostname): { hostname: hostname }, + '#withIp':: d.fn(help='"ip is set for load-balancer ingress points that are IP based."', args=[d.arg(name='ip', type=d.T.string)]), + withIp(ip): { ip: ip }, + '#withPorts':: d.fn(help='"ports provides information about the ports exposed by this LoadBalancer."', args=[d.arg(name='ports', type=d.T.array)]), + withPorts(ports): { ports: if std.isArray(v=ports) then ports else [ports] }, + '#withPortsMixin':: d.fn(help='"ports provides information about the ports exposed by this LoadBalancer."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ports', type=d.T.array)]), + withPortsMixin(ports): { ports+: if std.isArray(v=ports) then ports else [ports] }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/ingressLoadBalancerStatus.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/ingressLoadBalancerStatus.libsonnet new file mode 100644 index 00000000000..bf8a32d2c4f --- /dev/null +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/ingressLoadBalancerStatus.libsonnet @@ -0,0 +1,10 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='ingressLoadBalancerStatus', url='', help='"IngressLoadBalancerStatus represents the status of a load-balancer."'), + '#withIngress':: d.fn(help='"ingress is a list containing ingress points for the load-balancer."', args=[d.arg(name='ingress', type=d.T.array)]), + withIngress(ingress): { ingress: if std.isArray(v=ingress) then ingress else [ingress] }, + '#withIngressMixin':: d.fn(help='"ingress is a list containing ingress points for the load-balancer."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ingress', type=d.T.array)]), + withIngressMixin(ingress): { ingress+: if std.isArray(v=ingress) then ingress else [ingress] }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/ingressPortStatus.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/ingressPortStatus.libsonnet new file mode 100644 index 00000000000..035d1b0d6a5 --- /dev/null +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/ingressPortStatus.libsonnet @@ -0,0 +1,12 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='ingressPortStatus', url='', help='"IngressPortStatus 
represents the error condition of a service port"'), + '#withError':: d.fn(help='"error is to record the problem with the service port The format of the error shall comply with the following rules: - built-in error values shall be specified in this file and those shall use\\n CamelCase names\\n- cloud provider specific error values must have names that comply with the\\n format foo.example.com/CamelCase."', args=[d.arg(name='err', type=d.T.string)]), + withError(err): { 'error': err }, + '#withPort':: d.fn(help='"port is the port number of the ingress port."', args=[d.arg(name='port', type=d.T.integer)]), + withPort(port): { port: port }, + '#withProtocol':: d.fn(help='"protocol is the protocol of the ingress port. The supported values are: \\"TCP\\", \\"UDP\\", \\"SCTP\\', args=[d.arg(name='protocol', type=d.T.string)]), + withProtocol(protocol): { protocol: protocol }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/extensions/v1beta1/ingressRule.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/ingressRule.libsonnet similarity index 78% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/extensions/v1beta1/ingressRule.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/ingressRule.libsonnet index ebba87ea6bf..d58de75625e 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/extensions/v1beta1/ingressRule.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/ingressRule.libsonnet @@ -3,12 +3,12 @@ '#':: d.pkg(name='ingressRule', url='', help='"IngressRule represents the rules mapping the paths under a specified host to the related backend services. Incoming requests are first evaluated for a host match, then routed to the backend associated with the matching IngressRuleValue."'), '#http':: d.obj(help="\"HTTPIngressRuleValue is a list of http selectors pointing to backends. In the example: http://\u003chost\u003e/\u003cpath\u003e?\u003csearchpart\u003e -\u003e backend where where parts of the url correspond to RFC 3986, this resource will be used to match against everything after the last '/' and before the first '?' or '#'.\""), http: { - '#withPaths':: d.fn(help='"A collection of paths that map requests to backends."', args=[d.arg(name='paths', type=d.T.array)]), + '#withPaths':: d.fn(help='"paths is a collection of paths that map requests to backends."', args=[d.arg(name='paths', type=d.T.array)]), withPaths(paths): { http+: { paths: if std.isArray(v=paths) then paths else [paths] } }, - '#withPathsMixin':: d.fn(help='"A collection of paths that map requests to backends."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='paths', type=d.T.array)]), + '#withPathsMixin':: d.fn(help='"paths is a collection of paths that map requests to backends."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='paths', type=d.T.array)]), withPathsMixin(paths): { http+: { paths+: if std.isArray(v=paths) then paths else [paths] } }, }, - '#withHost':: d.fn(help="\"Host is the fully qualified domain name of a network host, as defined by RFC 3986. Note the following deviations from the \\\"host\\\" part of the URI as defined in RFC 3986: 1. IPs are not allowed. Currently an IngressRuleValue can only apply to\\n the IP in the Spec of the parent Ingress.\\n2. 
The `:` delimiter is not respected because ports are not allowed.\\n\\t Currently the port of an Ingress is implicitly :80 for http and\\n\\t :443 for https.\\nBoth these may change in the future. Incoming requests are matched against the host before the IngressRuleValue. If the host is unspecified, the Ingress routes all traffic based on the specified IngressRuleValue.\\n\\nHost can be \\\"precise\\\" which is a domain name without the terminating dot of a network host (e.g. \\\"foo.bar.com\\\") or \\\"wildcard\\\", which is a domain name prefixed with a single wildcard label (e.g. \\\"*.foo.com\\\"). The wildcard character '*' must appear by itself as the first DNS label and matches only a single label. You cannot have a wildcard label by itself (e.g. Host == \\\"*\\\"). Requests will be matched against the Host field in the following way: 1. If Host is precise, the request matches this rule if the http host header is equal to Host. 2. If Host is a wildcard, then the request matches this rule if the http host header is to equal to the suffix (removing the first label) of the wildcard rule.\"", args=[d.arg(name='host', type=d.T.string)]), + '#withHost':: d.fn(help="\"host is the fully qualified domain name of a network host, as defined by RFC 3986. Note the following deviations from the \\\"host\\\" part of the URI as defined in RFC 3986: 1. IPs are not allowed. Currently an IngressRuleValue can only apply to\\n the IP in the Spec of the parent Ingress.\\n2. The `:` delimiter is not respected because ports are not allowed.\\n\\t Currently the port of an Ingress is implicitly :80 for http and\\n\\t :443 for https.\\nBoth these may change in the future. Incoming requests are matched against the host before the IngressRuleValue. If the host is unspecified, the Ingress routes all traffic based on the specified IngressRuleValue.\\n\\nhost can be \\\"precise\\\" which is a domain name without the terminating dot of a network host (e.g. \\\"foo.bar.com\\\") or \\\"wildcard\\\", which is a domain name prefixed with a single wildcard label (e.g. \\\"*.foo.com\\\"). The wildcard character '*' must appear by itself as the first DNS label and matches only a single label. You cannot have a wildcard label by itself (e.g. Host == \\\"*\\\"). Requests will be matched against the Host field in the following way: 1. If host is precise, the request matches this rule if the http host header is equal to Host. 2. 
If host is a wildcard, then the request matches this rule if the http host header is to equal to the suffix (removing the first label) of the wildcard rule.\"", args=[d.arg(name='host', type=d.T.string)]), withHost(host): { host: host }, '#mixin': 'ignore', mixin: self, diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1/ingressServiceBackend.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/ingressServiceBackend.libsonnet similarity index 81% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1/ingressServiceBackend.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/ingressServiceBackend.libsonnet index fe0447b3348..7a7c0141fe1 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1/ingressServiceBackend.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/ingressServiceBackend.libsonnet @@ -3,12 +3,12 @@ '#':: d.pkg(name='ingressServiceBackend', url='', help='"IngressServiceBackend references a Kubernetes Service as a Backend."'), '#port':: d.obj(help='"ServiceBackendPort is the service port being referenced."'), port: { - '#withName':: d.fn(help='"Name is the name of the port on the Service. This is a mutually exclusive setting with \\"Number\\"."', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"name is the name of the port on the Service. This is a mutually exclusive setting with \\"Number\\"."', args=[d.arg(name='name', type=d.T.string)]), withName(name): { port+: { name: name } }, - '#withNumber':: d.fn(help='"Number is the numerical port number (e.g. 80) on the Service. This is a mutually exclusive setting with \\"Name\\"."', args=[d.arg(name='number', type=d.T.integer)]), + '#withNumber':: d.fn(help='"number is the numerical port number (e.g. 80) on the Service. This is a mutually exclusive setting with \\"Name\\"."', args=[d.arg(name='number', type=d.T.integer)]), withNumber(number): { port+: { number: number } }, }, - '#withName':: d.fn(help='"Name is the referenced service. The service must exist in the same namespace as the Ingress object."', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"name is the referenced service. The service must exist in the same namespace as the Ingress object."', args=[d.arg(name='name', type=d.T.string)]), withName(name): { name: name }, '#mixin': 'ignore', mixin: self, diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1/ingressSpec.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/ingressSpec.libsonnet similarity index 51% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1/ingressSpec.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/ingressSpec.libsonnet index bd79b361709..c99326738b6 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1/ingressSpec.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/ingressSpec.libsonnet @@ -16,24 +16,24 @@ service: { '#port':: d.obj(help='"ServiceBackendPort is the service port being referenced."'), port: { - '#withName':: d.fn(help='"Name is the name of the port on the Service. 
This is a mutually exclusive setting with \\"Number\\"."', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"name is the name of the port on the Service. This is a mutually exclusive setting with \\"Number\\"."', args=[d.arg(name='name', type=d.T.string)]), withName(name): { defaultBackend+: { service+: { port+: { name: name } } } }, - '#withNumber':: d.fn(help='"Number is the numerical port number (e.g. 80) on the Service. This is a mutually exclusive setting with \\"Name\\"."', args=[d.arg(name='number', type=d.T.integer)]), + '#withNumber':: d.fn(help='"number is the numerical port number (e.g. 80) on the Service. This is a mutually exclusive setting with \\"Name\\"."', args=[d.arg(name='number', type=d.T.integer)]), withNumber(number): { defaultBackend+: { service+: { port+: { number: number } } } }, }, - '#withName':: d.fn(help='"Name is the referenced service. The service must exist in the same namespace as the Ingress object."', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"name is the referenced service. The service must exist in the same namespace as the Ingress object."', args=[d.arg(name='name', type=d.T.string)]), withName(name): { defaultBackend+: { service+: { name: name } } }, }, }, - '#withIngressClassName':: d.fn(help='"IngressClassName is the name of the IngressClass cluster resource. The associated IngressClass defines which controller will implement the resource. This replaces the deprecated `kubernetes.io/ingress.class` annotation. For backwards compatibility, when that annotation is set, it must be given precedence over this field. The controller may emit a warning if the field and annotation have different values. Implementations of this API should ignore Ingresses without a class specified. An IngressClass resource may be marked as default, which can be used to set a default value for this field. For more information, refer to the IngressClass documentation."', args=[d.arg(name='ingressClassName', type=d.T.string)]), + '#withIngressClassName':: d.fn(help='"ingressClassName is the name of an IngressClass cluster resource. Ingress controller implementations use this field to know whether they should be serving this Ingress resource, by a transitive connection (controller -> IngressClass -> Ingress resource). Although the `kubernetes.io/ingress.class` annotation (simple constant name) was never formally defined, it was widely supported by Ingress controllers to create a direct binding between Ingress controller and Ingress resources. Newly created Ingress resources should prefer using the field. However, even though the annotation is officially deprecated, for backwards compatibility reasons, ingress controllers should still honor that annotation if present."', args=[d.arg(name='ingressClassName', type=d.T.string)]), withIngressClassName(ingressClassName): { ingressClassName: ingressClassName }, - '#withRules':: d.fn(help='"A list of host rules used to configure the Ingress. If unspecified, or no rule matches, all traffic is sent to the default backend."', args=[d.arg(name='rules', type=d.T.array)]), + '#withRules':: d.fn(help='"rules is a list of host rules used to configure the Ingress. If unspecified, or no rule matches, all traffic is sent to the default backend."', args=[d.arg(name='rules', type=d.T.array)]), withRules(rules): { rules: if std.isArray(v=rules) then rules else [rules] }, - '#withRulesMixin':: d.fn(help='"A list of host rules used to configure the Ingress. 
If unspecified, or no rule matches, all traffic is sent to the default backend."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='rules', type=d.T.array)]), + '#withRulesMixin':: d.fn(help='"rules is a list of host rules used to configure the Ingress. If unspecified, or no rule matches, all traffic is sent to the default backend."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='rules', type=d.T.array)]), withRulesMixin(rules): { rules+: if std.isArray(v=rules) then rules else [rules] }, - '#withTls':: d.fn(help='"TLS configuration. Currently the Ingress only supports a single TLS port, 443. If multiple members of this list specify different hosts, they will be multiplexed on the same port according to the hostname specified through the SNI TLS extension, if the ingress controller fulfilling the ingress supports SNI."', args=[d.arg(name='tls', type=d.T.array)]), + '#withTls':: d.fn(help='"tls represents the TLS configuration. Currently the Ingress only supports a single TLS port, 443. If multiple members of this list specify different hosts, they will be multiplexed on the same port according to the hostname specified through the SNI TLS extension, if the ingress controller fulfilling the ingress supports SNI."', args=[d.arg(name='tls', type=d.T.array)]), withTls(tls): { tls: if std.isArray(v=tls) then tls else [tls] }, - '#withTlsMixin':: d.fn(help='"TLS configuration. Currently the Ingress only supports a single TLS port, 443. If multiple members of this list specify different hosts, they will be multiplexed on the same port according to the hostname specified through the SNI TLS extension, if the ingress controller fulfilling the ingress supports SNI."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='tls', type=d.T.array)]), + '#withTlsMixin':: d.fn(help='"tls represents the TLS configuration. Currently the Ingress only supports a single TLS port, 443. 
If multiple members of this list specify different hosts, they will be multiplexed on the same port according to the hostname specified through the SNI TLS extension, if the ingress controller fulfilling the ingress supports SNI."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='tls', type=d.T.array)]), withTlsMixin(tls): { tls+: if std.isArray(v=tls) then tls else [tls] }, '#mixin': 'ignore', mixin: self, diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/ingressStatus.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/ingressStatus.libsonnet new file mode 100644 index 00000000000..b2046d7f418 --- /dev/null +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/ingressStatus.libsonnet @@ -0,0 +1,13 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='ingressStatus', url='', help='"IngressStatus describe the current state of the Ingress."'), + '#loadBalancer':: d.obj(help='"IngressLoadBalancerStatus represents the status of a load-balancer."'), + loadBalancer: { + '#withIngress':: d.fn(help='"ingress is a list containing ingress points for the load-balancer."', args=[d.arg(name='ingress', type=d.T.array)]), + withIngress(ingress): { loadBalancer+: { ingress: if std.isArray(v=ingress) then ingress else [ingress] } }, + '#withIngressMixin':: d.fn(help='"ingress is a list containing ingress points for the load-balancer."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ingress', type=d.T.array)]), + withIngressMixin(ingress): { loadBalancer+: { ingress+: if std.isArray(v=ingress) then ingress else [ingress] } }, + }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/ingressTLS.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/ingressTLS.libsonnet new file mode 100644 index 00000000000..16789baf635 --- /dev/null +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/ingressTLS.libsonnet @@ -0,0 +1,12 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='ingressTLS', url='', help='"IngressTLS describes the transport layer security associated with an ingress."'), + '#withHosts':: d.fn(help='"hosts is a list of hosts included in the TLS certificate. The values in this list must match the name/s used in the tlsSecret. Defaults to the wildcard host setting for the loadbalancer controller fulfilling this Ingress, if left unspecified."', args=[d.arg(name='hosts', type=d.T.array)]), + withHosts(hosts): { hosts: if std.isArray(v=hosts) then hosts else [hosts] }, + '#withHostsMixin':: d.fn(help='"hosts is a list of hosts included in the TLS certificate. The values in this list must match the name/s used in the tlsSecret. Defaults to the wildcard host setting for the loadbalancer controller fulfilling this Ingress, if left unspecified."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='hosts', type=d.T.array)]), + withHostsMixin(hosts): { hosts+: if std.isArray(v=hosts) then hosts else [hosts] }, + '#withSecretName':: d.fn(help='"secretName is the name of the secret used to terminate TLS traffic on port 443. Field is left optional to allow TLS routing based on SNI hostname alone. 
If the SNI host in a listener conflicts with the \\"Host\\" header field used by an IngressRule, the SNI host is used for termination and value of the \\"Host\\" header is used for routing."', args=[d.arg(name='secretName', type=d.T.string)]), + withSecretName(secretName): { secretName: secretName }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/ipBlock.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/ipBlock.libsonnet new file mode 100644 index 00000000000..b54b0d6d997 --- /dev/null +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/ipBlock.libsonnet @@ -0,0 +1,12 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='ipBlock', url='', help="\"IPBlock describes a particular CIDR (Ex. \\\"192.168.1.0/24\\\",\\\"2001:db8::/64\\\") that is allowed to the pods matched by a NetworkPolicySpec's podSelector. The except entry describes CIDRs that should not be included within this rule.\""), + '#withCidr':: d.fn(help='"cidr is a string representing the IPBlock Valid examples are \\"192.168.1.0/24\\" or \\"2001:db8::/64\\', args=[d.arg(name='cidr', type=d.T.string)]), + withCidr(cidr): { cidr: cidr }, + '#withExcept':: d.fn(help='"except is a slice of CIDRs that should not be included within an IPBlock Valid examples are \\"192.168.1.0/24\\" or \\"2001:db8::/64\\" Except values will be rejected if they are outside the cidr range"', args=[d.arg(name='except', type=d.T.array)]), + withExcept(except): { except: if std.isArray(v=except) then except else [except] }, + '#withExceptMixin':: d.fn(help='"except is a slice of CIDRs that should not be included within an IPBlock Valid examples are \\"192.168.1.0/24\\" or \\"2001:db8::/64\\" Except values will be rejected if they are outside the cidr range"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='except', type=d.T.array)]), + withExceptMixin(except): { except+: if std.isArray(v=except) then except else [except] }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1/main.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/main.libsonnet similarity index 85% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1/main.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/main.libsonnet index fdfbdc14ac6..fd4e3e05119 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1/main.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/main.libsonnet @@ -8,6 +8,9 @@ ingressClass: (import 'ingressClass.libsonnet'), ingressClassParametersReference: (import 'ingressClassParametersReference.libsonnet'), ingressClassSpec: (import 'ingressClassSpec.libsonnet'), + ingressLoadBalancerIngress: (import 'ingressLoadBalancerIngress.libsonnet'), + ingressLoadBalancerStatus: (import 'ingressLoadBalancerStatus.libsonnet'), + ingressPortStatus: (import 'ingressPortStatus.libsonnet'), ingressRule: (import 'ingressRule.libsonnet'), ingressServiceBackend: (import 'ingressServiceBackend.libsonnet'), ingressSpec: (import 'ingressSpec.libsonnet'), diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1/networkPolicy.libsonnet 
b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/networkPolicy.libsonnet similarity index 68% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1/networkPolicy.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/networkPolicy.libsonnet index 6053fefe663..49ca87f7821 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1/networkPolicy.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/networkPolicy.libsonnet @@ -3,12 +3,10 @@ '#':: d.pkg(name='networkPolicy', url='', help='"NetworkPolicy describes what network traffic is allowed for a set of Pods"'), '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. 
Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -19,21 +17,21 @@ withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { metadata+: { generateName: generateName } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { metadata+: { labels+: labels } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. 
Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { metadata+: { namespace: namespace } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, @@ -41,9 +39,9 @@ withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { metadata+: { uid: uid } }, }, '#new':: d.fn(help='new returns an instance of NetworkPolicy', args=[d.arg(name='name', type=d.T.string)]), @@ -64,17 +62,17 @@ '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), withMatchLabelsMixin(matchLabels): { spec+: { podSelector+: { matchLabels+: matchLabels } } }, }, - '#withEgress':: d.fn(help='"List of egress rules to be applied to the selected pods. Outgoing traffic is allowed if there are no NetworkPolicies selecting the pod (and cluster policy otherwise allows the traffic), OR if the traffic matches at least one egress rule across all of the NetworkPolicy objects whose podSelector matches the pod. If this field is empty then this NetworkPolicy limits all outgoing traffic (and serves solely to ensure that the pods it selects are isolated by default). This field is beta-level in 1.8"', args=[d.arg(name='egress', type=d.T.array)]), + '#withEgress':: d.fn(help='"egress is a list of egress rules to be applied to the selected pods. Outgoing traffic is allowed if there are no NetworkPolicies selecting the pod (and cluster policy otherwise allows the traffic), OR if the traffic matches at least one egress rule across all of the NetworkPolicy objects whose podSelector matches the pod. If this field is empty then this NetworkPolicy limits all outgoing traffic (and serves solely to ensure that the pods it selects are isolated by default). 
This field is beta-level in 1.8"', args=[d.arg(name='egress', type=d.T.array)]), withEgress(egress): { spec+: { egress: if std.isArray(v=egress) then egress else [egress] } }, - '#withEgressMixin':: d.fn(help='"List of egress rules to be applied to the selected pods. Outgoing traffic is allowed if there are no NetworkPolicies selecting the pod (and cluster policy otherwise allows the traffic), OR if the traffic matches at least one egress rule across all of the NetworkPolicy objects whose podSelector matches the pod. If this field is empty then this NetworkPolicy limits all outgoing traffic (and serves solely to ensure that the pods it selects are isolated by default). This field is beta-level in 1.8"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='egress', type=d.T.array)]), + '#withEgressMixin':: d.fn(help='"egress is a list of egress rules to be applied to the selected pods. Outgoing traffic is allowed if there are no NetworkPolicies selecting the pod (and cluster policy otherwise allows the traffic), OR if the traffic matches at least one egress rule across all of the NetworkPolicy objects whose podSelector matches the pod. If this field is empty then this NetworkPolicy limits all outgoing traffic (and serves solely to ensure that the pods it selects are isolated by default). This field is beta-level in 1.8"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='egress', type=d.T.array)]), withEgressMixin(egress): { spec+: { egress+: if std.isArray(v=egress) then egress else [egress] } }, - '#withIngress':: d.fn(help="\"List of ingress rules to be applied to the selected pods. Traffic is allowed to a pod if there are no NetworkPolicies selecting the pod (and cluster policy otherwise allows the traffic), OR if the traffic source is the pod's local node, OR if the traffic matches at least one ingress rule across all of the NetworkPolicy objects whose podSelector matches the pod. If this field is empty then this NetworkPolicy does not allow any traffic (and serves solely to ensure that the pods it selects are isolated by default)\"", args=[d.arg(name='ingress', type=d.T.array)]), + '#withIngress':: d.fn(help="\"ingress is a list of ingress rules to be applied to the selected pods. Traffic is allowed to a pod if there are no NetworkPolicies selecting the pod (and cluster policy otherwise allows the traffic), OR if the traffic source is the pod's local node, OR if the traffic matches at least one ingress rule across all of the NetworkPolicy objects whose podSelector matches the pod. If this field is empty then this NetworkPolicy does not allow any traffic (and serves solely to ensure that the pods it selects are isolated by default)\"", args=[d.arg(name='ingress', type=d.T.array)]), withIngress(ingress): { spec+: { ingress: if std.isArray(v=ingress) then ingress else [ingress] } }, - '#withIngressMixin':: d.fn(help="\"List of ingress rules to be applied to the selected pods. Traffic is allowed to a pod if there are no NetworkPolicies selecting the pod (and cluster policy otherwise allows the traffic), OR if the traffic source is the pod's local node, OR if the traffic matches at least one ingress rule across all of the NetworkPolicy objects whose podSelector matches the pod. 
If this field is empty then this NetworkPolicy does not allow any traffic (and serves solely to ensure that the pods it selects are isolated by default)\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='ingress', type=d.T.array)]), + '#withIngressMixin':: d.fn(help="\"ingress is a list of ingress rules to be applied to the selected pods. Traffic is allowed to a pod if there are no NetworkPolicies selecting the pod (and cluster policy otherwise allows the traffic), OR if the traffic source is the pod's local node, OR if the traffic matches at least one ingress rule across all of the NetworkPolicy objects whose podSelector matches the pod. If this field is empty then this NetworkPolicy does not allow any traffic (and serves solely to ensure that the pods it selects are isolated by default)\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='ingress', type=d.T.array)]), withIngressMixin(ingress): { spec+: { ingress+: if std.isArray(v=ingress) then ingress else [ingress] } }, - '#withPolicyTypes':: d.fn(help='"List of rule types that the NetworkPolicy relates to. Valid options are [\\"Ingress\\"], [\\"Egress\\"], or [\\"Ingress\\", \\"Egress\\"]. If this field is not specified, it will default based on the existence of Ingress or Egress rules; policies that contain an Egress section are assumed to affect Egress, and all policies (whether or not they contain an Ingress section) are assumed to affect Ingress. If you want to write an egress-only policy, you must explicitly specify policyTypes [ \\"Egress\\" ]. Likewise, if you want to write a policy that specifies that no egress is allowed, you must specify a policyTypes value that include \\"Egress\\" (since such a policy would not include an Egress section and would otherwise default to just [ \\"Ingress\\" ]). This field is beta-level in 1.8"', args=[d.arg(name='policyTypes', type=d.T.array)]), + '#withPolicyTypes':: d.fn(help='"policyTypes is a list of rule types that the NetworkPolicy relates to. Valid options are [\\"Ingress\\"], [\\"Egress\\"], or [\\"Ingress\\", \\"Egress\\"]. If this field is not specified, it will default based on the existence of ingress or egress rules; policies that contain an egress section are assumed to affect egress, and all policies (whether or not they contain an ingress section) are assumed to affect ingress. If you want to write an egress-only policy, you must explicitly specify policyTypes [ \\"Egress\\" ]. Likewise, if you want to write a policy that specifies that no egress is allowed, you must specify a policyTypes value that include \\"Egress\\" (since such a policy would not include an egress section and would otherwise default to just [ \\"Ingress\\" ]). This field is beta-level in 1.8"', args=[d.arg(name='policyTypes', type=d.T.array)]), withPolicyTypes(policyTypes): { spec+: { policyTypes: if std.isArray(v=policyTypes) then policyTypes else [policyTypes] } }, - '#withPolicyTypesMixin':: d.fn(help='"List of rule types that the NetworkPolicy relates to. Valid options are [\\"Ingress\\"], [\\"Egress\\"], or [\\"Ingress\\", \\"Egress\\"]. If this field is not specified, it will default based on the existence of Ingress or Egress rules; policies that contain an Egress section are assumed to affect Egress, and all policies (whether or not they contain an Ingress section) are assumed to affect Ingress. If you want to write an egress-only policy, you must explicitly specify policyTypes [ \\"Egress\\" ]. 
Likewise, if you want to write a policy that specifies that no egress is allowed, you must specify a policyTypes value that include \\"Egress\\" (since such a policy would not include an Egress section and would otherwise default to just [ \\"Ingress\\" ]). This field is beta-level in 1.8"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='policyTypes', type=d.T.array)]), + '#withPolicyTypesMixin':: d.fn(help='"policyTypes is a list of rule types that the NetworkPolicy relates to. Valid options are [\\"Ingress\\"], [\\"Egress\\"], or [\\"Ingress\\", \\"Egress\\"]. If this field is not specified, it will default based on the existence of ingress or egress rules; policies that contain an egress section are assumed to affect egress, and all policies (whether or not they contain an ingress section) are assumed to affect ingress. If you want to write an egress-only policy, you must explicitly specify policyTypes [ \\"Egress\\" ]. Likewise, if you want to write a policy that specifies that no egress is allowed, you must specify a policyTypes value that include \\"Egress\\" (since such a policy would not include an egress section and would otherwise default to just [ \\"Ingress\\" ]). This field is beta-level in 1.8"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='policyTypes', type=d.T.array)]), withPolicyTypesMixin(policyTypes): { spec+: { policyTypes+: if std.isArray(v=policyTypes) then policyTypes else [policyTypes] } }, }, '#mixin': 'ignore', diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/networkPolicyEgressRule.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/networkPolicyEgressRule.libsonnet new file mode 100644 index 00000000000..0ccc9fcf150 --- /dev/null +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/networkPolicyEgressRule.libsonnet @@ -0,0 +1,14 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='networkPolicyEgressRule', url='', help="\"NetworkPolicyEgressRule describes a particular set of traffic that is allowed out of pods matched by a NetworkPolicySpec's podSelector. The traffic must match both ports and to. This type is beta-level in 1.8\""), + '#withPorts':: d.fn(help='"ports is a list of destination ports for outgoing traffic. Each item in this list is combined using a logical OR. If this field is empty or missing, this rule matches all ports (traffic not restricted by port). If this field is present and contains at least one item, then this rule allows traffic only if the traffic matches at least one port in the list."', args=[d.arg(name='ports', type=d.T.array)]), + withPorts(ports): { ports: if std.isArray(v=ports) then ports else [ports] }, + '#withPortsMixin':: d.fn(help='"ports is a list of destination ports for outgoing traffic. Each item in this list is combined using a logical OR. If this field is empty or missing, this rule matches all ports (traffic not restricted by port). If this field is present and contains at least one item, then this rule allows traffic only if the traffic matches at least one port in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ports', type=d.T.array)]), + withPortsMixin(ports): { ports+: if std.isArray(v=ports) then ports else [ports] }, + '#withTo':: d.fn(help='"to is a list of destinations for outgoing traffic of pods selected for this rule. 
Items in this list are combined using a logical OR operation. If this field is empty or missing, this rule matches all destinations (traffic not restricted by destination). If this field is present and contains at least one item, this rule allows traffic only if the traffic matches at least one item in the to list."', args=[d.arg(name='to', type=d.T.array)]), + withTo(to): { to: if std.isArray(v=to) then to else [to] }, + '#withToMixin':: d.fn(help='"to is a list of destinations for outgoing traffic of pods selected for this rule. Items in this list are combined using a logical OR operation. If this field is empty or missing, this rule matches all destinations (traffic not restricted by destination). If this field is present and contains at least one item, this rule allows traffic only if the traffic matches at least one item in the to list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='to', type=d.T.array)]), + withToMixin(to): { to+: if std.isArray(v=to) then to else [to] }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/networkPolicyIngressRule.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/networkPolicyIngressRule.libsonnet new file mode 100644 index 00000000000..fbe27f9f8d9 --- /dev/null +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/networkPolicyIngressRule.libsonnet @@ -0,0 +1,14 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='networkPolicyIngressRule', url='', help="\"NetworkPolicyIngressRule describes a particular set of traffic that is allowed to the pods matched by a NetworkPolicySpec's podSelector. The traffic must match both ports and from.\""), + '#withFrom':: d.fn(help='"from is a list of sources which should be able to access the pods selected for this rule. Items in this list are combined using a logical OR operation. If this field is empty or missing, this rule matches all sources (traffic not restricted by source). If this field is present and contains at least one item, this rule allows traffic only if the traffic matches at least one item in the from list."', args=[d.arg(name='from', type=d.T.array)]), + withFrom(from): { from: if std.isArray(v=from) then from else [from] }, + '#withFromMixin':: d.fn(help='"from is a list of sources which should be able to access the pods selected for this rule. Items in this list are combined using a logical OR operation. If this field is empty or missing, this rule matches all sources (traffic not restricted by source). If this field is present and contains at least one item, this rule allows traffic only if the traffic matches at least one item in the from list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='from', type=d.T.array)]), + withFromMixin(from): { from+: if std.isArray(v=from) then from else [from] }, + '#withPorts':: d.fn(help='"ports is a list of ports which should be made accessible on the pods selected for this rule. Each item in this list is combined using a logical OR. If this field is empty or missing, this rule matches all ports (traffic not restricted by port). 
If this field is present and contains at least one item, then this rule allows traffic only if the traffic matches at least one port in the list."', args=[d.arg(name='ports', type=d.T.array)]), + withPorts(ports): { ports: if std.isArray(v=ports) then ports else [ports] }, + '#withPortsMixin':: d.fn(help='"ports is a list of ports which should be made accessible on the pods selected for this rule. Each item in this list is combined using a logical OR. If this field is empty or missing, this rule matches all ports (traffic not restricted by port). If this field is present and contains at least one item, then this rule allows traffic only if the traffic matches at least one port in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ports', type=d.T.array)]), + withPortsMixin(ports): { ports+: if std.isArray(v=ports) then ports else [ports] }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1/networkPolicyPeer.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/networkPolicyPeer.libsonnet similarity index 84% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1/networkPolicyPeer.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/networkPolicyPeer.libsonnet index 0ed7c3a58d8..beac3d05cc4 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1/networkPolicyPeer.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/networkPolicyPeer.libsonnet @@ -1,13 +1,13 @@ { local d = (import 'doc-util/main.libsonnet'), '#':: d.pkg(name='networkPolicyPeer', url='', help='"NetworkPolicyPeer describes a peer to allow traffic to/from. Only certain combinations of fields are allowed"'), - '#ipBlock':: d.obj(help="\"IPBlock describes a particular CIDR (Ex. \\\"192.168.1.1/24\\\",\\\"2001:db9::/64\\\") that is allowed to the pods matched by a NetworkPolicySpec's podSelector. The except entry describes CIDRs that should not be included within this rule.\""), + '#ipBlock':: d.obj(help="\"IPBlock describes a particular CIDR (Ex. \\\"192.168.1.0/24\\\",\\\"2001:db8::/64\\\") that is allowed to the pods matched by a NetworkPolicySpec's podSelector. 
The except entry describes CIDRs that should not be included within this rule.\""), ipBlock: { - '#withCidr':: d.fn(help='"CIDR is a string representing the IP Block Valid examples are \\"192.168.1.1/24\\" or \\"2001:db9::/64\\', args=[d.arg(name='cidr', type=d.T.string)]), + '#withCidr':: d.fn(help='"cidr is a string representing the IPBlock Valid examples are \\"192.168.1.0/24\\" or \\"2001:db8::/64\\', args=[d.arg(name='cidr', type=d.T.string)]), withCidr(cidr): { ipBlock+: { cidr: cidr } }, - '#withExcept':: d.fn(help='"Except is a slice of CIDRs that should not be included within an IP Block Valid examples are \\"192.168.1.1/24\\" or \\"2001:db9::/64\\" Except values will be rejected if they are outside the CIDR range"', args=[d.arg(name='except', type=d.T.array)]), + '#withExcept':: d.fn(help='"except is a slice of CIDRs that should not be included within an IPBlock Valid examples are \\"192.168.1.0/24\\" or \\"2001:db8::/64\\" Except values will be rejected if they are outside the cidr range"', args=[d.arg(name='except', type=d.T.array)]), withExcept(except): { ipBlock+: { except: if std.isArray(v=except) then except else [except] } }, - '#withExceptMixin':: d.fn(help='"Except is a slice of CIDRs that should not be included within an IP Block Valid examples are \\"192.168.1.1/24\\" or \\"2001:db9::/64\\" Except values will be rejected if they are outside the CIDR range"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='except', type=d.T.array)]), + '#withExceptMixin':: d.fn(help='"except is a slice of CIDRs that should not be included within an IPBlock Valid examples are \\"192.168.1.0/24\\" or \\"2001:db8::/64\\" Except values will be rejected if they are outside the cidr range"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='except', type=d.T.array)]), withExceptMixin(except): { ipBlock+: { except+: if std.isArray(v=except) then except else [except] } }, }, '#namespaceSelector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/networkPolicyPort.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/networkPolicyPort.libsonnet new file mode 100644 index 00000000000..3c98019075c --- /dev/null +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/networkPolicyPort.libsonnet @@ -0,0 +1,12 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='networkPolicyPort', url='', help='"NetworkPolicyPort describes a port to allow traffic on"'), + '#withEndPort':: d.fn(help='"endPort indicates that the range of ports from port to endPort if set, inclusive, should be allowed by the policy. This field cannot be defined if the port field is not defined or if the port field is defined as a named (string) port. The endPort must be equal or greater than port."', args=[d.arg(name='endPort', type=d.T.integer)]), + withEndPort(endPort): { endPort: endPort }, + '#withPort':: d.fn(help='"IntOrString is a type that can hold an int32 or a string. When used in JSON or YAML marshalling and unmarshalling, it produces or consumes the inner type. 
This allows you to have, for example, a JSON field that can accept a name or number."', args=[d.arg(name='port', type=d.T.string)]), + withPort(port): { port: port }, + '#withProtocol':: d.fn(help='"protocol represents the protocol (TCP, UDP, or SCTP) which traffic must match. If not specified, this field defaults to TCP."', args=[d.arg(name='protocol', type=d.T.string)]), + withProtocol(protocol): { protocol: protocol }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/networkPolicySpec.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/networkPolicySpec.libsonnet new file mode 100644 index 00000000000..d8efd2f2666 --- /dev/null +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/networkPolicySpec.libsonnet @@ -0,0 +1,29 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='networkPolicySpec', url='', help='"NetworkPolicySpec provides the specification of a NetworkPolicy"'), + '#podSelector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), + podSelector: { + '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressions(matchExpressions): { podSelector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } }, + '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressionsMixin(matchExpressions): { podSelector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } }, + '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabels(matchLabels): { podSelector+: { matchLabels: matchLabels } }, + '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabelsMixin(matchLabels): { podSelector+: { matchLabels+: matchLabels } }, + }, + '#withEgress':: d.fn(help='"egress is a list of egress rules to be applied to the selected pods. Outgoing traffic is allowed if there are no NetworkPolicies selecting the pod (and cluster policy otherwise allows the traffic), OR if the traffic matches at least one egress rule across all of the NetworkPolicy objects whose podSelector matches the pod. 
If this field is empty then this NetworkPolicy limits all outgoing traffic (and serves solely to ensure that the pods it selects are isolated by default). This field is beta-level in 1.8"', args=[d.arg(name='egress', type=d.T.array)]), + withEgress(egress): { egress: if std.isArray(v=egress) then egress else [egress] }, + '#withEgressMixin':: d.fn(help='"egress is a list of egress rules to be applied to the selected pods. Outgoing traffic is allowed if there are no NetworkPolicies selecting the pod (and cluster policy otherwise allows the traffic), OR if the traffic matches at least one egress rule across all of the NetworkPolicy objects whose podSelector matches the pod. If this field is empty then this NetworkPolicy limits all outgoing traffic (and serves solely to ensure that the pods it selects are isolated by default). This field is beta-level in 1.8"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='egress', type=d.T.array)]), + withEgressMixin(egress): { egress+: if std.isArray(v=egress) then egress else [egress] }, + '#withIngress':: d.fn(help="\"ingress is a list of ingress rules to be applied to the selected pods. Traffic is allowed to a pod if there are no NetworkPolicies selecting the pod (and cluster policy otherwise allows the traffic), OR if the traffic source is the pod's local node, OR if the traffic matches at least one ingress rule across all of the NetworkPolicy objects whose podSelector matches the pod. If this field is empty then this NetworkPolicy does not allow any traffic (and serves solely to ensure that the pods it selects are isolated by default)\"", args=[d.arg(name='ingress', type=d.T.array)]), + withIngress(ingress): { ingress: if std.isArray(v=ingress) then ingress else [ingress] }, + '#withIngressMixin':: d.fn(help="\"ingress is a list of ingress rules to be applied to the selected pods. Traffic is allowed to a pod if there are no NetworkPolicies selecting the pod (and cluster policy otherwise allows the traffic), OR if the traffic source is the pod's local node, OR if the traffic matches at least one ingress rule across all of the NetworkPolicy objects whose podSelector matches the pod. If this field is empty then this NetworkPolicy does not allow any traffic (and serves solely to ensure that the pods it selects are isolated by default)\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='ingress', type=d.T.array)]), + withIngressMixin(ingress): { ingress+: if std.isArray(v=ingress) then ingress else [ingress] }, + '#withPolicyTypes':: d.fn(help='"policyTypes is a list of rule types that the NetworkPolicy relates to. Valid options are [\\"Ingress\\"], [\\"Egress\\"], or [\\"Ingress\\", \\"Egress\\"]. If this field is not specified, it will default based on the existence of ingress or egress rules; policies that contain an egress section are assumed to affect egress, and all policies (whether or not they contain an ingress section) are assumed to affect ingress. If you want to write an egress-only policy, you must explicitly specify policyTypes [ \\"Egress\\" ]. Likewise, if you want to write a policy that specifies that no egress is allowed, you must specify a policyTypes value that include \\"Egress\\" (since such a policy would not include an egress section and would otherwise default to just [ \\"Ingress\\" ]). 
This field is beta-level in 1.8"', args=[d.arg(name='policyTypes', type=d.T.array)]), + withPolicyTypes(policyTypes): { policyTypes: if std.isArray(v=policyTypes) then policyTypes else [policyTypes] }, + '#withPolicyTypesMixin':: d.fn(help='"policyTypes is a list of rule types that the NetworkPolicy relates to. Valid options are [\\"Ingress\\"], [\\"Egress\\"], or [\\"Ingress\\", \\"Egress\\"]. If this field is not specified, it will default based on the existence of ingress or egress rules; policies that contain an egress section are assumed to affect egress, and all policies (whether or not they contain an ingress section) are assumed to affect ingress. If you want to write an egress-only policy, you must explicitly specify policyTypes [ \\"Egress\\" ]. Likewise, if you want to write a policy that specifies that no egress is allowed, you must specify a policyTypes value that include \\"Egress\\" (since such a policy would not include an egress section and would otherwise default to just [ \\"Ingress\\" ]). This field is beta-level in 1.8"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='policyTypes', type=d.T.array)]), + withPolicyTypesMixin(policyTypes): { policyTypes+: if std.isArray(v=policyTypes) then policyTypes else [policyTypes] }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1/serviceBackendPort.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/serviceBackendPort.libsonnet similarity index 80% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1/serviceBackendPort.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/serviceBackendPort.libsonnet index c22ffe1326f..b3f4955e902 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1/serviceBackendPort.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/serviceBackendPort.libsonnet @@ -1,9 +1,9 @@ { local d = (import 'doc-util/main.libsonnet'), '#':: d.pkg(name='serviceBackendPort', url='', help='"ServiceBackendPort is the service port being referenced."'), - '#withName':: d.fn(help='"Name is the name of the port on the Service. This is a mutually exclusive setting with \\"Number\\"."', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"name is the name of the port on the Service. This is a mutually exclusive setting with \\"Number\\"."', args=[d.arg(name='name', type=d.T.string)]), withName(name): { name: name }, - '#withNumber':: d.fn(help='"Number is the numerical port number (e.g. 80) on the Service. This is a mutually exclusive setting with \\"Name\\"."', args=[d.arg(name='number', type=d.T.integer)]), + '#withNumber':: d.fn(help='"number is the numerical port number (e.g. 80) on the Service. 
This is a mutually exclusive setting with \\"Name\\"."', args=[d.arg(name='number', type=d.T.integer)]), withNumber(number): { number: number }, '#mixin': 'ignore', mixin: self, diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1alpha1/ipAddress.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1alpha1/ipAddress.libsonnet new file mode 100644 index 00000000000..0bbfa21c89a --- /dev/null +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1alpha1/ipAddress.libsonnet @@ -0,0 +1,68 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='ipAddress', url='', help='"IPAddress represents a single IP of a single IP Family. The object is designed to be used by APIs that operate on IP addresses. The object is used by the Service core API for allocation of IP addresses. An IP address can be represented in different formats, to guarantee the uniqueness of the IP, the name of the object is the IP address in canonical format, four decimal digits separated by dots suppressing leading zeros for IPv4 and the representation defined by RFC 5952 for IPv6. Valid: 192.168.1.5 or 2001:db8::1 or 2001:db8:aaaa:bbbb:cccc:dddd:eeee:1 Invalid: 10.01.2.3 or 2001:db8:0:0:0::1"'), + '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), + metadata: { + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + withAnnotations(annotations): { metadata+: { annotations: annotations } }, + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, + '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), + withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, + '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), + withDeletionGracePeriodSeconds(deletionGracePeriodSeconds): { metadata+: { deletionGracePeriodSeconds: deletionGracePeriodSeconds } }, + '#withDeletionTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. 
Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='deletionTimestamp', type=d.T.string)]), + withDeletionTimestamp(deletionTimestamp): { metadata+: { deletionTimestamp: deletionTimestamp } }, + '#withFinalizers':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."', args=[d.arg(name='finalizers', type=d.T.array)]), + withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, + '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), + withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + withGenerateName(generateName): { metadata+: { generateName: generateName } }, + '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. 
Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), + withGeneration(generation): { metadata+: { generation: generation } }, + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), + withLabels(labels): { metadata+: { labels: labels } }, + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + withLabelsMixin(labels): { metadata+: { labels+: labels } }, + '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), + withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, + '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), + withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { metadata+: { name: name } }, + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + withNamespace(namespace): { metadata+: { namespace: namespace } }, + '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. 
If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), + withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, + '#withOwnerReferencesMixin':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ownerReferences', type=d.T.array)]), + withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, + '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), + withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), + withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), + withUid(uid): { metadata+: { uid: uid } }, + }, + '#new':: d.fn(help='new returns an instance of IPAddress', args=[d.arg(name='name', type=d.T.string)]), + new(name): { + apiVersion: 'networking.k8s.io/v1alpha1', + kind: 'IPAddress', + } + self.metadata.withName(name=name), + '#spec':: d.obj(help='"IPAddressSpec describe the attributes in an IP Address."'), + spec: { + '#parentRef':: d.obj(help='"ParentReference describes a reference to a parent object."'), + parentRef: { + '#withGroup':: d.fn(help='"Group is the group of the object being referenced."', args=[d.arg(name='group', type=d.T.string)]), + withGroup(group): { spec+: { parentRef+: { group: group } } }, + '#withName':: d.fn(help='"Name is the name of the object being referenced."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { spec+: { parentRef+: { name: name } } }, + '#withNamespace':: d.fn(help='"Namespace is the namespace of the object being referenced."', args=[d.arg(name='namespace', type=d.T.string)]), + withNamespace(namespace): { spec+: { parentRef+: { namespace: namespace } } }, + '#withResource':: d.fn(help='"Resource is the resource of the object being referenced."', args=[d.arg(name='resource', type=d.T.string)]), + withResource(resource): { spec+: { parentRef+: { resource: resource } } }, + }, + }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1alpha1/ipAddressSpec.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1alpha1/ipAddressSpec.libsonnet new file mode 100644 index 00000000000..95a01d19724 --- /dev/null +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1alpha1/ipAddressSpec.libsonnet @@ -0,0 +1,17 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='ipAddressSpec', url='', help='"IPAddressSpec describe the attributes in an IP Address."'), + '#parentRef':: d.obj(help='"ParentReference describes a reference to a parent object."'), + parentRef: { + '#withGroup':: d.fn(help='"Group is the group of the object being referenced."', args=[d.arg(name='group', type=d.T.string)]), + withGroup(group): { parentRef+: { group: group } }, + '#withName':: d.fn(help='"Name is the name of the object being referenced."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { parentRef+: { name: name } }, + '#withNamespace':: d.fn(help='"Namespace is the namespace of the object being referenced."', args=[d.arg(name='namespace', type=d.T.string)]), + withNamespace(namespace): { parentRef+: { namespace: namespace } }, + '#withResource':: d.fn(help='"Resource is the resource of the object being referenced."', args=[d.arg(name='resource', type=d.T.string)]), + withResource(resource): { parentRef+: { resource: resource } }, + }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1alpha1/main.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1alpha1/main.libsonnet new file mode 100644 index 00000000000..094796ec803 --- /dev/null +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1alpha1/main.libsonnet @@ -0,0 +1,10 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='v1alpha1', url='', help=''), + ipAddress: (import 
'ipAddress.libsonnet'), + ipAddressSpec: (import 'ipAddressSpec.libsonnet'), + parentReference: (import 'parentReference.libsonnet'), + serviceCIDR: (import 'serviceCIDR.libsonnet'), + serviceCIDRSpec: (import 'serviceCIDRSpec.libsonnet'), + serviceCIDRStatus: (import 'serviceCIDRStatus.libsonnet'), +} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1alpha1/parentReference.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1alpha1/parentReference.libsonnet new file mode 100644 index 00000000000..89346173087 --- /dev/null +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1alpha1/parentReference.libsonnet @@ -0,0 +1,14 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='parentReference', url='', help='"ParentReference describes a reference to a parent object."'), + '#withGroup':: d.fn(help='"Group is the group of the object being referenced."', args=[d.arg(name='group', type=d.T.string)]), + withGroup(group): { group: group }, + '#withName':: d.fn(help='"Name is the name of the object being referenced."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { name: name }, + '#withNamespace':: d.fn(help='"Namespace is the namespace of the object being referenced."', args=[d.arg(name='namespace', type=d.T.string)]), + withNamespace(namespace): { namespace: namespace }, + '#withResource':: d.fn(help='"Resource is the resource of the object being referenced."', args=[d.arg(name='resource', type=d.T.string)]), + withResource(resource): { resource: resource }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1alpha1/serviceCIDR.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1alpha1/serviceCIDR.libsonnet new file mode 100644 index 00000000000..7da564faee6 --- /dev/null +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1alpha1/serviceCIDR.libsonnet @@ -0,0 +1,61 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='serviceCIDR', url='', help='"ServiceCIDR defines a range of IP addresses using CIDR format (e.g. 192.168.0.0/24 or 2001:db2::/64). This range is used to allocate ClusterIPs to Service objects."'), + '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), + metadata: { + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + withAnnotations(annotations): { metadata+: { annotations: annotations } }, + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, + '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), + withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, + '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), + withDeletionGracePeriodSeconds(deletionGracePeriodSeconds): { metadata+: { deletionGracePeriodSeconds: deletionGracePeriodSeconds } }, + '#withDeletionTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='deletionTimestamp', type=d.T.string)]), + withDeletionTimestamp(deletionTimestamp): { metadata+: { deletionTimestamp: deletionTimestamp } }, + '#withFinalizers':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."', args=[d.arg(name='finalizers', type=d.T.array)]), + withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, + '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. 
Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), + withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + withGenerateName(generateName): { metadata+: { generateName: generateName } }, + '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), + withGeneration(generation): { metadata+: { generation: generation } }, + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), + withLabels(labels): { metadata+: { labels: labels } }, + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + withLabelsMixin(labels): { metadata+: { labels+: labels } }, + '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), + withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, + '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". 
The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), + withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { metadata+: { name: name } }, + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + withNamespace(namespace): { metadata+: { namespace: namespace } }, + '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), + withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, + '#withOwnerReferencesMixin':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ownerReferences', type=d.T.array)]), + withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, + '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), + withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), + withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), + withUid(uid): { metadata+: { uid: uid } }, + }, + '#new':: d.fn(help='new returns an instance of ServiceCIDR', args=[d.arg(name='name', type=d.T.string)]), + new(name): { + apiVersion: 'networking.k8s.io/v1alpha1', + kind: 'ServiceCIDR', + } + self.metadata.withName(name=name), + '#spec':: d.obj(help='"ServiceCIDRSpec define the CIDRs the user wants to use for allocating ClusterIPs for Services."'), + spec: { + '#withCidrs':: d.fn(help='"CIDRs defines the IP blocks in CIDR notation (e.g. \\"192.168.0.0/24\\" or \\"2001:db8::/64\\") from which to assign service cluster IPs. Max of two CIDRs is allowed, one of each IP family. This field is immutable."', args=[d.arg(name='cidrs', type=d.T.array)]), + withCidrs(cidrs): { spec+: { cidrs: if std.isArray(v=cidrs) then cidrs else [cidrs] } }, + '#withCidrsMixin':: d.fn(help='"CIDRs defines the IP blocks in CIDR notation (e.g. \\"192.168.0.0/24\\" or \\"2001:db8::/64\\") from which to assign service cluster IPs. Max of two CIDRs is allowed, one of each IP family. This field is immutable."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='cidrs', type=d.T.array)]), + withCidrsMixin(cidrs): { spec+: { cidrs+: if std.isArray(v=cidrs) then cidrs else [cidrs] } }, + }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1alpha1/serviceCIDRSpec.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1alpha1/serviceCIDRSpec.libsonnet new file mode 100644 index 00000000000..9b68bc70bdc --- /dev/null +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1alpha1/serviceCIDRSpec.libsonnet @@ -0,0 +1,10 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='serviceCIDRSpec', url='', help='"ServiceCIDRSpec define the CIDRs the user wants to use for allocating ClusterIPs for Services."'), + '#withCidrs':: d.fn(help='"CIDRs defines the IP blocks in CIDR notation (e.g. \\"192.168.0.0/24\\" or \\"2001:db8::/64\\") from which to assign service cluster IPs. Max of two CIDRs is allowed, one of each IP family. This field is immutable."', args=[d.arg(name='cidrs', type=d.T.array)]), + withCidrs(cidrs): { cidrs: if std.isArray(v=cidrs) then cidrs else [cidrs] }, + '#withCidrsMixin':: d.fn(help='"CIDRs defines the IP blocks in CIDR notation (e.g. \\"192.168.0.0/24\\" or \\"2001:db8::/64\\") from which to assign service cluster IPs. Max of two CIDRs is allowed, one of each IP family. 
This field is immutable."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='cidrs', type=d.T.array)]), + withCidrsMixin(cidrs): { cidrs+: if std.isArray(v=cidrs) then cidrs else [cidrs] }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1alpha1/serviceCIDRStatus.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1alpha1/serviceCIDRStatus.libsonnet new file mode 100644 index 00000000000..bfc5133f6a8 --- /dev/null +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1alpha1/serviceCIDRStatus.libsonnet @@ -0,0 +1,10 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='serviceCIDRStatus', url='', help='"ServiceCIDRStatus describes the current state of the ServiceCIDR."'), + '#withConditions':: d.fn(help='"conditions holds an array of metav1.Condition that describe the state of the ServiceCIDR. Current service state"', args=[d.arg(name='conditions', type=d.T.array)]), + withConditions(conditions): { conditions: if std.isArray(v=conditions) then conditions else [conditions] }, + '#withConditionsMixin':: d.fn(help='"conditions holds an array of metav1.Condition that describe the state of the ServiceCIDR. Current service state"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='conditions', type=d.T.array)]), + withConditionsMixin(conditions): { conditions+: if std.isArray(v=conditions) then conditions else [conditions] }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/node/main.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/node/main.libsonnet similarity index 58% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/node/main.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/node/main.libsonnet index 5ffbc8e086e..eec7a2a6401 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/node/main.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/node/main.libsonnet @@ -2,6 +2,4 @@ local d = (import 'doc-util/main.libsonnet'), '#':: d.pkg(name='node', url='', help=''), v1: (import 'v1/main.libsonnet'), - v1alpha1: (import 'v1alpha1/main.libsonnet'), - v1beta1: (import 'v1beta1/main.libsonnet'), } diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/node/v1/main.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/node/v1/main.libsonnet similarity index 100% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/node/v1/main.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/node/v1/main.libsonnet diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/node/v1/overhead.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/node/v1/overhead.libsonnet similarity index 82% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/node/v1/overhead.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/node/v1/overhead.libsonnet index fa33b3e39f3..9960b6f88c8 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/node/v1/overhead.libsonnet +++ 
b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/node/v1/overhead.libsonnet @@ -1,9 +1,9 @@ { local d = (import 'doc-util/main.libsonnet'), '#':: d.pkg(name='overhead', url='', help='"Overhead structure represents the resource overhead associated with running a pod."'), - '#withPodFixed':: d.fn(help='"PodFixed represents the fixed resource overhead associated with running a pod."', args=[d.arg(name='podFixed', type=d.T.object)]), + '#withPodFixed':: d.fn(help='"podFixed represents the fixed resource overhead associated with running a pod."', args=[d.arg(name='podFixed', type=d.T.object)]), withPodFixed(podFixed): { podFixed: podFixed }, - '#withPodFixedMixin':: d.fn(help='"PodFixed represents the fixed resource overhead associated with running a pod."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='podFixed', type=d.T.object)]), + '#withPodFixedMixin':: d.fn(help='"podFixed represents the fixed resource overhead associated with running a pod."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='podFixed', type=d.T.object)]), withPodFixedMixin(podFixed): { podFixed+: podFixed }, '#mixin': 'ignore', mixin: self, diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/node/v1/runtimeClass.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/node/v1/runtimeClass.libsonnet similarity index 86% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/node/v1/runtimeClass.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/node/v1/runtimeClass.libsonnet index e4493b10ac0..40dda33e7ac 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/node/v1/runtimeClass.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/node/v1/runtimeClass.libsonnet @@ -3,12 +3,10 @@ '#':: d.pkg(name='runtimeClass', url='', help='"RuntimeClass defines a class of container runtime supported in the cluster. The RuntimeClass is used to determine which container runtime is used to run all containers in a pod. RuntimeClasses are manually defined by a user or cluster provisioner, and referenced in the PodSpec. The Kubelet is responsible for resolving the RuntimeClassName reference before running the pod. For more details, see https://kubernetes.io/docs/concepts/containers/runtime-class/"'), '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -19,21 +17,21 @@ withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. 
Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { metadata+: { generateName: generateName } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. 
More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { metadata+: { labels+: labels } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. 
An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { metadata+: { namespace: namespace } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, @@ -41,9 +39,9 @@ withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { metadata+: { uid: uid } }, }, '#new':: d.fn(help='new returns an instance of RuntimeClass', args=[d.arg(name='name', type=d.T.string)]), @@ -53,9 +51,9 @@ } + self.metadata.withName(name=name), '#overhead':: d.obj(help='"Overhead structure represents the resource overhead associated with running a pod."'), overhead: { - '#withPodFixed':: d.fn(help='"PodFixed represents the fixed resource overhead associated with running a pod."', args=[d.arg(name='podFixed', type=d.T.object)]), + '#withPodFixed':: d.fn(help='"podFixed represents the fixed resource overhead associated with running a pod."', args=[d.arg(name='podFixed', type=d.T.object)]), withPodFixed(podFixed): { overhead+: { podFixed: podFixed } }, - '#withPodFixedMixin':: d.fn(help='"PodFixed represents the fixed resource overhead associated with running a pod."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='podFixed', type=d.T.object)]), + '#withPodFixedMixin':: d.fn(help='"podFixed represents the fixed resource overhead associated with running a pod."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='podFixed', type=d.T.object)]), withPodFixedMixin(podFixed): { overhead+: { podFixed+: podFixed } }, }, '#scheduling':: d.obj(help='"Scheduling specifies the scheduling constraints for nodes supporting a RuntimeClass."'), @@ -69,7 +67,7 @@ '#withTolerationsMixin':: d.fn(help='"tolerations are appended (excluding duplicates) to pods running with this RuntimeClass during admission, effectively unioning the set of nodes tolerated by the pod and the RuntimeClass."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='tolerations', type=d.T.array)]), withTolerationsMixin(tolerations): { scheduling+: { tolerations+: if std.isArray(v=tolerations) then tolerations else [tolerations] } }, }, - '#withHandler':: d.fn(help='"Handler specifies the underlying runtime and configuration that the CRI implementation will use to handle pods of this class. The possible values are specific to the node & CRI configuration. It is assumed that all handlers are available on every node, and handlers of the same name are equivalent on every node. For example, a handler called \\"runc\\" might specify that the runc OCI runtime (using native Linux containers) will be used to run the containers in a pod. The Handler must be lowercase, conform to the DNS Label (RFC 1123) requirements, and is immutable."', args=[d.arg(name='handler', type=d.T.string)]), + '#withHandler':: d.fn(help='"handler specifies the underlying runtime and configuration that the CRI implementation will use to handle pods of this class. The possible values are specific to the node & CRI configuration. It is assumed that all handlers are available on every node, and handlers of the same name are equivalent on every node. For example, a handler called \\"runc\\" might specify that the runc OCI runtime (using native Linux containers) will be used to run the containers in a pod. 
The Handler must be lowercase, conform to the DNS Label (RFC 1123) requirements, and is immutable."', args=[d.arg(name='handler', type=d.T.string)]), withHandler(handler): { handler: handler }, '#mixin': 'ignore', mixin: self, diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/node/v1/scheduling.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/node/v1/scheduling.libsonnet similarity index 100% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/node/v1/scheduling.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/node/v1/scheduling.libsonnet diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/main.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/policy/main.libsonnet similarity index 74% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/main.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/policy/main.libsonnet index 4d23cf36e8f..534c0f40701 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/main.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/policy/main.libsonnet @@ -2,5 +2,4 @@ local d = (import 'doc-util/main.libsonnet'), '#':: d.pkg(name='policy', url='', help=''), v1: (import 'v1/main.libsonnet'), - v1beta1: (import 'v1beta1/main.libsonnet'), } diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1beta1/eviction.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/policy/v1/eviction.libsonnet similarity index 87% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1beta1/eviction.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/policy/v1/eviction.libsonnet index 76adeae8fef..10a2ba87af1 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1beta1/eviction.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/policy/v1/eviction.libsonnet @@ -27,12 +27,10 @@ }, '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. 
They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -43,21 +41,21 @@ withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. 
The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { metadata+: { generateName: generateName } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { metadata+: { labels+: labels } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. 
This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { metadata+: { namespace: namespace } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. 
There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, @@ -65,14 +63,14 @@ withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { metadata+: { uid: uid } }, }, '#new':: d.fn(help='new returns an instance of Eviction', args=[d.arg(name='name', type=d.T.string)]), new(name): { - apiVersion: 'policy/v1beta1', + apiVersion: 'policy/v1', kind: 'Eviction', } + self.metadata.withName(name=name), '#mixin': 'ignore', diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1/main.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/policy/v1/main.libsonnet similarity index 87% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1/main.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/policy/v1/main.libsonnet index 756aa865a8b..8edfbbf3cf7 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1/main.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/policy/v1/main.libsonnet @@ -1,6 +1,7 @@ { local d = (import 'doc-util/main.libsonnet'), '#':: d.pkg(name='v1', url='', help=''), + eviction: (import 'eviction.libsonnet'), podDisruptionBudget: (import 'podDisruptionBudget.libsonnet'), podDisruptionBudgetSpec: (import 'podDisruptionBudgetSpec.libsonnet'), podDisruptionBudgetStatus: (import 'podDisruptionBudgetStatus.libsonnet'), diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1/podDisruptionBudget.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/policy/v1/podDisruptionBudget.libsonnet similarity index 81% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1/podDisruptionBudget.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/policy/v1/podDisruptionBudget.libsonnet index b7fbf166da6..4be9c09cfbf 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1/podDisruptionBudget.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/policy/v1/podDisruptionBudget.libsonnet @@ -3,12 +3,10 @@ '#':: d.pkg(name='podDisruptionBudget', url='', help='"PodDisruptionBudget is an object to define the max disruption that can be caused to a collection of pods"'), '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. 
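With the bump to 1.29 the Eviction object now renders apiVersion: 'policy/v1' and is re-exported from the policy.v1 package. A minimal usage sketch, assuming the vendored tree is imported through its top-level main.libsonnet; the alias `k`, the import path, and the pod/namespace names are illustrative, not part of this patch:

  // Illustrative import; in this repo the library is vendored under
  // example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/.
  local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';

  // new(name) fills kind: 'Eviction' and apiVersion: 'policy/v1'
  // (the 1.21 bindings used policy/v1beta1 here).
  k.policy.v1.eviction.new('ingester-0')
  + k.policy.v1.eviction.metadata.withNamespace('tempo')
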
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -19,21 +17,21 @@ withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. 
Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { metadata+: { generateName: generateName } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. 
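The array-valued metadata helpers keep their scalar-friendly behaviour: a non-array argument is wrapped in a single-element list via std.isArray, and the Mixin variants append rather than replace. A small sketch under the same illustrative import and alias:

  local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';
  local meta = k.policy.v1.podDisruptionBudget.metadata;

  // Both calls render metadata.finalizers: ['example.com/protect'].
  local fromScalar = meta.withFinalizers('example.com/protect');
  local fromList = meta.withFinalizers(['example.com/protect']);

  // The Mixin variant appends to whatever is already present.
  fromScalar + meta.withFinalizersMixin('example.com/second')
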
More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { metadata+: { labels+: labels } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. 
An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { metadata+: { namespace: namespace } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, @@ -41,9 +39,9 @@ withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { metadata+: { uid: uid } }, }, '#new':: d.fn(help='new returns an instance of PodDisruptionBudget', args=[d.arg(name='name', type=d.T.string)]), @@ -68,6 +66,8 @@ withMaxUnavailable(maxUnavailable): { spec+: { maxUnavailable: maxUnavailable } }, '#withMinAvailable':: d.fn(help='"IntOrString is a type that can hold an int32 or a string. When used in JSON or YAML marshalling and unmarshalling, it produces or consumes the inner type. This allows you to have, for example, a JSON field that can accept a name or number."', args=[d.arg(name='minAvailable', type=d.T.string)]), withMinAvailable(minAvailable): { spec+: { minAvailable: minAvailable } }, + '#withUnhealthyPodEvictionPolicy':: d.fn(help='"UnhealthyPodEvictionPolicy defines the criteria for when unhealthy pods should be considered for eviction. Current implementation considers healthy pods, as pods that have status.conditions item with type=\\"Ready\\",status=\\"True\\".\\n\\nValid policies are IfHealthyBudget and AlwaysAllow. If no policy is specified, the default behavior will be used, which corresponds to the IfHealthyBudget policy.\\n\\nIfHealthyBudget policy means that running pods (status.phase=\\"Running\\"), but not yet healthy can be evicted only if the guarded application is not disrupted (status.currentHealthy is at least equal to status.desiredHealthy). Healthy pods will be subject to the PDB for eviction.\\n\\nAlwaysAllow policy means that all running pods (status.phase=\\"Running\\"), but not yet healthy are considered disrupted and can be evicted regardless of whether the criteria in a PDB is met. This means perspective running pods of a disrupted application might not get a chance to become healthy. Healthy pods will be subject to the PDB for eviction.\\n\\nAdditional policies may be added in the future. Clients making eviction decisions should disallow eviction of unhealthy pods if they encounter an unrecognized policy in this field.\\n\\nThis field is beta-level. The eviction API uses this field when the feature gate PDBUnhealthyPodEvictionPolicy is enabled (enabled by default)."', args=[d.arg(name='unhealthyPodEvictionPolicy', type=d.T.string)]), + withUnhealthyPodEvictionPolicy(unhealthyPodEvictionPolicy): { spec+: { unhealthyPodEvictionPolicy: unhealthyPodEvictionPolicy } }, }, '#mixin': 'ignore', mixin: self, diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1beta1/podDisruptionBudgetSpec.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/policy/v1/podDisruptionBudgetSpec.libsonnet similarity index 65% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1beta1/podDisruptionBudgetSpec.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/policy/v1/podDisruptionBudgetSpec.libsonnet index 84917662145..93cd035440d 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1beta1/podDisruptionBudgetSpec.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/policy/v1/podDisruptionBudgetSpec.libsonnet @@ -16,6 +16,8 @@ withMaxUnavailable(maxUnavailable): { maxUnavailable: maxUnavailable }, '#withMinAvailable':: d.fn(help='"IntOrString is a type that can hold an int32 or a string. When used in JSON or YAML marshalling and unmarshalling, it produces or consumes the inner type. 
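The 1.29 bindings also add withUnhealthyPodEvictionPolicy to both the PodDisruptionBudget object and the standalone podDisruptionBudgetSpec. A hedged sketch of combining it with the existing spec helpers; the resource name and policy value are illustrative:

  local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';
  local pdb = k.policy.v1.podDisruptionBudget;

  // minAvailable is IntOrString, so 1 or '50%' are both accepted;
  // a real PDB would normally also set a selector.
  pdb.new('distributor')
  + pdb.spec.withMinAvailable(1)
  + pdb.spec.withUnhealthyPodEvictionPolicy('AlwaysAllow')
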
This allows you to have, for example, a JSON field that can accept a name or number."', args=[d.arg(name='minAvailable', type=d.T.string)]), withMinAvailable(minAvailable): { minAvailable: minAvailable }, + '#withUnhealthyPodEvictionPolicy':: d.fn(help='"UnhealthyPodEvictionPolicy defines the criteria for when unhealthy pods should be considered for eviction. Current implementation considers healthy pods, as pods that have status.conditions item with type=\\"Ready\\",status=\\"True\\".\\n\\nValid policies are IfHealthyBudget and AlwaysAllow. If no policy is specified, the default behavior will be used, which corresponds to the IfHealthyBudget policy.\\n\\nIfHealthyBudget policy means that running pods (status.phase=\\"Running\\"), but not yet healthy can be evicted only if the guarded application is not disrupted (status.currentHealthy is at least equal to status.desiredHealthy). Healthy pods will be subject to the PDB for eviction.\\n\\nAlwaysAllow policy means that all running pods (status.phase=\\"Running\\"), but not yet healthy are considered disrupted and can be evicted regardless of whether the criteria in a PDB is met. This means perspective running pods of a disrupted application might not get a chance to become healthy. Healthy pods will be subject to the PDB for eviction.\\n\\nAdditional policies may be added in the future. Clients making eviction decisions should disallow eviction of unhealthy pods if they encounter an unrecognized policy in this field.\\n\\nThis field is beta-level. The eviction API uses this field when the feature gate PDBUnhealthyPodEvictionPolicy is enabled (enabled by default)."', args=[d.arg(name='unhealthyPodEvictionPolicy', type=d.T.string)]), + withUnhealthyPodEvictionPolicy(unhealthyPodEvictionPolicy): { unhealthyPodEvictionPolicy: unhealthyPodEvictionPolicy }, '#mixin': 'ignore', mixin: self, } diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1/podDisruptionBudgetStatus.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/policy/v1/podDisruptionBudgetStatus.libsonnet similarity index 100% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1/podDisruptionBudgetStatus.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/policy/v1/podDisruptionBudgetStatus.libsonnet diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/main.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/rbac/main.libsonnet similarity index 58% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/main.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/rbac/main.libsonnet index f7d3fc1bb5b..de8fa2ef0f1 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/main.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/rbac/main.libsonnet @@ -2,6 +2,4 @@ local d = (import 'doc-util/main.libsonnet'), '#':: d.pkg(name='rbac', url='', help=''), v1: (import 'v1/main.libsonnet'), - v1alpha1: (import 'v1alpha1/main.libsonnet'), - v1beta1: (import 'v1beta1/main.libsonnet'), } diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1/aggregationRule.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/rbac/v1/aggregationRule.libsonnet similarity index 100% rename from 
example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1/aggregationRule.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/rbac/v1/aggregationRule.libsonnet diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1/clusterRole.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/rbac/v1/clusterRole.libsonnet similarity index 85% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1/clusterRole.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/rbac/v1/clusterRole.libsonnet index 1e710e1d8c4..2b0192bb5bf 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1/clusterRole.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/rbac/v1/clusterRole.libsonnet @@ -10,12 +10,10 @@ }, '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. 
Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -26,21 +24,21 @@ withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { metadata+: { generateName: generateName } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { metadata+: { labels+: labels } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. 
Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { metadata+: { namespace: namespace } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, @@ -48,9 +46,9 @@ withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { metadata+: { uid: uid } }, }, '#new':: d.fn(help='new returns an instance of ClusterRole', args=[d.arg(name='name', type=d.T.string)]), diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1/clusterRoleBinding.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/rbac/v1/clusterRoleBinding.libsonnet similarity index 85% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1/clusterRoleBinding.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/rbac/v1/clusterRoleBinding.libsonnet index 7f167b44f4f..4a29e743986 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1/clusterRoleBinding.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/rbac/v1/clusterRoleBinding.libsonnet @@ -3,12 +3,10 @@ '#':: d.pkg(name='clusterRoleBinding', url='', help='"ClusterRoleBinding references a ClusterRole, but not contain it. It can reference a ClusterRole in the global namespace, and adds who information via Subject."'), '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. 
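On the RBAC side only the stable v1 group remains (the v1alpha1 and v1beta1 imports are dropped), and ObjectMeta loses withClusterName across all objects because the field was removed upstream; the label helpers behave as before. A sketch under the same illustrative import, with the role name and labels made up for the example:

  local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';
  local cr = k.rbac.v1.clusterRole;

  // withLabels replaces the label map, withLabelsMixin merges into it.
  // 1.21-era calls to metadata.withClusterName(...) have no 1.29
  // equivalent and simply need to be removed.
  cr.new('tempo-read')
  + cr.metadata.withLabels({ team: 'observability' })
  + cr.metadata.withLabelsMixin({ release: 'tempo' })
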
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -19,21 +17,21 @@ withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. 
Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { metadata+: { generateName: generateName } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. 
More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { metadata+: { labels+: labels } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. 
An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { metadata+: { namespace: namespace } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, @@ -41,9 +39,9 @@ withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { metadata+: { uid: uid } }, }, '#new':: d.fn(help='new returns an instance of ClusterRoleBinding', args=[d.arg(name='name', type=d.T.string)]), diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1/main.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/rbac/v1/main.libsonnet similarity index 100% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1/main.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/rbac/v1/main.libsonnet diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1/policyRule.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/rbac/v1/policyRule.libsonnet similarity index 73% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1/policyRule.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/rbac/v1/policyRule.libsonnet index 9b15ccb2c07..e42ac307f8b 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1/policyRule.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/rbac/v1/policyRule.libsonnet @@ -1,9 +1,9 @@ { local d = (import 'doc-util/main.libsonnet'), '#':: d.pkg(name='policyRule', url='', help='"PolicyRule holds information that describes a policy rule, but does not contain information about who the rule applies to or which namespace the rule applies to."'), - '#withApiGroups':: d.fn(help='"APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of the enumerated resources in any API group will be allowed."', args=[d.arg(name='apiGroups', type=d.T.array)]), + '#withApiGroups':: d.fn(help='"APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of the enumerated resources in any API group will be allowed. \\"\\" represents the core API group and \\"*\\" represents all API groups."', args=[d.arg(name='apiGroups', type=d.T.array)]), withApiGroups(apiGroups): { apiGroups: if std.isArray(v=apiGroups) then apiGroups else [apiGroups] }, - '#withApiGroupsMixin':: d.fn(help='"APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of the enumerated resources in any API group will be allowed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='apiGroups', type=d.T.array)]), + '#withApiGroupsMixin':: d.fn(help='"APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of the enumerated resources in any API group will be allowed. \\"\\" represents the core API group and \\"*\\" represents all API groups."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='apiGroups', type=d.T.array)]), withApiGroupsMixin(apiGroups): { apiGroups+: if std.isArray(v=apiGroups) then apiGroups else [apiGroups] }, '#withNonResourceURLs':: d.fn(help='"NonResourceURLs is a set of partial urls that a user should have access to. 
*s are allowed, but only as the full, final step in the path Since non-resource URLs are not namespaced, this field is only applicable for ClusterRoles referenced from a ClusterRoleBinding. Rules can either apply to API resources (such as \\"pods\\" or \\"secrets\\") or non-resource URL paths (such as \\"/api\\"), but not both."', args=[d.arg(name='nonResourceURLs', type=d.T.array)]), withNonResourceURLs(nonResourceURLs): { nonResourceURLs: if std.isArray(v=nonResourceURLs) then nonResourceURLs else [nonResourceURLs] }, @@ -13,13 +13,13 @@ withResourceNames(resourceNames): { resourceNames: if std.isArray(v=resourceNames) then resourceNames else [resourceNames] }, '#withResourceNamesMixin':: d.fn(help='"ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='resourceNames', type=d.T.array)]), withResourceNamesMixin(resourceNames): { resourceNames+: if std.isArray(v=resourceNames) then resourceNames else [resourceNames] }, - '#withResources':: d.fn(help='"Resources is a list of resources this rule applies to. ResourceAll represents all resources."', args=[d.arg(name='resources', type=d.T.array)]), + '#withResources':: d.fn(help="\"Resources is a list of resources this rule applies to. '*' represents all resources.\"", args=[d.arg(name='resources', type=d.T.array)]), withResources(resources): { resources: if std.isArray(v=resources) then resources else [resources] }, - '#withResourcesMixin':: d.fn(help='"Resources is a list of resources this rule applies to. ResourceAll represents all resources."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='resources', type=d.T.array)]), + '#withResourcesMixin':: d.fn(help="\"Resources is a list of resources this rule applies to. '*' represents all resources.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='resources', type=d.T.array)]), withResourcesMixin(resources): { resources+: if std.isArray(v=resources) then resources else [resources] }, - '#withVerbs':: d.fn(help='"Verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule. VerbAll represents all kinds."', args=[d.arg(name='verbs', type=d.T.array)]), + '#withVerbs':: d.fn(help="\"Verbs is a list of Verbs that apply to ALL the ResourceKinds contained in this rule. '*' represents all verbs.\"", args=[d.arg(name='verbs', type=d.T.array)]), withVerbs(verbs): { verbs: if std.isArray(v=verbs) then verbs else [verbs] }, - '#withVerbsMixin':: d.fn(help='"Verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule. VerbAll represents all kinds."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='verbs', type=d.T.array)]), + '#withVerbsMixin':: d.fn(help="\"Verbs is a list of Verbs that apply to ALL the ResourceKinds contained in this rule. 
'*' represents all verbs.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='verbs', type=d.T.array)]), withVerbsMixin(verbs): { verbs+: if std.isArray(v=verbs) then verbs else [verbs] }, '#mixin': 'ignore', mixin: self, diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1/role.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/rbac/v1/role.libsonnet similarity index 84% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1/role.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/rbac/v1/role.libsonnet index 960c71a7d75..295f6254585 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1/role.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/rbac/v1/role.libsonnet @@ -3,12 +3,10 @@ '#':: d.pkg(name='role', url='', help='"Role is a namespaced, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding."'), '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. 
This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -19,21 +17,21 @@ withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. 
This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { metadata+: { generateName: generateName } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { metadata+: { labels+: labels } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". 
The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { metadata+: { namespace: namespace } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, @@ -41,9 +39,9 @@ withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. 
They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { metadata+: { uid: uid } }, }, '#new':: d.fn(help='new returns an instance of Role', args=[d.arg(name='name', type=d.T.string)]), diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1/roleBinding.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/rbac/v1/roleBinding.libsonnet similarity index 85% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1/roleBinding.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/rbac/v1/roleBinding.libsonnet index 2a6d5dea7e8..75747fe5b52 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1/roleBinding.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/rbac/v1/roleBinding.libsonnet @@ -3,12 +3,10 @@ '#':: d.pkg(name='roleBinding', url='', help='"RoleBinding references a role, but does not contain it. It can reference a Role in the same namespace or a ClusterRole in the global namespace. It adds who information via Subjects and namespace information by which namespace it exists in. RoleBindings in a given namespace only have effect in that namespace."'), '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. 
More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -19,21 +17,21 @@ withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. 
If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { metadata+: { generateName: generateName } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { metadata+: { labels+: labels } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. 
Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { metadata+: { namespace: namespace } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, @@ -41,9 +39,9 @@ withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. 
Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { metadata+: { uid: uid } }, }, '#new':: d.fn(help='new returns an instance of RoleBinding', args=[d.arg(name='name', type=d.T.string)]), diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1/roleRef.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/rbac/v1/roleRef.libsonnet similarity index 100% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1/roleRef.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/rbac/v1/roleRef.libsonnet diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1/subject.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/rbac/v1/subject.libsonnet similarity index 100% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1/subject.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/rbac/v1/subject.libsonnet diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/resource/main.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/resource/main.libsonnet new file mode 100644 index 00000000000..98b5e9e085a --- /dev/null +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/resource/main.libsonnet @@ -0,0 +1,5 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='resource', url='', help=''), + v1alpha2: (import 'v1alpha2/main.libsonnet'), +} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/resource/v1alpha2/allocationResult.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/resource/v1alpha2/allocationResult.libsonnet new file mode 100644 index 00000000000..569aad3ff55 --- /dev/null +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/resource/v1alpha2/allocationResult.libsonnet @@ -0,0 +1,19 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='allocationResult', url='', help='"AllocationResult contains attributes of an allocated resource."'), + '#availableOnNodes':: d.obj(help='"A node selector represents the union of the results of one or more label queries over a set of nodes; that is, it represents the OR of the selectors represented by the node selector terms."'), + availableOnNodes: { + '#withNodeSelectorTerms':: d.fn(help='"Required. A list of node selector terms. The terms are ORed."', args=[d.arg(name='nodeSelectorTerms', type=d.T.array)]), + withNodeSelectorTerms(nodeSelectorTerms): { availableOnNodes+: { nodeSelectorTerms: if std.isArray(v=nodeSelectorTerms) then nodeSelectorTerms else [nodeSelectorTerms] } }, + '#withNodeSelectorTermsMixin':: d.fn(help='"Required. A list of node selector terms. The terms are ORed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='nodeSelectorTerms', type=d.T.array)]), + withNodeSelectorTermsMixin(nodeSelectorTerms): { availableOnNodes+: { nodeSelectorTerms+: if std.isArray(v=nodeSelectorTerms) then nodeSelectorTerms else [nodeSelectorTerms] } }, + }, + '#withResourceHandles':: d.fn(help='"ResourceHandles contain the state associated with an allocation that should be maintained throughout the lifetime of a claim. 
Each ResourceHandle contains data that should be passed to a specific kubelet plugin once it lands on a node. This data is returned by the driver after a successful allocation and is opaque to Kubernetes. Driver documentation may explain to users how to interpret this data if needed.\\n\\nSetting this field is optional. It has a maximum size of 32 entries. If null (or empty), it is assumed this allocation will be processed by a single kubelet plugin with no ResourceHandle data attached. The name of the kubelet plugin invoked will match the DriverName set in the ResourceClaimStatus this AllocationResult is embedded in."', args=[d.arg(name='resourceHandles', type=d.T.array)]), + withResourceHandles(resourceHandles): { resourceHandles: if std.isArray(v=resourceHandles) then resourceHandles else [resourceHandles] }, + '#withResourceHandlesMixin':: d.fn(help='"ResourceHandles contain the state associated with an allocation that should be maintained throughout the lifetime of a claim. Each ResourceHandle contains data that should be passed to a specific kubelet plugin once it lands on a node. This data is returned by the driver after a successful allocation and is opaque to Kubernetes. Driver documentation may explain to users how to interpret this data if needed.\\n\\nSetting this field is optional. It has a maximum size of 32 entries. If null (or empty), it is assumed this allocation will be processed by a single kubelet plugin with no ResourceHandle data attached. The name of the kubelet plugin invoked will match the DriverName set in the ResourceClaimStatus this AllocationResult is embedded in."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='resourceHandles', type=d.T.array)]), + withResourceHandlesMixin(resourceHandles): { resourceHandles+: if std.isArray(v=resourceHandles) then resourceHandles else [resourceHandles] }, + '#withShareable':: d.fn(help='"Shareable determines whether the resource supports more than one consumer at a time."', args=[d.arg(name='shareable', type=d.T.boolean)]), + withShareable(shareable): { shareable: shareable }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/resource/v1alpha2/main.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/resource/v1alpha2/main.libsonnet new file mode 100644 index 00000000000..957ef0c5a8f --- /dev/null +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/resource/v1alpha2/main.libsonnet @@ -0,0 +1,19 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='v1alpha2', url='', help=''), + allocationResult: (import 'allocationResult.libsonnet'), + podSchedulingContext: (import 'podSchedulingContext.libsonnet'), + podSchedulingContextSpec: (import 'podSchedulingContextSpec.libsonnet'), + podSchedulingContextStatus: (import 'podSchedulingContextStatus.libsonnet'), + resourceClaim: (import 'resourceClaim.libsonnet'), + resourceClaimConsumerReference: (import 'resourceClaimConsumerReference.libsonnet'), + resourceClaimParametersReference: (import 'resourceClaimParametersReference.libsonnet'), + resourceClaimSchedulingStatus: (import 'resourceClaimSchedulingStatus.libsonnet'), + resourceClaimSpec: (import 'resourceClaimSpec.libsonnet'), + resourceClaimStatus: (import 'resourceClaimStatus.libsonnet'), + resourceClaimTemplate: (import 'resourceClaimTemplate.libsonnet'), + resourceClaimTemplateSpec: (import 'resourceClaimTemplateSpec.libsonnet'), + resourceClass: (import 
'resourceClass.libsonnet'), + resourceClassParametersReference: (import 'resourceClassParametersReference.libsonnet'), + resourceHandle: (import 'resourceHandle.libsonnet'), +} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/resource/v1alpha2/podSchedulingContext.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/resource/v1alpha2/podSchedulingContext.libsonnet new file mode 100644 index 00000000000..91ffccd38c7 --- /dev/null +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/resource/v1alpha2/podSchedulingContext.libsonnet @@ -0,0 +1,63 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='podSchedulingContext', url='', help='"PodSchedulingContext objects hold information that is needed to schedule a Pod with ResourceClaims that use \\"WaitForFirstConsumer\\" allocation mode.\\n\\nThis is an alpha type and requires enabling the DynamicResourceAllocation feature gate."'), + '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), + metadata: { + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + withAnnotations(annotations): { metadata+: { annotations: annotations } }, + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, + '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), + withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, + '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), + withDeletionGracePeriodSeconds(deletionGracePeriodSeconds): { metadata+: { deletionGracePeriodSeconds: deletionGracePeriodSeconds } }, + '#withDeletionTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='deletionTimestamp', type=d.T.string)]), + withDeletionTimestamp(deletionTimestamp): { metadata+: { deletionTimestamp: deletionTimestamp } }, + '#withFinalizers':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. 
If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."', args=[d.arg(name='finalizers', type=d.T.array)]), + withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, + '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), + withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + withGenerateName(generateName): { metadata+: { generateName: generateName } }, + '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), + withGeneration(generation): { metadata+: { generation: generation } }, + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), + withLabels(labels): { metadata+: { labels: labels } }, + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + withLabelsMixin(labels): { metadata+: { labels+: labels } }, + '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), + withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, + '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), + withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { metadata+: { name: name } }, + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + withNamespace(namespace): { metadata+: { namespace: namespace } }, + '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. 
There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), + withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, + '#withOwnerReferencesMixin':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ownerReferences', type=d.T.array)]), + withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, + '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), + withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), + withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), + withUid(uid): { metadata+: { uid: uid } }, + }, + '#new':: d.fn(help='new returns an instance of PodSchedulingContext', args=[d.arg(name='name', type=d.T.string)]), + new(name): { + apiVersion: 'resource.k8s.io/v1alpha2', + kind: 'PodSchedulingContext', + } + self.metadata.withName(name=name), + '#spec':: d.obj(help='"PodSchedulingContextSpec describes where resources for the Pod are needed."'), + spec: { + '#withPotentialNodes':: d.fn(help='"PotentialNodes lists nodes where the Pod might be able to run.\\n\\nThe size of this field is limited to 128. This is large enough for many clusters. Larger clusters may need more attempts to find a node that suits all pending resources. This may get increased in the future, but not reduced."', args=[d.arg(name='potentialNodes', type=d.T.array)]), + withPotentialNodes(potentialNodes): { spec+: { potentialNodes: if std.isArray(v=potentialNodes) then potentialNodes else [potentialNodes] } }, + '#withPotentialNodesMixin':: d.fn(help='"PotentialNodes lists nodes where the Pod might be able to run.\\n\\nThe size of this field is limited to 128. This is large enough for many clusters. 
Larger clusters may need more attempts to find a node that suits all pending resources. This may get increased in the future, but not reduced."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='potentialNodes', type=d.T.array)]), + withPotentialNodesMixin(potentialNodes): { spec+: { potentialNodes+: if std.isArray(v=potentialNodes) then potentialNodes else [potentialNodes] } }, + '#withSelectedNode':: d.fn(help='"SelectedNode is the node for which allocation of ResourceClaims that are referenced by the Pod and that use \\"WaitForFirstConsumer\\" allocation is to be attempted."', args=[d.arg(name='selectedNode', type=d.T.string)]), + withSelectedNode(selectedNode): { spec+: { selectedNode: selectedNode } }, + }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/resource/v1alpha2/podSchedulingContextSpec.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/resource/v1alpha2/podSchedulingContextSpec.libsonnet new file mode 100644 index 00000000000..ec20974ea4e --- /dev/null +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/resource/v1alpha2/podSchedulingContextSpec.libsonnet @@ -0,0 +1,12 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='podSchedulingContextSpec', url='', help='"PodSchedulingContextSpec describes where resources for the Pod are needed."'), + '#withPotentialNodes':: d.fn(help='"PotentialNodes lists nodes where the Pod might be able to run.\\n\\nThe size of this field is limited to 128. This is large enough for many clusters. Larger clusters may need more attempts to find a node that suits all pending resources. This may get increased in the future, but not reduced."', args=[d.arg(name='potentialNodes', type=d.T.array)]), + withPotentialNodes(potentialNodes): { potentialNodes: if std.isArray(v=potentialNodes) then potentialNodes else [potentialNodes] }, + '#withPotentialNodesMixin':: d.fn(help='"PotentialNodes lists nodes where the Pod might be able to run.\\n\\nThe size of this field is limited to 128. This is large enough for many clusters. Larger clusters may need more attempts to find a node that suits all pending resources. 
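// Editor's usage sketch (illustrative, not part of the patch): building a
// PodSchedulingContext with the generated constructors documented above.
// The import path is an assumption; node names are placeholders.
local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';
local psc = k.resource.v1alpha2.podSchedulingContext;

psc.new('my-pod')                                    // sets apiVersion, kind and metadata.name
+ psc.metadata.withNamespace('default')
+ psc.spec.withPotentialNodes(['node-a', 'node-b'])  // a single non-array value would be wrapped in a list
+ psc.spec.withSelectedNode('node-a')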
This may get increased in the future, but not reduced."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='potentialNodes', type=d.T.array)]), + withPotentialNodesMixin(potentialNodes): { potentialNodes+: if std.isArray(v=potentialNodes) then potentialNodes else [potentialNodes] }, + '#withSelectedNode':: d.fn(help='"SelectedNode is the node for which allocation of ResourceClaims that are referenced by the Pod and that use \\"WaitForFirstConsumer\\" allocation is to be attempted."', args=[d.arg(name='selectedNode', type=d.T.string)]), + withSelectedNode(selectedNode): { selectedNode: selectedNode }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/resource/v1alpha2/podSchedulingContextStatus.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/resource/v1alpha2/podSchedulingContextStatus.libsonnet new file mode 100644 index 00000000000..c3e78ff2d9e --- /dev/null +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/resource/v1alpha2/podSchedulingContextStatus.libsonnet @@ -0,0 +1,10 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='podSchedulingContextStatus', url='', help='"PodSchedulingContextStatus describes where resources for the Pod can be allocated."'), + '#withResourceClaims':: d.fn(help='"ResourceClaims describes resource availability for each pod.spec.resourceClaim entry where the corresponding ResourceClaim uses \\"WaitForFirstConsumer\\" allocation mode."', args=[d.arg(name='resourceClaims', type=d.T.array)]), + withResourceClaims(resourceClaims): { resourceClaims: if std.isArray(v=resourceClaims) then resourceClaims else [resourceClaims] }, + '#withResourceClaimsMixin':: d.fn(help='"ResourceClaims describes resource availability for each pod.spec.resourceClaim entry where the corresponding ResourceClaim uses \\"WaitForFirstConsumer\\" allocation mode."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='resourceClaims', type=d.T.array)]), + withResourceClaimsMixin(resourceClaims): { resourceClaims+: if std.isArray(v=resourceClaims) then resourceClaims else [resourceClaims] }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/coordination/v1beta1/lease.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/resource/v1alpha2/resourceClaim.libsonnet similarity index 73% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/coordination/v1beta1/lease.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/resource/v1alpha2/resourceClaim.libsonnet index ff3faec5fea..18f7803ae8c 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/coordination/v1beta1/lease.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/resource/v1alpha2/resourceClaim.libsonnet @@ -1,14 +1,12 @@ { local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='lease', url='', help='"Lease defines a lease concept."'), + '#':: d.pkg(name='resourceClaim', url='', help='"ResourceClaim describes which resources are needed by a resource consumer. 
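// Editor's sketch: unlike the top-level object library above, the standalone
// podSchedulingContextSpec / podSchedulingContextStatus libraries emit bare
// fragments (no `spec+:` / `metadata+:` wrapping), so their output composes
// directly into a parent object. Import path assumed as elsewhere in this patch.
local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';
local specLib = k.resource.v1alpha2.podSchedulingContextSpec;

// Evaluates to { potentialNodes: ['node-a'], selectedNode: 'node-a' }.
specLib.withPotentialNodes('node-a') + specLib.withSelectedNode('node-a')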
Its status tracks whether the resource has been allocated and what the resulting attributes are.\\n\\nThis is an alpha type and requires enabling the DynamicResourceAllocation feature gate."'), '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -19,21 +17,21 @@ withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. 
Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { metadata+: { generateName: generateName } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. 
More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { metadata+: { labels+: labels } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { metadata+: { namespace: namespace } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, @@ -41,28 +39,31 @@ withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. 
It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { metadata+: { uid: uid } }, }, - '#new':: d.fn(help='new returns an instance of Lease', args=[d.arg(name='name', type=d.T.string)]), + '#new':: d.fn(help='new returns an instance of ResourceClaim', args=[d.arg(name='name', type=d.T.string)]), new(name): { - apiVersion: 'coordination.k8s.io/v1beta1', - kind: 'Lease', + apiVersion: 'resource.k8s.io/v1alpha2', + kind: 'ResourceClaim', } + self.metadata.withName(name=name), - '#spec':: d.obj(help='"LeaseSpec is a specification of a Lease."'), + '#spec':: d.obj(help='"ResourceClaimSpec defines how a resource is to be allocated."'), spec: { - '#withAcquireTime':: d.fn(help='"MicroTime is version of Time with microsecond level precision."', args=[d.arg(name='acquireTime', type=d.T.string)]), - withAcquireTime(acquireTime): { spec+: { acquireTime: acquireTime } }, - '#withHolderIdentity':: d.fn(help='"holderIdentity contains the identity of the holder of a current lease."', args=[d.arg(name='holderIdentity', type=d.T.string)]), - withHolderIdentity(holderIdentity): { spec+: { holderIdentity: holderIdentity } }, - '#withLeaseDurationSeconds':: d.fn(help='"leaseDurationSeconds is a duration that candidates for a lease need to wait to force acquire it. This is measure against time of last observed RenewTime."', args=[d.arg(name='leaseDurationSeconds', type=d.T.integer)]), - withLeaseDurationSeconds(leaseDurationSeconds): { spec+: { leaseDurationSeconds: leaseDurationSeconds } }, - '#withLeaseTransitions':: d.fn(help='"leaseTransitions is the number of transitions of a lease between holders."', args=[d.arg(name='leaseTransitions', type=d.T.integer)]), - withLeaseTransitions(leaseTransitions): { spec+: { leaseTransitions: leaseTransitions } }, - '#withRenewTime':: d.fn(help='"MicroTime is version of Time with microsecond level precision."', args=[d.arg(name='renewTime', type=d.T.string)]), - withRenewTime(renewTime): { spec+: { renewTime: renewTime } }, + '#parametersRef':: d.obj(help='"ResourceClaimParametersReference contains enough information to let you locate the parameters for a ResourceClaim. The object must be in the same namespace as the ResourceClaim."'), + parametersRef: { + '#withApiGroup':: d.fn(help='"APIGroup is the group for the resource being referenced. It is empty for the core API. This matches the group in the APIVersion that is used when creating the resources."', args=[d.arg(name='apiGroup', type=d.T.string)]), + withApiGroup(apiGroup): { spec+: { parametersRef+: { apiGroup: apiGroup } } }, + '#withKind':: d.fn(help="\"Kind is the type of resource being referenced. 
This is the same value as in the parameter object's metadata, for example \\\"ConfigMap\\\".\"", args=[d.arg(name='kind', type=d.T.string)]), + withKind(kind): { spec+: { parametersRef+: { kind: kind } } }, + '#withName':: d.fn(help='"Name is the name of resource being referenced."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { spec+: { parametersRef+: { name: name } } }, + }, + '#withAllocationMode':: d.fn(help='"Allocation can start immediately or when a Pod wants to use the resource. \\"WaitForFirstConsumer\\" is the default."', args=[d.arg(name='allocationMode', type=d.T.string)]), + withAllocationMode(allocationMode): { spec+: { allocationMode: allocationMode } }, + '#withResourceClassName':: d.fn(help='"ResourceClassName references the driver and additional parameters via the name of a ResourceClass that was created as part of the driver deployment."', args=[d.arg(name='resourceClassName', type=d.T.string)]), + withResourceClassName(resourceClassName): { spec+: { resourceClassName: resourceClassName } }, }, '#mixin': 'ignore', mixin: self, diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/resource/v1alpha2/resourceClaimConsumerReference.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/resource/v1alpha2/resourceClaimConsumerReference.libsonnet new file mode 100644 index 00000000000..e04b19dfda8 --- /dev/null +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/resource/v1alpha2/resourceClaimConsumerReference.libsonnet @@ -0,0 +1,14 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='resourceClaimConsumerReference', url='', help='"ResourceClaimConsumerReference contains enough information to let you locate the consumer of a ResourceClaim. The user must be a resource in the same namespace as the ResourceClaim."'), + '#withApiGroup':: d.fn(help='"APIGroup is the group for the resource being referenced. It is empty for the core API. This matches the group in the APIVersion that is used when creating the resources."', args=[d.arg(name='apiGroup', type=d.T.string)]), + withApiGroup(apiGroup): { apiGroup: apiGroup }, + '#withName':: d.fn(help='"Name is the name of resource being referenced."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { name: name }, + '#withResource':: d.fn(help='"Resource is the type of resource being referenced, for example \\"pods\\"."', args=[d.arg(name='resource', type=d.T.string)]), + withResource(resource): { resource: resource }, + '#withUid':: d.fn(help='"UID identifies exactly one incarnation of the resource."', args=[d.arg(name='uid', type=d.T.string)]), + withUid(uid): { uid: uid }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/resource/v1alpha2/resourceClaimParametersReference.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/resource/v1alpha2/resourceClaimParametersReference.libsonnet new file mode 100644 index 00000000000..f6e426aa2e3 --- /dev/null +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/resource/v1alpha2/resourceClaimParametersReference.libsonnet @@ -0,0 +1,12 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='resourceClaimParametersReference', url='', help='"ResourceClaimParametersReference contains enough information to let you locate the parameters for a ResourceClaim. 
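// Editor's usage sketch (not part of the patch): a ResourceClaim built with the
// generated functions above. Import path, namespace, class and parameter names
// are placeholders/assumptions.
local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';
local claim = k.resource.v1alpha2.resourceClaim;

claim.new('gpu-claim')
+ claim.metadata.withNamespace('ml')
+ claim.spec.withResourceClassName('example.com-gpu')
+ claim.spec.withAllocationMode('WaitForFirstConsumer')
+ claim.spec.parametersRef.withApiGroup('gpu.example.com')     // hypothetical driver API group
+ claim.spec.parametersRef.withKind('GpuClaimParameters')      // hypothetical parameter kind
+ claim.spec.parametersRef.withName('gpu-params')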
The object must be in the same namespace as the ResourceClaim."'), + '#withApiGroup':: d.fn(help='"APIGroup is the group for the resource being referenced. It is empty for the core API. This matches the group in the APIVersion that is used when creating the resources."', args=[d.arg(name='apiGroup', type=d.T.string)]), + withApiGroup(apiGroup): { apiGroup: apiGroup }, + '#withKind':: d.fn(help="\"Kind is the type of resource being referenced. This is the same value as in the parameter object's metadata, for example \\\"ConfigMap\\\".\"", args=[d.arg(name='kind', type=d.T.string)]), + withKind(kind): { kind: kind }, + '#withName':: d.fn(help='"Name is the name of resource being referenced."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { name: name }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/resource/v1alpha2/resourceClaimSchedulingStatus.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/resource/v1alpha2/resourceClaimSchedulingStatus.libsonnet new file mode 100644 index 00000000000..5a7a6328683 --- /dev/null +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/resource/v1alpha2/resourceClaimSchedulingStatus.libsonnet @@ -0,0 +1,12 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='resourceClaimSchedulingStatus', url='', help='"ResourceClaimSchedulingStatus contains information about one particular ResourceClaim with \\"WaitForFirstConsumer\\" allocation mode."'), + '#withName':: d.fn(help='"Name matches the pod.spec.resourceClaims[*].Name field."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { name: name }, + '#withUnsuitableNodes':: d.fn(help='"UnsuitableNodes lists nodes that the ResourceClaim cannot be allocated for.\\n\\nThe size of this field is limited to 128, the same as for PodSchedulingSpec.PotentialNodes. This may get increased in the future, but not reduced."', args=[d.arg(name='unsuitableNodes', type=d.T.array)]), + withUnsuitableNodes(unsuitableNodes): { unsuitableNodes: if std.isArray(v=unsuitableNodes) then unsuitableNodes else [unsuitableNodes] }, + '#withUnsuitableNodesMixin':: d.fn(help='"UnsuitableNodes lists nodes that the ResourceClaim cannot be allocated for.\\n\\nThe size of this field is limited to 128, the same as for PodSchedulingSpec.PotentialNodes. This may get increased in the future, but not reduced."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='unsuitableNodes', type=d.T.array)]), + withUnsuitableNodesMixin(unsuitableNodes): { unsuitableNodes+: if std.isArray(v=unsuitableNodes) then unsuitableNodes else [unsuitableNodes] }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/resource/v1alpha2/resourceClaimSpec.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/resource/v1alpha2/resourceClaimSpec.libsonnet new file mode 100644 index 00000000000..12f08aed5bf --- /dev/null +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/resource/v1alpha2/resourceClaimSpec.libsonnet @@ -0,0 +1,19 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='resourceClaimSpec', url='', help='"ResourceClaimSpec defines how a resource is to be allocated."'), + '#parametersRef':: d.obj(help='"ResourceClaimParametersReference contains enough information to let you locate the parameters for a ResourceClaim. 
The object must be in the same namespace as the ResourceClaim."'), + parametersRef: { + '#withApiGroup':: d.fn(help='"APIGroup is the group for the resource being referenced. It is empty for the core API. This matches the group in the APIVersion that is used when creating the resources."', args=[d.arg(name='apiGroup', type=d.T.string)]), + withApiGroup(apiGroup): { parametersRef+: { apiGroup: apiGroup } }, + '#withKind':: d.fn(help="\"Kind is the type of resource being referenced. This is the same value as in the parameter object's metadata, for example \\\"ConfigMap\\\".\"", args=[d.arg(name='kind', type=d.T.string)]), + withKind(kind): { parametersRef+: { kind: kind } }, + '#withName':: d.fn(help='"Name is the name of resource being referenced."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { parametersRef+: { name: name } }, + }, + '#withAllocationMode':: d.fn(help='"Allocation can start immediately or when a Pod wants to use the resource. \\"WaitForFirstConsumer\\" is the default."', args=[d.arg(name='allocationMode', type=d.T.string)]), + withAllocationMode(allocationMode): { allocationMode: allocationMode }, + '#withResourceClassName':: d.fn(help='"ResourceClassName references the driver and additional parameters via the name of a ResourceClass that was created as part of the driver deployment."', args=[d.arg(name='resourceClassName', type=d.T.string)]), + withResourceClassName(resourceClassName): { resourceClassName: resourceClassName }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/resource/v1alpha2/resourceClaimStatus.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/resource/v1alpha2/resourceClaimStatus.libsonnet new file mode 100644 index 00000000000..b036002087f --- /dev/null +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/resource/v1alpha2/resourceClaimStatus.libsonnet @@ -0,0 +1,30 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='resourceClaimStatus', url='', help='"ResourceClaimStatus tracks whether the resource has been allocated and what the resulting attributes are."'), + '#allocation':: d.obj(help='"AllocationResult contains attributes of an allocated resource."'), + allocation: { + '#availableOnNodes':: d.obj(help='"A node selector represents the union of the results of one or more label queries over a set of nodes; that is, it represents the OR of the selectors represented by the node selector terms."'), + availableOnNodes: { + '#withNodeSelectorTerms':: d.fn(help='"Required. A list of node selector terms. The terms are ORed."', args=[d.arg(name='nodeSelectorTerms', type=d.T.array)]), + withNodeSelectorTerms(nodeSelectorTerms): { allocation+: { availableOnNodes+: { nodeSelectorTerms: if std.isArray(v=nodeSelectorTerms) then nodeSelectorTerms else [nodeSelectorTerms] } } }, + '#withNodeSelectorTermsMixin':: d.fn(help='"Required. A list of node selector terms. The terms are ORed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='nodeSelectorTerms', type=d.T.array)]), + withNodeSelectorTermsMixin(nodeSelectorTerms): { allocation+: { availableOnNodes+: { nodeSelectorTerms+: if std.isArray(v=nodeSelectorTerms) then nodeSelectorTerms else [nodeSelectorTerms] } } }, + }, + '#withResourceHandles':: d.fn(help='"ResourceHandles contain the state associated with an allocation that should be maintained throughout the lifetime of a claim. 
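// Editor's sketch: the standalone resourceClaimSpec library produces an
// unwrapped ResourceClaimSpec, which can be dropped into any field expecting
// that type. Import path assumed; names are placeholders.
local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';
local spec = k.resource.v1alpha2.resourceClaimSpec;

{
  // Yields { resourceClassName: ..., allocationMode: ..., parametersRef: { name: ... } }.
  spec: spec.withResourceClassName('example.com-gpu')
        + spec.withAllocationMode('WaitForFirstConsumer')
        + spec.parametersRef.withName('gpu-params'),
}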
Each ResourceHandle contains data that should be passed to a specific kubelet plugin once it lands on a node. This data is returned by the driver after a successful allocation and is opaque to Kubernetes. Driver documentation may explain to users how to interpret this data if needed.\\n\\nSetting this field is optional. It has a maximum size of 32 entries. If null (or empty), it is assumed this allocation will be processed by a single kubelet plugin with no ResourceHandle data attached. The name of the kubelet plugin invoked will match the DriverName set in the ResourceClaimStatus this AllocationResult is embedded in."', args=[d.arg(name='resourceHandles', type=d.T.array)]), + withResourceHandles(resourceHandles): { allocation+: { resourceHandles: if std.isArray(v=resourceHandles) then resourceHandles else [resourceHandles] } }, + '#withResourceHandlesMixin':: d.fn(help='"ResourceHandles contain the state associated with an allocation that should be maintained throughout the lifetime of a claim. Each ResourceHandle contains data that should be passed to a specific kubelet plugin once it lands on a node. This data is returned by the driver after a successful allocation and is opaque to Kubernetes. Driver documentation may explain to users how to interpret this data if needed.\\n\\nSetting this field is optional. It has a maximum size of 32 entries. If null (or empty), it is assumed this allocation will be processed by a single kubelet plugin with no ResourceHandle data attached. The name of the kubelet plugin invoked will match the DriverName set in the ResourceClaimStatus this AllocationResult is embedded in."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='resourceHandles', type=d.T.array)]), + withResourceHandlesMixin(resourceHandles): { allocation+: { resourceHandles+: if std.isArray(v=resourceHandles) then resourceHandles else [resourceHandles] } }, + '#withShareable':: d.fn(help='"Shareable determines whether the resource supports more than one consumer at a time."', args=[d.arg(name='shareable', type=d.T.boolean)]), + withShareable(shareable): { allocation+: { shareable: shareable } }, + }, + '#withDeallocationRequested':: d.fn(help='"DeallocationRequested indicates that a ResourceClaim is to be deallocated.\\n\\nThe driver then must deallocate this claim and reset the field together with clearing the Allocation field.\\n\\nWhile DeallocationRequested is set, no new consumers may be added to ReservedFor."', args=[d.arg(name='deallocationRequested', type=d.T.boolean)]), + withDeallocationRequested(deallocationRequested): { deallocationRequested: deallocationRequested }, + '#withDriverName':: d.fn(help='"DriverName is a copy of the driver name from the ResourceClass at the time when allocation started."', args=[d.arg(name='driverName', type=d.T.string)]), + withDriverName(driverName): { driverName: driverName }, + '#withReservedFor':: d.fn(help='"ReservedFor indicates which entities are currently allowed to use the claim. A Pod which references a ResourceClaim which is not reserved for that Pod will not be started.\\n\\nThere can be at most 32 such reservations. This may get increased in the future, but not reduced."', args=[d.arg(name='reservedFor', type=d.T.array)]), + withReservedFor(reservedFor): { reservedFor: if std.isArray(v=reservedFor) then reservedFor else [reservedFor] }, + '#withReservedForMixin':: d.fn(help='"ReservedFor indicates which entities are currently allowed to use the claim. 
A Pod which references a ResourceClaim which is not reserved for that Pod will not be started.\\n\\nThere can be at most 32 such reservations. This may get increased in the future, but not reduced."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='reservedFor', type=d.T.array)]), + withReservedForMixin(reservedFor): { reservedFor+: if std.isArray(v=reservedFor) then reservedFor else [reservedFor] }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/resource/v1alpha2/resourceClaimTemplate.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/resource/v1alpha2/resourceClaimTemplate.libsonnet new file mode 100644 index 00000000000..54f9b26ef99 --- /dev/null +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/resource/v1alpha2/resourceClaimTemplate.libsonnet @@ -0,0 +1,116 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='resourceClaimTemplate', url='', help='"ResourceClaimTemplate is used to produce ResourceClaim objects."'), + '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), + metadata: { + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + withAnnotations(annotations): { metadata+: { annotations: annotations } }, + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, + '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), + withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, + '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), + withDeletionGracePeriodSeconds(deletionGracePeriodSeconds): { metadata+: { deletionGracePeriodSeconds: deletionGracePeriodSeconds } }, + '#withDeletionTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. 
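// Editor's sketch of the array `Mixin` convention used throughout these files:
// `withReservedFor` replaces the list, `withReservedForMixin` appends to it, and
// both wrap a single non-array value in a list. Status fields are normally
// written by the driver/controller; this only illustrates the function semantics.
local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';
local status = k.resource.v1alpha2.resourceClaimStatus;

// Evaluates to { reservedFor: [{ name: 'pod-a', resource: 'pods' }, { name: 'pod-b', resource: 'pods' }] }.
status.withReservedFor([{ name: 'pod-a', resource: 'pods' }])
+ status.withReservedForMixin({ name: 'pod-b', resource: 'pods' })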
Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='deletionTimestamp', type=d.T.string)]), + withDeletionTimestamp(deletionTimestamp): { metadata+: { deletionTimestamp: deletionTimestamp } }, + '#withFinalizers':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."', args=[d.arg(name='finalizers', type=d.T.array)]), + withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, + '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), + withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + withGenerateName(generateName): { metadata+: { generateName: generateName } }, + '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. 
Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), + withGeneration(generation): { metadata+: { generation: generation } }, + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), + withLabels(labels): { metadata+: { labels: labels } }, + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + withLabelsMixin(labels): { metadata+: { labels+: labels } }, + '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), + withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, + '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), + withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { metadata+: { name: name } }, + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + withNamespace(namespace): { metadata+: { namespace: namespace } }, + '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. 
If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), + withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, + '#withOwnerReferencesMixin':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ownerReferences', type=d.T.array)]), + withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, + '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), + withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), + withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), + withUid(uid): { metadata+: { uid: uid } }, + }, + '#new':: d.fn(help='new returns an instance of ResourceClaimTemplate', args=[d.arg(name='name', type=d.T.string)]), + new(name): { + apiVersion: 'resource.k8s.io/v1alpha2', + kind: 'ResourceClaimTemplate', + } + self.metadata.withName(name=name), + '#spec':: d.obj(help='"ResourceClaimTemplateSpec contains the metadata and fields for a ResourceClaim."'), + spec: { + '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), + metadata: { + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + withAnnotations(annotations): { spec+: { metadata+: { annotations: annotations } } }, + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + withAnnotationsMixin(annotations): { spec+: { metadata+: { annotations+: annotations } } }, + '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), + withCreationTimestamp(creationTimestamp): { spec+: { metadata+: { creationTimestamp: creationTimestamp } } }, + '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), + withDeletionGracePeriodSeconds(deletionGracePeriodSeconds): { spec+: { metadata+: { deletionGracePeriodSeconds: deletionGracePeriodSeconds } } }, + '#withDeletionTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='deletionTimestamp', type=d.T.string)]), + withDeletionTimestamp(deletionTimestamp): { spec+: { metadata+: { deletionTimestamp: deletionTimestamp } } }, + '#withFinalizers':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."', args=[d.arg(name='finalizers', type=d.T.array)]), + withFinalizers(finalizers): { spec+: { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } } }, + '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. 
Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), + withFinalizersMixin(finalizers): { spec+: { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } } }, + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + withGenerateName(generateName): { spec+: { metadata+: { generateName: generateName } } }, + '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), + withGeneration(generation): { spec+: { metadata+: { generation: generation } } }, + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), + withLabels(labels): { spec+: { metadata+: { labels: labels } } }, + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + withLabelsMixin(labels): { spec+: { metadata+: { labels+: labels } } }, + '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". 
The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), + withManagedFields(managedFields): { spec+: { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } } }, + '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), + withManagedFieldsMixin(managedFields): { spec+: { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } } }, + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { spec+: { metadata+: { name: name } } }, + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + withNamespace(namespace): { spec+: { metadata+: { namespace: namespace } } }, + '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), + withOwnerReferences(ownerReferences): { spec+: { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } } }, + '#withOwnerReferencesMixin':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. 
There cannot be more than one managing controller."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ownerReferences', type=d.T.array)]), + withOwnerReferencesMixin(ownerReferences): { spec+: { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } } }, + '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), + withResourceVersion(resourceVersion): { spec+: { metadata+: { resourceVersion: resourceVersion } } }, + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), + withSelfLink(selfLink): { spec+: { metadata+: { selfLink: selfLink } } }, + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), + withUid(uid): { spec+: { metadata+: { uid: uid } } }, + }, + '#spec':: d.obj(help='"ResourceClaimSpec defines how a resource is to be allocated."'), + spec: { + '#parametersRef':: d.obj(help='"ResourceClaimParametersReference contains enough information to let you locate the parameters for a ResourceClaim. The object must be in the same namespace as the ResourceClaim."'), + parametersRef: { + '#withApiGroup':: d.fn(help='"APIGroup is the group for the resource being referenced. It is empty for the core API. This matches the group in the APIVersion that is used when creating the resources."', args=[d.arg(name='apiGroup', type=d.T.string)]), + withApiGroup(apiGroup): { spec+: { spec+: { parametersRef+: { apiGroup: apiGroup } } } }, + '#withKind':: d.fn(help="\"Kind is the type of resource being referenced. This is the same value as in the parameter object's metadata, for example \\\"ConfigMap\\\".\"", args=[d.arg(name='kind', type=d.T.string)]), + withKind(kind): { spec+: { spec+: { parametersRef+: { kind: kind } } } }, + '#withName':: d.fn(help='"Name is the name of resource being referenced."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { spec+: { spec+: { parametersRef+: { name: name } } } }, + }, + '#withAllocationMode':: d.fn(help='"Allocation can start immediately or when a Pod wants to use the resource. 
\\"WaitForFirstConsumer\\" is the default."', args=[d.arg(name='allocationMode', type=d.T.string)]), + withAllocationMode(allocationMode): { spec+: { spec+: { allocationMode: allocationMode } } }, + '#withResourceClassName':: d.fn(help='"ResourceClassName references the driver and additional parameters via the name of a ResourceClass that was created as part of the driver deployment."', args=[d.arg(name='resourceClassName', type=d.T.string)]), + withResourceClassName(resourceClassName): { spec+: { spec+: { resourceClassName: resourceClassName } } }, + }, + }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1beta1/selfSubjectRulesReview.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/resource/v1alpha2/resourceClaimTemplateSpec.libsonnet similarity index 74% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1beta1/selfSubjectRulesReview.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/resource/v1alpha2/resourceClaimTemplateSpec.libsonnet index 721fa55497c..69c42e9fbd7 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1beta1/selfSubjectRulesReview.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/resource/v1alpha2/resourceClaimTemplateSpec.libsonnet @@ -1,14 +1,12 @@ { local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='selfSubjectRulesReview', url='', help="\"SelfSubjectRulesReview enumerates the set of actions the current user can perform within a namespace. The returned list of actions may be incomplete depending on the server's authorization mode, and any errors experienced during the evaluation. SelfSubjectRulesReview should be used by UIs to show/hide actions, or to quickly let an end user reason about their permissions. It should NOT Be used by external systems to drive authorization decisions as this raises confused deputy, cache lifetime/revocation, and correctness concerns. SubjectAccessReview, and LocalAccessReview are the correct way to defer authorization decisions to the API server.\""), + '#':: d.pkg(name='resourceClaimTemplateSpec', url='', help='"ResourceClaimTemplateSpec contains the metadata and fields for a ResourceClaim."'), '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. 
They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -19,21 +17,21 @@ withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. 
The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { metadata+: { generateName: generateName } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { metadata+: { labels+: labels } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. 
This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { metadata+: { namespace: namespace } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. 
There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, @@ -41,20 +39,26 @@ withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { metadata+: { uid: uid } }, }, - '#new':: d.fn(help='new returns an instance of SelfSubjectRulesReview', args=[d.arg(name='name', type=d.T.string)]), - new(name): { - apiVersion: 'authorization.k8s.io/v1beta1', - kind: 'SelfSubjectRulesReview', - } + self.metadata.withName(name=name), - '#spec':: d.obj(help=''), + '#spec':: d.obj(help='"ResourceClaimSpec defines how a resource is to be allocated."'), spec: { - '#withNamespace':: d.fn(help='"Namespace to evaluate rules for. Required."', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { spec+: { namespace: namespace } }, + '#parametersRef':: d.obj(help='"ResourceClaimParametersReference contains enough information to let you locate the parameters for a ResourceClaim. The object must be in the same namespace as the ResourceClaim."'), + parametersRef: { + '#withApiGroup':: d.fn(help='"APIGroup is the group for the resource being referenced. It is empty for the core API. 
This matches the group in the APIVersion that is used when creating the resources."', args=[d.arg(name='apiGroup', type=d.T.string)]), + withApiGroup(apiGroup): { spec+: { parametersRef+: { apiGroup: apiGroup } } }, + '#withKind':: d.fn(help="\"Kind is the type of resource being referenced. This is the same value as in the parameter object's metadata, for example \\\"ConfigMap\\\".\"", args=[d.arg(name='kind', type=d.T.string)]), + withKind(kind): { spec+: { parametersRef+: { kind: kind } } }, + '#withName':: d.fn(help='"Name is the name of resource being referenced."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { spec+: { parametersRef+: { name: name } } }, + }, + '#withAllocationMode':: d.fn(help='"Allocation can start immediately or when a Pod wants to use the resource. \\"WaitForFirstConsumer\\" is the default."', args=[d.arg(name='allocationMode', type=d.T.string)]), + withAllocationMode(allocationMode): { spec+: { allocationMode: allocationMode } }, + '#withResourceClassName':: d.fn(help='"ResourceClassName references the driver and additional parameters via the name of a ResourceClass that was created as part of the driver deployment."', args=[d.arg(name='resourceClassName', type=d.T.string)]), + withResourceClassName(resourceClassName): { spec+: { resourceClassName: resourceClassName } }, }, '#mixin': 'ignore', mixin: self, diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/resource/v1alpha2/resourceClass.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/resource/v1alpha2/resourceClass.libsonnet new file mode 100644 index 00000000000..fd069ac11f1 --- /dev/null +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/resource/v1alpha2/resourceClass.libsonnet @@ -0,0 +1,74 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='resourceClass', url='', help='"ResourceClass is used by administrators to influence how resources are allocated.\\n\\nThis is an alpha type and requires enabling the DynamicResourceAllocation feature gate."'), + '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), + metadata: { + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + withAnnotations(annotations): { metadata+: { annotations: annotations } }, + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, + '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. 
Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), + withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, + '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), + withDeletionGracePeriodSeconds(deletionGracePeriodSeconds): { metadata+: { deletionGracePeriodSeconds: deletionGracePeriodSeconds } }, + '#withDeletionTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='deletionTimestamp', type=d.T.string)]), + withDeletionTimestamp(deletionTimestamp): { metadata+: { deletionTimestamp: deletionTimestamp } }, + '#withFinalizers':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."', args=[d.arg(name='finalizers', type=d.T.array)]), + withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, + '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), + withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. 
If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + withGenerateName(generateName): { metadata+: { generateName: generateName } }, + '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), + withGeneration(generation): { metadata+: { generation: generation } }, + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), + withLabels(labels): { metadata+: { labels: labels } }, + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + withLabelsMixin(labels): { metadata+: { labels+: labels } }, + '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), + withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, + '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), + withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { metadata+: { name: name } }, + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + withNamespace(namespace): { metadata+: { namespace: namespace } }, + '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), + withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, + '#withOwnerReferencesMixin':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ownerReferences', type=d.T.array)]), + withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, + '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), + withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), + withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), + withUid(uid): { metadata+: { uid: uid } }, + }, + '#new':: d.fn(help='new returns an instance of ResourceClass', args=[d.arg(name='name', type=d.T.string)]), + new(name): { + apiVersion: 'resource.k8s.io/v1alpha2', + kind: 'ResourceClass', + } + self.metadata.withName(name=name), + '#parametersRef':: d.obj(help='"ResourceClassParametersReference contains enough information to let you locate the parameters for a ResourceClass."'), + parametersRef: { + '#withApiGroup':: d.fn(help='"APIGroup is the group for the resource being referenced. It is empty for the core API. This matches the group in the APIVersion that is used when creating the resources."', args=[d.arg(name='apiGroup', type=d.T.string)]), + withApiGroup(apiGroup): { parametersRef+: { apiGroup: apiGroup } }, + '#withKind':: d.fn(help="\"Kind is the type of resource being referenced. This is the same value as in the parameter object's metadata.\"", args=[d.arg(name='kind', type=d.T.string)]), + withKind(kind): { parametersRef+: { kind: kind } }, + '#withName':: d.fn(help='"Name is the name of resource being referenced."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { parametersRef+: { name: name } }, + '#withNamespace':: d.fn(help='"Namespace that contains the referenced resource. Must be empty for cluster-scoped resources and non-empty for namespaced resources."', args=[d.arg(name='namespace', type=d.T.string)]), + withNamespace(namespace): { parametersRef+: { namespace: namespace } }, + }, + '#suitableNodes':: d.obj(help='"A node selector represents the union of the results of one or more label queries over a set of nodes; that is, it represents the OR of the selectors represented by the node selector terms."'), + suitableNodes: { + '#withNodeSelectorTerms':: d.fn(help='"Required. A list of node selector terms. The terms are ORed."', args=[d.arg(name='nodeSelectorTerms', type=d.T.array)]), + withNodeSelectorTerms(nodeSelectorTerms): { suitableNodes+: { nodeSelectorTerms: if std.isArray(v=nodeSelectorTerms) then nodeSelectorTerms else [nodeSelectorTerms] } }, + '#withNodeSelectorTermsMixin':: d.fn(help='"Required. A list of node selector terms. 
The terms are ORed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='nodeSelectorTerms', type=d.T.array)]),
+    withNodeSelectorTermsMixin(nodeSelectorTerms): { suitableNodes+: { nodeSelectorTerms+: if std.isArray(v=nodeSelectorTerms) then nodeSelectorTerms else [nodeSelectorTerms] } },
+  },
+  '#withDriverName':: d.fn(help='"DriverName defines the name of the dynamic resource driver that is used for allocation of a ResourceClaim that uses this class.\\n\\nResource drivers have a unique name in forward domain order (acme.example.com)."', args=[d.arg(name='driverName', type=d.T.string)]),
+  withDriverName(driverName): { driverName: driverName },
+  '#mixin': 'ignore',
+  mixin: self,
+}
diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/resource/v1alpha2/resourceClassParametersReference.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/resource/v1alpha2/resourceClassParametersReference.libsonnet
new file mode 100644
index 00000000000..68ea9376f05
--- /dev/null
+++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/resource/v1alpha2/resourceClassParametersReference.libsonnet
@@ -0,0 +1,14 @@
+{
+  local d = (import 'doc-util/main.libsonnet'),
+  '#':: d.pkg(name='resourceClassParametersReference', url='', help='"ResourceClassParametersReference contains enough information to let you locate the parameters for a ResourceClass."'),
+  '#withApiGroup':: d.fn(help='"APIGroup is the group for the resource being referenced. It is empty for the core API. This matches the group in the APIVersion that is used when creating the resources."', args=[d.arg(name='apiGroup', type=d.T.string)]),
+  withApiGroup(apiGroup): { apiGroup: apiGroup },
+  '#withKind':: d.fn(help="\"Kind is the type of resource being referenced. This is the same value as in the parameter object's metadata.\"", args=[d.arg(name='kind', type=d.T.string)]),
+  withKind(kind): { kind: kind },
+  '#withName':: d.fn(help='"Name is the name of resource being referenced."', args=[d.arg(name='name', type=d.T.string)]),
+  withName(name): { name: name },
+  '#withNamespace':: d.fn(help='"Namespace that contains the referenced resource. Must be empty for cluster-scoped resources and non-empty for namespaced resources."', args=[d.arg(name='namespace', type=d.T.string)]),
+  withNamespace(namespace): { namespace: namespace },
+  '#mixin': 'ignore',
+  mixin: self,
+}
diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/resource/v1alpha2/resourceHandle.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/resource/v1alpha2/resourceHandle.libsonnet
new file mode 100644
index 00000000000..7be0566ddbe
--- /dev/null
+++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/resource/v1alpha2/resourceHandle.libsonnet
@@ -0,0 +1,10 @@
+{
+  local d = (import 'doc-util/main.libsonnet'),
+  '#':: d.pkg(name='resourceHandle', url='', help='"ResourceHandle holds opaque resource data for processing by a specific kubelet plugin."'),
+  '#withData':: d.fn(help='"Data contains the opaque data associated with this ResourceHandle. It is set by the controller component of the resource driver whose name matches the DriverName set in the ResourceClaimStatus this ResourceHandle is embedded in. It is set at allocation time and is intended for processing by the kubelet plugin whose name matches the DriverName set in this ResourceHandle.\\n\\nThe maximum size of this field is 16KiB. This may get increased in the future, but not reduced."', args=[d.arg(name='data', type=d.T.string)]),
+  withData(data): { data: data },
+  '#withDriverName':: d.fn(help="\"DriverName specifies the name of the resource driver whose kubelet plugin should be invoked to process this ResourceHandle's data once it lands on a node. This may differ from the DriverName set in ResourceClaimStatus this ResourceHandle is embedded in.\"", args=[d.arg(name='driverName', type=d.T.string)]),
+  withDriverName(driverName): { driverName: driverName },
+  '#mixin': 'ignore',
+  mixin: self,
+}
diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/scheduling/main.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/scheduling/main.libsonnet
similarity index 59%
rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/scheduling/main.libsonnet
rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/scheduling/main.libsonnet
index 69579538843..5029dd93b0d 100644
--- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/scheduling/main.libsonnet
+++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/scheduling/main.libsonnet
@@ -2,6 +2,4 @@
   local d = (import 'doc-util/main.libsonnet'),
   '#':: d.pkg(name='scheduling', url='', help=''),
   v1: (import 'v1/main.libsonnet'),
-  v1alpha1: (import 'v1alpha1/main.libsonnet'),
-  v1beta1: (import 'v1beta1/main.libsonnet'),
 }
diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/scheduling/v1/main.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/scheduling/v1/main.libsonnet
similarity index 100%
rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/scheduling/v1/main.libsonnet
rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/scheduling/v1/main.libsonnet
diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/scheduling/v1/priorityClass.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/scheduling/v1/priorityClass.libsonnet
similarity index 81%
rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/scheduling/v1/priorityClass.libsonnet
rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/scheduling/v1/priorityClass.libsonnet
index 8c47cfe159a..f0da21fc706 100644
--- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/scheduling/v1/priorityClass.libsonnet
+++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/scheduling/v1/priorityClass.libsonnet
@@ -3,12 +3,10 @@
   '#':: d.pkg(name='priorityClass', url='', help='"PriorityClass defines mapping from a priority class name to the priority integer value. The value can be any valid integer."'),
   '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'),
   metadata: {
-    '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects.
More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -19,21 +17,21 @@ withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. 
If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { metadata+: { generateName: generateName } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { metadata+: { labels+: labels } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. 
Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { metadata+: { namespace: namespace } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, @@ -41,9 +39,9 @@ withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. 
Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { metadata+: { uid: uid } }, }, '#new':: d.fn(help='new returns an instance of PriorityClass', args=[d.arg(name='name', type=d.T.string)]), @@ -55,9 +53,9 @@ withDescription(description): { description: description }, '#withGlobalDefault':: d.fn(help='"globalDefault specifies whether this PriorityClass should be considered as the default priority for pods that do not have any priority class. Only one PriorityClass can be marked as `globalDefault`. However, if more than one PriorityClasses exists with their `globalDefault` field set to true, the smallest value of such global default PriorityClasses will be used as the default priority."', args=[d.arg(name='globalDefault', type=d.T.boolean)]), withGlobalDefault(globalDefault): { globalDefault: globalDefault }, - '#withPreemptionPolicy':: d.fn(help='"PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset. This field is beta-level, gated by the NonPreemptingPriority feature-gate."', args=[d.arg(name='preemptionPolicy', type=d.T.string)]), + '#withPreemptionPolicy':: d.fn(help='"preemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset."', args=[d.arg(name='preemptionPolicy', type=d.T.string)]), withPreemptionPolicy(preemptionPolicy): { preemptionPolicy: preemptionPolicy }, - '#withValue':: d.fn(help='"The value of this priority class. This is the actual priority that pods receive when they have the name of this class in their pod spec."', args=[d.arg(name='value', type=d.T.integer)]), + '#withValue':: d.fn(help='"value represents the integer value of this priority class. 
This is the actual priority that pods receive when they have the name of this class in their pod spec."', args=[d.arg(name='value', type=d.T.integer)]), withValue(value): { value: value }, '#mixin': 'ignore', mixin: self, diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/main.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/storage/main.libsonnet similarity index 80% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/main.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/storage/main.libsonnet index 5b2186168a5..b8129d86c6c 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/main.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/storage/main.libsonnet @@ -3,5 +3,4 @@ '#':: d.pkg(name='storage', url='', help=''), v1: (import 'v1/main.libsonnet'), v1alpha1: (import 'v1alpha1/main.libsonnet'), - v1beta1: (import 'v1beta1/main.libsonnet'), } diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1/csiDriver.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/storage/v1/csiDriver.libsonnet similarity index 69% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1/csiDriver.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/storage/v1/csiDriver.libsonnet index 78435e56f38..54217c6356b 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1/csiDriver.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/storage/v1/csiDriver.libsonnet @@ -3,12 +3,10 @@ '#':: d.pkg(name='csiDriver', url='', help='"CSIDriver captures information about a Container Storage Interface (CSI) volume driver deployed on the cluster. Kubernetes attach detach controller uses this object to determine whether attach is required. Kubelet uses this object to determine whether pod information needs to be passed on mount. CSIDriver objects are non-namespaced."'), '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. 
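For orientation, the scheduling/v1 priorityClass helpers updated just above (withValue, withGlobalDefault, withPreemptionPolicy, and the metadata functions) compose like any other generated k8s-libsonnet functions. The snippet below is an illustrative sketch and is not part of the patch; it assumes the vendored 1.29 library resolves as 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet' on the jsonnet path, and the class name, value, and label are placeholders rather than anything used in this repo.

// Illustrative usage sketch only; not part of the vendored diff.
local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';
local priorityClass = k.scheduling.v1.priorityClass;

{
  // Placeholder name and value; the fields mirror the help text in the diff above.
  example_priority_class:
    priorityClass.new('example-high')
    + priorityClass.withValue(1000000)
    + priorityClass.withGlobalDefault(false)
    + priorityClass.withPreemptionPolicy('PreemptLowerPriority')
    // withLabelsMixin merges into metadata.labels (labels+:), whereas
    // withLabels would replace the whole map.
    + priorityClass.metadata.withLabelsMixin({ 'app.kubernetes.io/part-of': 'example' }),
}

Each helper returns a small object patch (for example { value: value } or { metadata+: { labels+: ... } }), so jsonnet's + operator merges them into a single PriorityClass manifest.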
More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -19,21 +17,21 @@ withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. 
The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { metadata+: { generateName: generateName } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { metadata+: { labels+: labels } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. 
This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { metadata+: { namespace: namespace } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. 
There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, @@ -41,9 +39,9 @@ withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { metadata+: { uid: uid } }, }, '#new':: d.fn(help='new returns an instance of CSIDriver', args=[d.arg(name='name', type=d.T.string)]), @@ -55,21 +53,23 @@ spec: { '#withAttachRequired':: d.fn(help='"attachRequired indicates this CSI volume driver requires an attach operation (because it implements the CSI ControllerPublishVolume() method), and that the Kubernetes attach detach controller should call the attach volume interface which checks the volumeattachment status and waits until the volume is attached before proceeding to mounting. The CSI external-attacher coordinates with CSI volume driver and updates the volumeattachment status when the attach operation is complete. If the CSIDriverRegistry feature gate is enabled and the value is specified to false, the attach operation will be skipped. 
Otherwise the attach operation will be called.\\n\\nThis field is immutable."', args=[d.arg(name='attachRequired', type=d.T.boolean)]), withAttachRequired(attachRequired): { spec+: { attachRequired: attachRequired } }, - '#withFsGroupPolicy':: d.fn(help='"Defines if the underlying volume supports changing ownership and permission of the volume before being mounted. Refer to the specific FSGroupPolicy values for additional details. This field is alpha-level, and is only honored by servers that enable the CSIVolumeFSGroupPolicy feature gate.\\n\\nThis field is immutable."', args=[d.arg(name='fsGroupPolicy', type=d.T.string)]), + '#withFsGroupPolicy':: d.fn(help="\"fsGroupPolicy defines if the underlying volume supports changing ownership and permission of the volume before being mounted. Refer to the specific FSGroupPolicy values for additional details.\\n\\nThis field is immutable.\\n\\nDefaults to ReadWriteOnceWithFSType, which will examine each volume to determine if Kubernetes should modify ownership and permissions of the volume. With the default policy the defined fsGroup will only be applied if a fstype is defined and the volume's access mode contains ReadWriteOnce.\"", args=[d.arg(name='fsGroupPolicy', type=d.T.string)]), withFsGroupPolicy(fsGroupPolicy): { spec+: { fsGroupPolicy: fsGroupPolicy } }, - '#withPodInfoOnMount':: d.fn(help="\"If set to true, podInfoOnMount indicates this CSI volume driver requires additional pod information (like podName, podUID, etc.) during mount operations. If set to false, pod information will not be passed on mount. Default is false. The CSI driver specifies podInfoOnMount as part of driver deployment. If true, Kubelet will pass pod information as VolumeContext in the CSI NodePublishVolume() calls. The CSI driver is responsible for parsing and validating the information passed in as VolumeContext. The following VolumeConext will be passed if podInfoOnMount is set to true. This list might grow, but the prefix will be used. \\\"csi.storage.k8s.io/pod.name\\\": pod.Name \\\"csi.storage.k8s.io/pod.namespace\\\": pod.Namespace \\\"csi.storage.k8s.io/pod.uid\\\": string(pod.UID) \\\"csi.storage.k8s.io/ephemeral\\\": \\\"true\\\" if the volume is an ephemeral inline volume\\n defined by a CSIVolumeSource, otherwise \\\"false\\\"\\n\\n\\\"csi.storage.k8s.io/ephemeral\\\" is a new feature in Kubernetes 1.16. It is only required for drivers which support both the \\\"Persistent\\\" and \\\"Ephemeral\\\" VolumeLifecycleMode. Other drivers can leave pod info disabled and/or ignore this field. As Kubernetes 1.15 doesn't support this field, drivers can only support one mode when deployed on such a cluster and the deployment determines which mode that is, for example via a command line parameter of the driver.\\n\\nThis field is immutable.\"", args=[d.arg(name='podInfoOnMount', type=d.T.boolean)]), + '#withPodInfoOnMount':: d.fn(help="\"podInfoOnMount indicates this CSI volume driver requires additional pod information (like podName, podUID, etc.) during mount operations, if set to true. If set to false, pod information will not be passed on mount. Default is false.\\n\\nThe CSI driver specifies podInfoOnMount as part of driver deployment. If true, Kubelet will pass pod information as VolumeContext in the CSI NodePublishVolume() calls. The CSI driver is responsible for parsing and validating the information passed in as VolumeContext.\\n\\nThe following VolumeContext will be passed if podInfoOnMount is set to true. This list might grow, but the prefix will be used. 
\\\"csi.storage.k8s.io/pod.name\\\": pod.Name \\\"csi.storage.k8s.io/pod.namespace\\\": pod.Namespace \\\"csi.storage.k8s.io/pod.uid\\\": string(pod.UID) \\\"csi.storage.k8s.io/ephemeral\\\": \\\"true\\\" if the volume is an ephemeral inline volume\\n defined by a CSIVolumeSource, otherwise \\\"false\\\"\\n\\n\\\"csi.storage.k8s.io/ephemeral\\\" is a new feature in Kubernetes 1.16. It is only required for drivers which support both the \\\"Persistent\\\" and \\\"Ephemeral\\\" VolumeLifecycleMode. Other drivers can leave pod info disabled and/or ignore this field. As Kubernetes 1.15 doesn't support this field, drivers can only support one mode when deployed on such a cluster and the deployment determines which mode that is, for example via a command line parameter of the driver.\\n\\nThis field is immutable.\"", args=[d.arg(name='podInfoOnMount', type=d.T.boolean)]), withPodInfoOnMount(podInfoOnMount): { spec+: { podInfoOnMount: podInfoOnMount } }, - '#withRequiresRepublish':: d.fn(help='"RequiresRepublish indicates the CSI driver wants `NodePublishVolume` being periodically called to reflect any possible change in the mounted volume. This field defaults to false.\\n\\nNote: After a successful initial NodePublishVolume call, subsequent calls to NodePublishVolume should only update the contents of the volume. New mount points will not be seen by a running container.\\n\\nThis is a beta feature and only available when the CSIServiceAccountToken feature is enabled."', args=[d.arg(name='requiresRepublish', type=d.T.boolean)]), + '#withRequiresRepublish':: d.fn(help='"requiresRepublish indicates the CSI driver wants `NodePublishVolume` being periodically called to reflect any possible change in the mounted volume. This field defaults to false.\\n\\nNote: After a successful initial NodePublishVolume call, subsequent calls to NodePublishVolume should only update the contents of the volume. New mount points will not be seen by a running container."', args=[d.arg(name='requiresRepublish', type=d.T.boolean)]), withRequiresRepublish(requiresRepublish): { spec+: { requiresRepublish: requiresRepublish } }, - '#withStorageCapacity':: d.fn(help='"If set to true, storageCapacity indicates that the CSI volume driver wants pod scheduling to consider the storage capacity that the driver deployment will report by creating CSIStorageCapacity objects with capacity information.\\n\\nThe check can be enabled immediately when deploying a driver. In that case, provisioning new volumes with late binding will pause until the driver deployment has published some suitable CSIStorageCapacity object.\\n\\nAlternatively, the driver can be deployed with the field unset or false and it can be flipped later when storage capacity information has been published.\\n\\nThis field is immutable.\\n\\nThis is a beta field and only available when the CSIStorageCapacity feature is enabled. The default is false."', args=[d.arg(name='storageCapacity', type=d.T.boolean)]), + '#withSeLinuxMount':: d.fn(help="\"seLinuxMount specifies if the CSI driver supports \\\"-o context\\\" mount option.\\n\\nWhen \\\"true\\\", the CSI driver must ensure that all volumes provided by this CSI driver can be mounted separately with different `-o context` options. This is typical for storage backends that provide volumes as filesystems on block devices or as independent shared volumes. 
Kubernetes will call NodeStage / NodePublish with \\\"-o context=xyz\\\" mount option when mounting a ReadWriteOncePod volume used in Pod that has explicitly set SELinux context. In the future, it may be expanded to other volume AccessModes. In any case, Kubernetes will ensure that the volume is mounted only with a single SELinux context.\\n\\nWhen \\\"false\\\", Kubernetes won't pass any special SELinux mount options to the driver. This is typical for volumes that represent subdirectories of a bigger shared filesystem.\\n\\nDefault is \\\"false\\\".\"", args=[d.arg(name='seLinuxMount', type=d.T.boolean)]), + withSeLinuxMount(seLinuxMount): { spec+: { seLinuxMount: seLinuxMount } }, + '#withStorageCapacity':: d.fn(help='"storageCapacity indicates that the CSI volume driver wants pod scheduling to consider the storage capacity that the driver deployment will report by creating CSIStorageCapacity objects with capacity information, if set to true.\\n\\nThe check can be enabled immediately when deploying a driver. In that case, provisioning new volumes with late binding will pause until the driver deployment has published some suitable CSIStorageCapacity object.\\n\\nAlternatively, the driver can be deployed with the field unset or false and it can be flipped later when storage capacity information has been published.\\n\\nThis field was immutable in Kubernetes <= 1.22 and now is mutable."', args=[d.arg(name='storageCapacity', type=d.T.boolean)]), withStorageCapacity(storageCapacity): { spec+: { storageCapacity: storageCapacity } }, - '#withTokenRequests':: d.fn(help="\"TokenRequests indicates the CSI driver needs pods' service account tokens it is mounting volume for to do necessary authentication. Kubelet will pass the tokens in VolumeContext in the CSI NodePublishVolume calls. The CSI driver should parse and validate the following VolumeContext: \\\"csi.storage.k8s.io/serviceAccount.tokens\\\": {\\n \\\"\u003caudience\u003e\\\": {\\n \\\"token\\\": \u003ctoken\u003e,\\n \\\"expirationTimestamp\\\": \u003cexpiration timestamp in RFC3339\u003e,\\n },\\n ...\\n}\\n\\nNote: Audience in each TokenRequest should be different and at most one token is empty string. To receive a new token after expiry, RequiresRepublish can be used to trigger NodePublishVolume periodically.\\n\\nThis is a beta feature and only available when the CSIServiceAccountToken feature is enabled.\"", args=[d.arg(name='tokenRequests', type=d.T.array)]), + '#withTokenRequests':: d.fn(help="\"tokenRequests indicates the CSI driver needs pods' service account tokens it is mounting volume for to do necessary authentication. Kubelet will pass the tokens in VolumeContext in the CSI NodePublishVolume calls. The CSI driver should parse and validate the following VolumeContext: \\\"csi.storage.k8s.io/serviceAccount.tokens\\\": {\\n \\\"\u003caudience\u003e\\\": {\\n \\\"token\\\": \u003ctoken\u003e,\\n \\\"expirationTimestamp\\\": \u003cexpiration timestamp in RFC3339\u003e,\\n },\\n ...\\n}\\n\\nNote: Audience in each TokenRequest should be different and at most one token is empty string. 
To receive a new token after expiry, RequiresRepublish can be used to trigger NodePublishVolume periodically.\"", args=[d.arg(name='tokenRequests', type=d.T.array)]), withTokenRequests(tokenRequests): { spec+: { tokenRequests: if std.isArray(v=tokenRequests) then tokenRequests else [tokenRequests] } }, - '#withTokenRequestsMixin':: d.fn(help="\"TokenRequests indicates the CSI driver needs pods' service account tokens it is mounting volume for to do necessary authentication. Kubelet will pass the tokens in VolumeContext in the CSI NodePublishVolume calls. The CSI driver should parse and validate the following VolumeContext: \\\"csi.storage.k8s.io/serviceAccount.tokens\\\": {\\n \\\"\u003caudience\u003e\\\": {\\n \\\"token\\\": \u003ctoken\u003e,\\n \\\"expirationTimestamp\\\": \u003cexpiration timestamp in RFC3339\u003e,\\n },\\n ...\\n}\\n\\nNote: Audience in each TokenRequest should be different and at most one token is empty string. To receive a new token after expiry, RequiresRepublish can be used to trigger NodePublishVolume periodically.\\n\\nThis is a beta feature and only available when the CSIServiceAccountToken feature is enabled.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='tokenRequests', type=d.T.array)]), + '#withTokenRequestsMixin':: d.fn(help="\"tokenRequests indicates the CSI driver needs pods' service account tokens it is mounting volume for to do necessary authentication. Kubelet will pass the tokens in VolumeContext in the CSI NodePublishVolume calls. The CSI driver should parse and validate the following VolumeContext: \\\"csi.storage.k8s.io/serviceAccount.tokens\\\": {\\n \\\"\u003caudience\u003e\\\": {\\n \\\"token\\\": \u003ctoken\u003e,\\n \\\"expirationTimestamp\\\": \u003cexpiration timestamp in RFC3339\u003e,\\n },\\n ...\\n}\\n\\nNote: Audience in each TokenRequest should be different and at most one token is empty string. To receive a new token after expiry, RequiresRepublish can be used to trigger NodePublishVolume periodically.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='tokenRequests', type=d.T.array)]), withTokenRequestsMixin(tokenRequests): { spec+: { tokenRequests+: if std.isArray(v=tokenRequests) then tokenRequests else [tokenRequests] } }, - '#withVolumeLifecycleModes':: d.fn(help='"volumeLifecycleModes defines what kind of volumes this CSI volume driver supports. The default if the list is empty is \\"Persistent\\", which is the usage defined by the CSI specification and implemented in Kubernetes via the usual PV/PVC mechanism. The other mode is \\"Ephemeral\\". In this mode, volumes are defined inline inside the pod spec with CSIVolumeSource and their lifecycle is tied to the lifecycle of that pod. A driver has to be aware of this because it is only going to get a NodePublishVolume call for such a volume. For more information about implementing this mode, see https://kubernetes-csi.github.io/docs/ephemeral-local-volumes.html A driver can support one or more of these modes and more modes may be added in the future. This field is beta.\\n\\nThis field is immutable."', args=[d.arg(name='volumeLifecycleModes', type=d.T.array)]), + '#withVolumeLifecycleModes':: d.fn(help='"volumeLifecycleModes defines what kind of volumes this CSI volume driver supports. The default if the list is empty is \\"Persistent\\", which is the usage defined by the CSI specification and implemented in Kubernetes via the usual PV/PVC mechanism.\\n\\nThe other mode is \\"Ephemeral\\". 
In this mode, volumes are defined inline inside the pod spec with CSIVolumeSource and their lifecycle is tied to the lifecycle of that pod. A driver has to be aware of this because it is only going to get a NodePublishVolume call for such a volume.\\n\\nFor more information about implementing this mode, see https://kubernetes-csi.github.io/docs/ephemeral-local-volumes.html A driver can support one or more of these modes and more modes may be added in the future.\\n\\nThis field is beta. This field is immutable."', args=[d.arg(name='volumeLifecycleModes', type=d.T.array)]), withVolumeLifecycleModes(volumeLifecycleModes): { spec+: { volumeLifecycleModes: if std.isArray(v=volumeLifecycleModes) then volumeLifecycleModes else [volumeLifecycleModes] } }, - '#withVolumeLifecycleModesMixin':: d.fn(help='"volumeLifecycleModes defines what kind of volumes this CSI volume driver supports. The default if the list is empty is \\"Persistent\\", which is the usage defined by the CSI specification and implemented in Kubernetes via the usual PV/PVC mechanism. The other mode is \\"Ephemeral\\". In this mode, volumes are defined inline inside the pod spec with CSIVolumeSource and their lifecycle is tied to the lifecycle of that pod. A driver has to be aware of this because it is only going to get a NodePublishVolume call for such a volume. For more information about implementing this mode, see https://kubernetes-csi.github.io/docs/ephemeral-local-volumes.html A driver can support one or more of these modes and more modes may be added in the future. This field is beta.\\n\\nThis field is immutable."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='volumeLifecycleModes', type=d.T.array)]), + '#withVolumeLifecycleModesMixin':: d.fn(help='"volumeLifecycleModes defines what kind of volumes this CSI volume driver supports. The default if the list is empty is \\"Persistent\\", which is the usage defined by the CSI specification and implemented in Kubernetes via the usual PV/PVC mechanism.\\n\\nThe other mode is \\"Ephemeral\\". In this mode, volumes are defined inline inside the pod spec with CSIVolumeSource and their lifecycle is tied to the lifecycle of that pod. A driver has to be aware of this because it is only going to get a NodePublishVolume call for such a volume.\\n\\nFor more information about implementing this mode, see https://kubernetes-csi.github.io/docs/ephemeral-local-volumes.html A driver can support one or more of these modes and more modes may be added in the future.\\n\\nThis field is beta. 
This field is immutable."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='volumeLifecycleModes', type=d.T.array)]), withVolumeLifecycleModesMixin(volumeLifecycleModes): { spec+: { volumeLifecycleModes+: if std.isArray(v=volumeLifecycleModes) then volumeLifecycleModes else [volumeLifecycleModes] } }, }, '#mixin': 'ignore', diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/storage/v1/csiDriverSpec.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/storage/v1/csiDriverSpec.libsonnet new file mode 100644 index 00000000000..a8248051e78 --- /dev/null +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/storage/v1/csiDriverSpec.libsonnet @@ -0,0 +1,26 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='csiDriverSpec', url='', help='"CSIDriverSpec is the specification of a CSIDriver."'), + '#withAttachRequired':: d.fn(help='"attachRequired indicates this CSI volume driver requires an attach operation (because it implements the CSI ControllerPublishVolume() method), and that the Kubernetes attach detach controller should call the attach volume interface which checks the volumeattachment status and waits until the volume is attached before proceeding to mounting. The CSI external-attacher coordinates with CSI volume driver and updates the volumeattachment status when the attach operation is complete. If the CSIDriverRegistry feature gate is enabled and the value is specified to false, the attach operation will be skipped. Otherwise the attach operation will be called.\\n\\nThis field is immutable."', args=[d.arg(name='attachRequired', type=d.T.boolean)]), + withAttachRequired(attachRequired): { attachRequired: attachRequired }, + '#withFsGroupPolicy':: d.fn(help="\"fsGroupPolicy defines if the underlying volume supports changing ownership and permission of the volume before being mounted. Refer to the specific FSGroupPolicy values for additional details.\\n\\nThis field is immutable.\\n\\nDefaults to ReadWriteOnceWithFSType, which will examine each volume to determine if Kubernetes should modify ownership and permissions of the volume. With the default policy the defined fsGroup will only be applied if a fstype is defined and the volume's access mode contains ReadWriteOnce.\"", args=[d.arg(name='fsGroupPolicy', type=d.T.string)]), + withFsGroupPolicy(fsGroupPolicy): { fsGroupPolicy: fsGroupPolicy }, + '#withPodInfoOnMount':: d.fn(help="\"podInfoOnMount indicates this CSI volume driver requires additional pod information (like podName, podUID, etc.) during mount operations, if set to true. If set to false, pod information will not be passed on mount. Default is false.\\n\\nThe CSI driver specifies podInfoOnMount as part of driver deployment. If true, Kubelet will pass pod information as VolumeContext in the CSI NodePublishVolume() calls. The CSI driver is responsible for parsing and validating the information passed in as VolumeContext.\\n\\nThe following VolumeContext will be passed if podInfoOnMount is set to true. This list might grow, but the prefix will be used. \\\"csi.storage.k8s.io/pod.name\\\": pod.Name \\\"csi.storage.k8s.io/pod.namespace\\\": pod.Namespace \\\"csi.storage.k8s.io/pod.uid\\\": string(pod.UID) \\\"csi.storage.k8s.io/ephemeral\\\": \\\"true\\\" if the volume is an ephemeral inline volume\\n defined by a CSIVolumeSource, otherwise \\\"false\\\"\\n\\n\\\"csi.storage.k8s.io/ephemeral\\\" is a new feature in Kubernetes 1.16. 
It is only required for drivers which support both the \\\"Persistent\\\" and \\\"Ephemeral\\\" VolumeLifecycleMode. Other drivers can leave pod info disabled and/or ignore this field. As Kubernetes 1.15 doesn't support this field, drivers can only support one mode when deployed on such a cluster and the deployment determines which mode that is, for example via a command line parameter of the driver.\\n\\nThis field is immutable.\"", args=[d.arg(name='podInfoOnMount', type=d.T.boolean)]), + withPodInfoOnMount(podInfoOnMount): { podInfoOnMount: podInfoOnMount }, + '#withRequiresRepublish':: d.fn(help='"requiresRepublish indicates the CSI driver wants `NodePublishVolume` being periodically called to reflect any possible change in the mounted volume. This field defaults to false.\\n\\nNote: After a successful initial NodePublishVolume call, subsequent calls to NodePublishVolume should only update the contents of the volume. New mount points will not be seen by a running container."', args=[d.arg(name='requiresRepublish', type=d.T.boolean)]), + withRequiresRepublish(requiresRepublish): { requiresRepublish: requiresRepublish }, + '#withSeLinuxMount':: d.fn(help="\"seLinuxMount specifies if the CSI driver supports \\\"-o context\\\" mount option.\\n\\nWhen \\\"true\\\", the CSI driver must ensure that all volumes provided by this CSI driver can be mounted separately with different `-o context` options. This is typical for storage backends that provide volumes as filesystems on block devices or as independent shared volumes. Kubernetes will call NodeStage / NodePublish with \\\"-o context=xyz\\\" mount option when mounting a ReadWriteOncePod volume used in Pod that has explicitly set SELinux context. In the future, it may be expanded to other volume AccessModes. In any case, Kubernetes will ensure that the volume is mounted only with a single SELinux context.\\n\\nWhen \\\"false\\\", Kubernetes won't pass any special SELinux mount options to the driver. This is typical for volumes that represent subdirectories of a bigger shared filesystem.\\n\\nDefault is \\\"false\\\".\"", args=[d.arg(name='seLinuxMount', type=d.T.boolean)]), + withSeLinuxMount(seLinuxMount): { seLinuxMount: seLinuxMount }, + '#withStorageCapacity':: d.fn(help='"storageCapacity indicates that the CSI volume driver wants pod scheduling to consider the storage capacity that the driver deployment will report by creating CSIStorageCapacity objects with capacity information, if set to true.\\n\\nThe check can be enabled immediately when deploying a driver. In that case, provisioning new volumes with late binding will pause until the driver deployment has published some suitable CSIStorageCapacity object.\\n\\nAlternatively, the driver can be deployed with the field unset or false and it can be flipped later when storage capacity information has been published.\\n\\nThis field was immutable in Kubernetes <= 1.22 and now is mutable."', args=[d.arg(name='storageCapacity', type=d.T.boolean)]), + withStorageCapacity(storageCapacity): { storageCapacity: storageCapacity }, + '#withTokenRequests':: d.fn(help="\"tokenRequests indicates the CSI driver needs pods' service account tokens it is mounting volume for to do necessary authentication. Kubelet will pass the tokens in VolumeContext in the CSI NodePublishVolume calls. 
The CSI driver should parse and validate the following VolumeContext: \\\"csi.storage.k8s.io/serviceAccount.tokens\\\": {\\n \\\"\u003caudience\u003e\\\": {\\n \\\"token\\\": \u003ctoken\u003e,\\n \\\"expirationTimestamp\\\": \u003cexpiration timestamp in RFC3339\u003e,\\n },\\n ...\\n}\\n\\nNote: Audience in each TokenRequest should be different and at most one token is empty string. To receive a new token after expiry, RequiresRepublish can be used to trigger NodePublishVolume periodically.\"", args=[d.arg(name='tokenRequests', type=d.T.array)]), + withTokenRequests(tokenRequests): { tokenRequests: if std.isArray(v=tokenRequests) then tokenRequests else [tokenRequests] }, + '#withTokenRequestsMixin':: d.fn(help="\"tokenRequests indicates the CSI driver needs pods' service account tokens it is mounting volume for to do necessary authentication. Kubelet will pass the tokens in VolumeContext in the CSI NodePublishVolume calls. The CSI driver should parse and validate the following VolumeContext: \\\"csi.storage.k8s.io/serviceAccount.tokens\\\": {\\n \\\"\u003caudience\u003e\\\": {\\n \\\"token\\\": \u003ctoken\u003e,\\n \\\"expirationTimestamp\\\": \u003cexpiration timestamp in RFC3339\u003e,\\n },\\n ...\\n}\\n\\nNote: Audience in each TokenRequest should be different and at most one token is empty string. To receive a new token after expiry, RequiresRepublish can be used to trigger NodePublishVolume periodically.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='tokenRequests', type=d.T.array)]), + withTokenRequestsMixin(tokenRequests): { tokenRequests+: if std.isArray(v=tokenRequests) then tokenRequests else [tokenRequests] }, + '#withVolumeLifecycleModes':: d.fn(help='"volumeLifecycleModes defines what kind of volumes this CSI volume driver supports. The default if the list is empty is \\"Persistent\\", which is the usage defined by the CSI specification and implemented in Kubernetes via the usual PV/PVC mechanism.\\n\\nThe other mode is \\"Ephemeral\\". In this mode, volumes are defined inline inside the pod spec with CSIVolumeSource and their lifecycle is tied to the lifecycle of that pod. A driver has to be aware of this because it is only going to get a NodePublishVolume call for such a volume.\\n\\nFor more information about implementing this mode, see https://kubernetes-csi.github.io/docs/ephemeral-local-volumes.html A driver can support one or more of these modes and more modes may be added in the future.\\n\\nThis field is beta. This field is immutable."', args=[d.arg(name='volumeLifecycleModes', type=d.T.array)]), + withVolumeLifecycleModes(volumeLifecycleModes): { volumeLifecycleModes: if std.isArray(v=volumeLifecycleModes) then volumeLifecycleModes else [volumeLifecycleModes] }, + '#withVolumeLifecycleModesMixin':: d.fn(help='"volumeLifecycleModes defines what kind of volumes this CSI volume driver supports. The default if the list is empty is \\"Persistent\\", which is the usage defined by the CSI specification and implemented in Kubernetes via the usual PV/PVC mechanism.\\n\\nThe other mode is \\"Ephemeral\\". In this mode, volumes are defined inline inside the pod spec with CSIVolumeSource and their lifecycle is tied to the lifecycle of that pod. 
A driver has to be aware of this because it is only going to get a NodePublishVolume call for such a volume.\\n\\nFor more information about implementing this mode, see https://kubernetes-csi.github.io/docs/ephemeral-local-volumes.html A driver can support one or more of these modes and more modes may be added in the future.\\n\\nThis field is beta. This field is immutable."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='volumeLifecycleModes', type=d.T.array)]), + withVolumeLifecycleModesMixin(volumeLifecycleModes): { volumeLifecycleModes+: if std.isArray(v=volumeLifecycleModes) then volumeLifecycleModes else [volumeLifecycleModes] }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1/csiNode.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/storage/v1/csiNode.libsonnet similarity index 85% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1/csiNode.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/storage/v1/csiNode.libsonnet index d4540670143..b052ab088f6 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1/csiNode.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/storage/v1/csiNode.libsonnet @@ -3,12 +3,10 @@ '#':: d.pkg(name='csiNode', url='', help="\"CSINode holds information about all CSI drivers installed on a node. CSI drivers do not need to create the CSINode object directly. As long as they use the node-driver-registrar sidecar container, the kubelet will automatically populate the CSINode object for the CSI driver as part of kubelet plugin registration. CSINode has the same name as a node. If the object is missing, it means either there are no CSI Drivers available on the node, or the Kubelet version is low enough that it doesn't create this object. CSINode has an OwnerReference that points to the corresponding node object.\""), '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. 
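The csiDriverSpec helpers added in the new file above (and wrapped by the csiDriver object's spec.* functions earlier in this diff) gain the 1.29-only seLinuxMount field. A minimal usage sketch, again not part of the patch itself; it assumes the same vendored import path as in the earlier sketch, and the driver name and field values are illustrative only.

// Illustrative usage sketch only; not part of the vendored diff.
local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';
local csiDriver = k.storage.v1.csiDriver;

{
  // Hypothetical driver name; values follow the defaults described in the help text.
  example_csi_driver:
    csiDriver.new('csi.example.com')
    + csiDriver.spec.withAttachRequired(true)
    + csiDriver.spec.withPodInfoOnMount(false)
    + csiDriver.spec.withSeLinuxMount(false)  // new field in the 1.29 generation
    + csiDriver.spec.withVolumeLifecycleModes(['Persistent']),
}

Because every with* function here returns a plain object patch ({ spec+: { ... } }), the + concatenations above merge into one CSIDriver manifest in the same way the metadata helpers do.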
More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -19,21 +17,21 @@ withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. 
The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { metadata+: { generateName: generateName } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { metadata+: { labels+: labels } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. 
This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { metadata+: { namespace: namespace } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. 
There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, @@ -41,9 +39,9 @@ withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { metadata+: { uid: uid } }, }, '#new':: d.fn(help='new returns an instance of CSINode', args=[d.arg(name='name', type=d.T.string)]), diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1/csiNodeDriver.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/storage/v1/csiNodeDriver.libsonnet similarity index 80% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1/csiNodeDriver.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/storage/v1/csiNodeDriver.libsonnet index cceebc97156..b92b8dc492b 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1/csiNodeDriver.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/storage/v1/csiNodeDriver.libsonnet @@ -3,10 +3,10 @@ '#':: d.pkg(name='csiNodeDriver', url='', help='"CSINodeDriver holds information about the specification of one CSI driver installed on a node"'), '#allocatable':: d.obj(help='"VolumeNodeResources is a set of resource limits for scheduling of volumes."'), allocatable: { - '#withCount':: d.fn(help='"Maximum number of unique volumes managed by the CSI driver that can be used on a node. A volume that is both attached and mounted on a node is considered to be used once, not twice. The same rule applies for a unique volume that is shared among multiple pods on the same node. If this field is not specified, then the supported number of volumes on this node is unbounded."', args=[d.arg(name='count', type=d.T.integer)]), + '#withCount':: d.fn(help='"count indicates the maximum number of unique volumes managed by the CSI driver that can be used on a node. A volume that is both attached and mounted on a node is considered to be used once, not twice. The same rule applies for a unique volume that is shared among multiple pods on the same node. If this field is not specified, then the supported number of volumes on this node is unbounded."', args=[d.arg(name='count', type=d.T.integer)]), withCount(count): { allocatable+: { count: count } }, }, - '#withName':: d.fn(help='"This is the name of the CSI driver that this object refers to. This MUST be the same name returned by the CSI GetPluginName() call for that driver."', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"name represents the name of the CSI driver that this object refers to. This MUST be the same name returned by the CSI GetPluginName() call for that driver."', args=[d.arg(name='name', type=d.T.string)]), withName(name): { name: name }, '#withNodeID':: d.fn(help='"nodeID of the node from the driver point of view. This field enables Kubernetes to communicate with storage systems that do not share the same nomenclature for nodes. For example, Kubernetes may refer to a given node as \\"node1\\", but the storage system may refer to the same node as \\"nodeA\\". When Kubernetes issues a command to the storage system to attach a volume to a specific node, it can use this field to refer to the node name using the ID that the storage system will understand, e.g. \\"nodeA\\" instead of \\"node1\\". 
This field is required."', args=[d.arg(name='nodeID', type=d.T.string)]), withNodeID(nodeID): { nodeID: nodeID }, diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1/csiNodeSpec.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/storage/v1/csiNodeSpec.libsonnet similarity index 100% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1/csiNodeSpec.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/storage/v1/csiNodeSpec.libsonnet diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1beta1/csiStorageCapacity.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/storage/v1/csiStorageCapacity.libsonnet similarity index 63% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1beta1/csiStorageCapacity.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/storage/v1/csiStorageCapacity.libsonnet index 8b3fcb68490..c1e1484d5aa 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1beta1/csiStorageCapacity.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/storage/v1/csiStorageCapacity.libsonnet @@ -1,14 +1,12 @@ { local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='csiStorageCapacity', url='', help='"CSIStorageCapacity stores the result of one CSI GetCapacity call. For a given StorageClass, this describes the available capacity in a particular topology segment. This can be used when considering where to instantiate new PersistentVolumes.\\n\\nFor example this can express things like: - StorageClass \\"standard\\" has \\"1234 GiB\\" available in \\"topology.kubernetes.io/zone=us-east1\\" - StorageClass \\"localssd\\" has \\"10 GiB\\" available in \\"kubernetes.io/hostname=knode-abc123\\"\\n\\nThe following three cases all imply that no capacity is available for a certain combination: - no object exists with suitable topology and storage class name - such an object exists, but the capacity is unset - such an object exists, but the capacity is zero\\n\\nThe producer of these objects can decide which approach is more suitable.\\n\\nThey are consumed by the kube-scheduler if the CSIStorageCapacity beta feature gate is enabled there and a CSI driver opts into capacity-aware scheduling with CSIDriver.StorageCapacity."'), + '#':: d.pkg(name='csiStorageCapacity', url='', help='"CSIStorageCapacity stores the result of one CSI GetCapacity call. For a given StorageClass, this describes the available capacity in a particular topology segment. 
This can be used when considering where to instantiate new PersistentVolumes.\\n\\nFor example this can express things like: - StorageClass \\"standard\\" has \\"1234 GiB\\" available in \\"topology.kubernetes.io/zone=us-east1\\" - StorageClass \\"localssd\\" has \\"10 GiB\\" available in \\"kubernetes.io/hostname=knode-abc123\\"\\n\\nThe following three cases all imply that no capacity is available for a certain combination: - no object exists with suitable topology and storage class name - such an object exists, but the capacity is unset - such an object exists, but the capacity is zero\\n\\nThe producer of these objects can decide which approach is more suitable.\\n\\nThey are consumed by the kube-scheduler when a CSI driver opts into capacity-aware scheduling with CSIDriverSpec.StorageCapacity. The scheduler compares the MaximumVolumeSize against the requested size of pending volumes to filter out unsuitable nodes. If MaximumVolumeSize is unset, it falls back to a comparison against the less precise Capacity. If that is also unset, the scheduler assumes that capacity is insufficient and tries some other node."'), '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. 
This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -19,21 +17,21 @@ withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. 
This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { metadata+: { generateName: generateName } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { metadata+: { labels+: labels } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". 
The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { metadata+: { namespace: namespace } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, @@ -41,14 +39,14 @@ withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. 
They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { metadata+: { uid: uid } }, }, '#new':: d.fn(help='new returns an instance of CSIStorageCapacity', args=[d.arg(name='name', type=d.T.string)]), new(name): { - apiVersion: 'storage.k8s.io/v1beta1', + apiVersion: 'storage.k8s.io/v1', kind: 'CSIStorageCapacity', } + self.metadata.withName(name=name), '#nodeTopology':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), @@ -62,11 +60,11 @@ '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), withMatchLabelsMixin(matchLabels): { nodeTopology+: { matchLabels+: matchLabels } }, }, - '#withCapacity':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. 
| .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='capacity', type=d.T.string)]), + '#withCapacity':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n``` \u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n\\n\\t(Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. 
| .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n\\n\\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n\\n\\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e ```\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n\\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\\n\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n\\n- 1.5 will be serialized as \\\"1500m\\\" - 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='capacity', type=d.T.string)]), withCapacity(capacity): { capacity: capacity }, - '#withMaximumVolumeSize':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. 
| .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='maximumVolumeSize', type=d.T.string)]), + '#withMaximumVolumeSize':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n``` \u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n\\n\\t(Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. 
| .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n\\n\\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n\\n\\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e ```\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n\\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\\n\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n\\n- 1.5 will be serialized as \\\"1500m\\\" - 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='maximumVolumeSize', type=d.T.string)]), withMaximumVolumeSize(maximumVolumeSize): { maximumVolumeSize: maximumVolumeSize }, - '#withStorageClassName':: d.fn(help='"The name of the StorageClass that the reported capacity applies to. It must meet the same requirements as the name of a StorageClass object (non-empty, DNS subdomain). If that object no longer exists, the CSIStorageCapacity object is obsolete and should be removed by its creator. This field is immutable."', args=[d.arg(name='storageClassName', type=d.T.string)]), + '#withStorageClassName':: d.fn(help='"storageClassName represents the name of the StorageClass that the reported capacity applies to. It must meet the same requirements as the name of a StorageClass object (non-empty, DNS subdomain). If that object no longer exists, the CSIStorageCapacity object is obsolete and should be removed by its creator. 
This field is immutable."', args=[d.arg(name='storageClassName', type=d.T.string)]), withStorageClassName(storageClassName): { storageClassName: storageClassName }, '#mixin': 'ignore', mixin: self, diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1beta1/main.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/storage/v1/main.libsonnet similarity index 94% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1beta1/main.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/storage/v1/main.libsonnet index 9413b010121..10082c47085 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1beta1/main.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/storage/v1/main.libsonnet @@ -1,6 +1,6 @@ { local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='v1beta1', url='', help=''), + '#':: d.pkg(name='v1', url='', help=''), csiDriver: (import 'csiDriver.libsonnet'), csiDriverSpec: (import 'csiDriverSpec.libsonnet'), csiNode: (import 'csiNode.libsonnet'), diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1/storageClass.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/storage/v1/storageClass.libsonnet similarity index 74% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1/storageClass.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/storage/v1/storageClass.libsonnet index f627289f551..15004ad00dc 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1/storageClass.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/storage/v1/storageClass.libsonnet @@ -3,12 +3,10 @@ '#':: d.pkg(name='storageClass', url='', help='"StorageClass describes the parameters for a class of storage for which PersistentVolumes can be dynamically provisioned.\\n\\nStorageClasses are non-namespaced; the name of the storage class according to etcd is in ObjectMeta.Name."'), '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. 
More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -19,21 +17,21 @@ withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. 
The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { metadata+: { generateName: generateName } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { metadata+: { labels+: labels } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. 
This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { metadata+: { namespace: namespace } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. 
There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, @@ -41,9 +39,9 @@ withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { metadata+: { uid: uid } }, }, '#new':: d.fn(help='new returns an instance of StorageClass', args=[d.arg(name='name', type=d.T.string)]), @@ -51,25 +49,25 @@ apiVersion: 'storage.k8s.io/v1', kind: 'StorageClass', } + self.metadata.withName(name=name), - '#withAllowVolumeExpansion':: d.fn(help='"AllowVolumeExpansion shows whether the storage class allow volume expand"', args=[d.arg(name='allowVolumeExpansion', type=d.T.boolean)]), + '#withAllowVolumeExpansion':: d.fn(help='"allowVolumeExpansion shows whether the storage class allow volume expand."', args=[d.arg(name='allowVolumeExpansion', type=d.T.boolean)]), withAllowVolumeExpansion(allowVolumeExpansion): { allowVolumeExpansion: allowVolumeExpansion }, - '#withAllowedTopologies':: d.fn(help='"Restrict the node topologies where volumes can be dynamically provisioned. Each volume plugin defines its own supported topology specifications. An empty TopologySelectorTerm list means there is no topology restriction. 
This field is only honored by servers that enable the VolumeScheduling feature."', args=[d.arg(name='allowedTopologies', type=d.T.array)]), + '#withAllowedTopologies':: d.fn(help='"allowedTopologies restrict the node topologies where volumes can be dynamically provisioned. Each volume plugin defines its own supported topology specifications. An empty TopologySelectorTerm list means there is no topology restriction. This field is only honored by servers that enable the VolumeScheduling feature."', args=[d.arg(name='allowedTopologies', type=d.T.array)]), withAllowedTopologies(allowedTopologies): { allowedTopologies: if std.isArray(v=allowedTopologies) then allowedTopologies else [allowedTopologies] }, - '#withAllowedTopologiesMixin':: d.fn(help='"Restrict the node topologies where volumes can be dynamically provisioned. Each volume plugin defines its own supported topology specifications. An empty TopologySelectorTerm list means there is no topology restriction. This field is only honored by servers that enable the VolumeScheduling feature."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='allowedTopologies', type=d.T.array)]), + '#withAllowedTopologiesMixin':: d.fn(help='"allowedTopologies restrict the node topologies where volumes can be dynamically provisioned. Each volume plugin defines its own supported topology specifications. An empty TopologySelectorTerm list means there is no topology restriction. This field is only honored by servers that enable the VolumeScheduling feature."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='allowedTopologies', type=d.T.array)]), withAllowedTopologiesMixin(allowedTopologies): { allowedTopologies+: if std.isArray(v=allowedTopologies) then allowedTopologies else [allowedTopologies] }, - '#withMountOptions':: d.fn(help='"Dynamically provisioned PersistentVolumes of this storage class are created with these mountOptions, e.g. [\\"ro\\", \\"soft\\"]. Not validated - mount of the PVs will simply fail if one is invalid."', args=[d.arg(name='mountOptions', type=d.T.array)]), + '#withMountOptions':: d.fn(help='"mountOptions controls the mountOptions for dynamically provisioned PersistentVolumes of this storage class. e.g. [\\"ro\\", \\"soft\\"]. Not validated - mount of the PVs will simply fail if one is invalid."', args=[d.arg(name='mountOptions', type=d.T.array)]), withMountOptions(mountOptions): { mountOptions: if std.isArray(v=mountOptions) then mountOptions else [mountOptions] }, - '#withMountOptionsMixin':: d.fn(help='"Dynamically provisioned PersistentVolumes of this storage class are created with these mountOptions, e.g. [\\"ro\\", \\"soft\\"]. Not validated - mount of the PVs will simply fail if one is invalid."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='mountOptions', type=d.T.array)]), + '#withMountOptionsMixin':: d.fn(help='"mountOptions controls the mountOptions for dynamically provisioned PersistentVolumes of this storage class. e.g. [\\"ro\\", \\"soft\\"]. 
Not validated - mount of the PVs will simply fail if one is invalid."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='mountOptions', type=d.T.array)]), withMountOptionsMixin(mountOptions): { mountOptions+: if std.isArray(v=mountOptions) then mountOptions else [mountOptions] }, - '#withParameters':: d.fn(help='"Parameters holds the parameters for the provisioner that should create volumes of this storage class."', args=[d.arg(name='parameters', type=d.T.object)]), + '#withParameters':: d.fn(help='"parameters holds the parameters for the provisioner that should create volumes of this storage class."', args=[d.arg(name='parameters', type=d.T.object)]), withParameters(parameters): { parameters: parameters }, - '#withParametersMixin':: d.fn(help='"Parameters holds the parameters for the provisioner that should create volumes of this storage class."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='parameters', type=d.T.object)]), + '#withParametersMixin':: d.fn(help='"parameters holds the parameters for the provisioner that should create volumes of this storage class."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='parameters', type=d.T.object)]), withParametersMixin(parameters): { parameters+: parameters }, - '#withProvisioner':: d.fn(help='"Provisioner indicates the type of the provisioner."', args=[d.arg(name='provisioner', type=d.T.string)]), + '#withProvisioner':: d.fn(help='"provisioner indicates the type of the provisioner."', args=[d.arg(name='provisioner', type=d.T.string)]), withProvisioner(provisioner): { provisioner: provisioner }, - '#withReclaimPolicy':: d.fn(help='"Dynamically provisioned PersistentVolumes of this storage class are created with this reclaimPolicy. Defaults to Delete."', args=[d.arg(name='reclaimPolicy', type=d.T.string)]), + '#withReclaimPolicy':: d.fn(help='"reclaimPolicy controls the reclaimPolicy for dynamically provisioned PersistentVolumes of this storage class. Defaults to Delete."', args=[d.arg(name='reclaimPolicy', type=d.T.string)]), withReclaimPolicy(reclaimPolicy): { reclaimPolicy: reclaimPolicy }, - '#withVolumeBindingMode':: d.fn(help='"VolumeBindingMode indicates how PersistentVolumeClaims should be provisioned and bound. When unset, VolumeBindingImmediate is used. This field is only honored by servers that enable the VolumeScheduling feature."', args=[d.arg(name='volumeBindingMode', type=d.T.string)]), + '#withVolumeBindingMode':: d.fn(help='"volumeBindingMode indicates how PersistentVolumeClaims should be provisioned and bound. When unset, VolumeBindingImmediate is used. 
This field is only honored by servers that enable the VolumeScheduling feature."', args=[d.arg(name='volumeBindingMode', type=d.T.string)]), withVolumeBindingMode(volumeBindingMode): { volumeBindingMode: volumeBindingMode }, '#mixin': 'ignore', mixin: self, diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1/tokenRequest.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/storage/v1/tokenRequest.libsonnet similarity index 84% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1/tokenRequest.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/storage/v1/tokenRequest.libsonnet index 38de143e7be..d7a557853c0 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1/tokenRequest.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/storage/v1/tokenRequest.libsonnet @@ -1,9 +1,9 @@ { local d = (import 'doc-util/main.libsonnet'), '#':: d.pkg(name='tokenRequest', url='', help='"TokenRequest contains parameters of a service account token."'), - '#withAudience':: d.fn(help='"Audience is the intended audience of the token in \\"TokenRequestSpec\\". It will default to the audiences of kube apiserver."', args=[d.arg(name='audience', type=d.T.string)]), + '#withAudience':: d.fn(help='"audience is the intended audience of the token in \\"TokenRequestSpec\\". It will default to the audiences of kube apiserver."', args=[d.arg(name='audience', type=d.T.string)]), withAudience(audience): { audience: audience }, - '#withExpirationSeconds':: d.fn(help='"ExpirationSeconds is the duration of validity of the token in \\"TokenRequestSpec\\". It has the same default value of \\"ExpirationSeconds\\" in \\"TokenRequestSpec\\"."', args=[d.arg(name='expirationSeconds', type=d.T.integer)]), + '#withExpirationSeconds':: d.fn(help='"expirationSeconds is the duration of validity of the token in \\"TokenRequestSpec\\". 
It has the same default value of \\"ExpirationSeconds\\" in \\"TokenRequestSpec\\"."', args=[d.arg(name='expirationSeconds', type=d.T.integer)]), withExpirationSeconds(expirationSeconds): { expirationSeconds: expirationSeconds }, '#mixin': 'ignore', mixin: self, diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1/volumeAttachment.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/storage/v1/volumeAttachment.libsonnet similarity index 65% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1/volumeAttachment.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/storage/v1/volumeAttachment.libsonnet index c4897db2a84..20b46a5cc83 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1/volumeAttachment.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/storage/v1/volumeAttachment.libsonnet @@ -3,12 +3,10 @@ '#':: d.pkg(name='volumeAttachment', url='', help='"VolumeAttachment captures the intent to attach or detach the specified volume to/from the specified node.\\n\\nVolumeAttachment objects are non-namespaced."'), '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. 
This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -19,21 +17,21 @@ withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. 
This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { metadata+: { generateName: generateName } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { metadata+: { labels+: labels } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". 
The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { metadata+: { namespace: namespace } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, @@ -41,9 +39,9 @@ withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. 
They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { metadata+: { uid: uid } }, }, '#new':: d.fn(help='new returns an instance of VolumeAttachment', args=[d.arg(name='name', type=d.T.string)]), @@ -59,77 +57,77 @@ inlineVolumeSpec: { '#awsElasticBlockStore':: d.obj(help='"Represents a Persistent Disk resource in AWS.\\n\\nAn AWS EBS disk must exist before mounting to a container. The disk must also be in the same AWS zone as the kubelet. An AWS EBS disk can only be mounted as read/write once. AWS EBS volumes support ownership management and SELinux relabeling."'), awsElasticBlockStore: { - '#withFsType':: d.fn(help='"Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore"', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType is the filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore"', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { spec+: { source+: { inlineVolumeSpec+: { awsElasticBlockStore+: { fsType: fsType } } } } }, - '#withPartition':: d.fn(help='"The partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \\"1\\". 
Similarly, the volume partition for /dev/sda is \\"0\\" (or you can leave the property empty)."', args=[d.arg(name='partition', type=d.T.integer)]), + '#withPartition':: d.fn(help='"partition is the partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \\"1\\". Similarly, the volume partition for /dev/sda is \\"0\\" (or you can leave the property empty)."', args=[d.arg(name='partition', type=d.T.integer)]), withPartition(partition): { spec+: { source+: { inlineVolumeSpec+: { awsElasticBlockStore+: { partition: partition } } } } }, - '#withReadOnly':: d.fn(help='"Specify \\"true\\" to force and set the ReadOnly property in VolumeMounts to \\"true\\". If omitted, the default is \\"false\\". More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore"', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly value true will force the readOnly setting in VolumeMounts. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore"', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { spec+: { source+: { inlineVolumeSpec+: { awsElasticBlockStore+: { readOnly: readOnly } } } } }, - '#withVolumeID':: d.fn(help='"Unique ID of the persistent disk resource in AWS (Amazon EBS volume). More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore"', args=[d.arg(name='volumeID', type=d.T.string)]), + '#withVolumeID':: d.fn(help='"volumeID is unique ID of the persistent disk resource in AWS (Amazon EBS volume). More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore"', args=[d.arg(name='volumeID', type=d.T.string)]), withVolumeID(volumeID): { spec+: { source+: { inlineVolumeSpec+: { awsElasticBlockStore+: { volumeID: volumeID } } } } }, }, '#azureDisk':: d.obj(help='"AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod."'), azureDisk: { - '#withCachingMode':: d.fn(help='"Host Caching mode: None, Read Only, Read Write."', args=[d.arg(name='cachingMode', type=d.T.string)]), + '#withCachingMode':: d.fn(help='"cachingMode is the Host Caching mode: None, Read Only, Read Write."', args=[d.arg(name='cachingMode', type=d.T.string)]), withCachingMode(cachingMode): { spec+: { source+: { inlineVolumeSpec+: { azureDisk+: { cachingMode: cachingMode } } } } }, - '#withDiskName':: d.fn(help='"The Name of the data disk in the blob storage"', args=[d.arg(name='diskName', type=d.T.string)]), + '#withDiskName':: d.fn(help='"diskName is the Name of the data disk in the blob storage"', args=[d.arg(name='diskName', type=d.T.string)]), withDiskName(diskName): { spec+: { source+: { inlineVolumeSpec+: { azureDisk+: { diskName: diskName } } } } }, - '#withDiskURI':: d.fn(help='"The URI the data disk in the blob storage"', args=[d.arg(name='diskURI', type=d.T.string)]), + '#withDiskURI':: d.fn(help='"diskURI is the URI of data disk in the blob storage"', args=[d.arg(name='diskURI', type=d.T.string)]), withDiskURI(diskURI): { spec+: { source+: { inlineVolumeSpec+: { azureDisk+: { diskURI: diskURI } } } } }, - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType is Filesystem type to mount. 
Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { spec+: { source+: { inlineVolumeSpec+: { azureDisk+: { fsType: fsType } } } } }, - '#withKind':: d.fn(help='"Expected values Shared: multiple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared"', args=[d.arg(name='kind', type=d.T.string)]), + '#withKind':: d.fn(help='"kind expected values are Shared: multiple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared"', args=[d.arg(name='kind', type=d.T.string)]), withKind(kind): { spec+: { source+: { inlineVolumeSpec+: { azureDisk+: { kind: kind } } } } }, - '#withReadOnly':: d.fn(help='"Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { spec+: { source+: { inlineVolumeSpec+: { azureDisk+: { readOnly: readOnly } } } } }, }, '#azureFile':: d.obj(help='"AzureFile represents an Azure File Service mount on the host and bind mount to the pod."'), azureFile: { - '#withReadOnly':: d.fn(help='"Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly defaults to false (read/write). 
ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { spec+: { source+: { inlineVolumeSpec+: { azureFile+: { readOnly: readOnly } } } } }, - '#withSecretName':: d.fn(help='"the name of secret that contains Azure Storage Account Name and Key"', args=[d.arg(name='secretName', type=d.T.string)]), + '#withSecretName':: d.fn(help='"secretName is the name of secret that contains Azure Storage Account Name and Key"', args=[d.arg(name='secretName', type=d.T.string)]), withSecretName(secretName): { spec+: { source+: { inlineVolumeSpec+: { azureFile+: { secretName: secretName } } } } }, - '#withSecretNamespace':: d.fn(help='"the namespace of the secret that contains Azure Storage Account Name and Key default is the same as the Pod"', args=[d.arg(name='secretNamespace', type=d.T.string)]), + '#withSecretNamespace':: d.fn(help='"secretNamespace is the namespace of the secret that contains Azure Storage Account Name and Key default is the same as the Pod"', args=[d.arg(name='secretNamespace', type=d.T.string)]), withSecretNamespace(secretNamespace): { spec+: { source+: { inlineVolumeSpec+: { azureFile+: { secretNamespace: secretNamespace } } } } }, - '#withShareName':: d.fn(help='"Share Name"', args=[d.arg(name='shareName', type=d.T.string)]), + '#withShareName':: d.fn(help='"shareName is the azure Share Name"', args=[d.arg(name='shareName', type=d.T.string)]), withShareName(shareName): { spec+: { source+: { inlineVolumeSpec+: { azureFile+: { shareName: shareName } } } } }, }, '#cephfs':: d.obj(help='"Represents a Ceph Filesystem mount that lasts the lifetime of a pod Cephfs volumes do not support ownership management or SELinux relabeling."'), cephfs: { '#secretRef':: d.obj(help='"SecretReference represents a Secret Reference. 
It has enough information to retrieve secret in any namespace"'), secretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), withName(name): { spec+: { source+: { inlineVolumeSpec+: { cephfs+: { secretRef+: { name: name } } } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { spec+: { source+: { inlineVolumeSpec+: { cephfs+: { secretRef+: { namespace: namespace } } } } } }, }, - '#withMonitors':: d.fn(help='"Required: Monitors is a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='monitors', type=d.T.array)]), + '#withMonitors':: d.fn(help='"monitors is Required: Monitors is a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='monitors', type=d.T.array)]), withMonitors(monitors): { spec+: { source+: { inlineVolumeSpec+: { cephfs+: { monitors: if std.isArray(v=monitors) then monitors else [monitors] } } } } }, - '#withMonitorsMixin':: d.fn(help='"Required: Monitors is a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='monitors', type=d.T.array)]), + '#withMonitorsMixin':: d.fn(help='"monitors is Required: Monitors is a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='monitors', type=d.T.array)]), withMonitorsMixin(monitors): { spec+: { source+: { inlineVolumeSpec+: { cephfs+: { monitors+: if std.isArray(v=monitors) then monitors else [monitors] } } } } }, - '#withPath':: d.fn(help='"Optional: Used as the mounted root, rather than the full Ceph tree, default is /"', args=[d.arg(name='path', type=d.T.string)]), + '#withPath':: d.fn(help='"path is Optional: Used as the mounted root, rather than the full Ceph tree, default is /"', args=[d.arg(name='path', type=d.T.string)]), withPath(path): { spec+: { source+: { inlineVolumeSpec+: { cephfs+: { path: path } } } } }, - '#withReadOnly':: d.fn(help='"Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly is Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. 
More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { spec+: { source+: { inlineVolumeSpec+: { cephfs+: { readOnly: readOnly } } } } }, - '#withSecretFile':: d.fn(help='"Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='secretFile', type=d.T.string)]), + '#withSecretFile':: d.fn(help='"secretFile is Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='secretFile', type=d.T.string)]), withSecretFile(secretFile): { spec+: { source+: { inlineVolumeSpec+: { cephfs+: { secretFile: secretFile } } } } }, - '#withUser':: d.fn(help='"Optional: User is the rados user name, default is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='user', type=d.T.string)]), + '#withUser':: d.fn(help='"user is Optional: User is the rados user name, default is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='user', type=d.T.string)]), withUser(user): { spec+: { source+: { inlineVolumeSpec+: { cephfs+: { user: user } } } } }, }, '#cinder':: d.obj(help='"Represents a cinder volume resource in Openstack. A Cinder volume must exist before mounting to a container. The volume must also be in the same region as the kubelet. Cinder volumes support ownership management and SELinux relabeling."'), cinder: { '#secretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), secretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), withName(name): { spec+: { source+: { inlineVolumeSpec+: { cinder+: { secretRef+: { name: name } } } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { spec+: { source+: { inlineVolumeSpec+: { cinder+: { secretRef+: { namespace: namespace } } } } } }, }, - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://examples.k8s.io/mysql-cinder-pd/README.md"', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType Filesystem type to mount. Must be a filesystem type supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://examples.k8s.io/mysql-cinder-pd/README.md"', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { spec+: { source+: { inlineVolumeSpec+: { cinder+: { fsType: fsType } } } } }, - '#withReadOnly':: d.fn(help='"Optional: Defaults to false (read/write). 
ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/mysql-cinder-pd/README.md"', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly is Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/mysql-cinder-pd/README.md"', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { spec+: { source+: { inlineVolumeSpec+: { cinder+: { readOnly: readOnly } } } } }, - '#withVolumeID':: d.fn(help='"volume id used to identify the volume in cinder. More info: https://examples.k8s.io/mysql-cinder-pd/README.md"', args=[d.arg(name='volumeID', type=d.T.string)]), + '#withVolumeID':: d.fn(help='"volumeID used to identify the volume in cinder. More info: https://examples.k8s.io/mysql-cinder-pd/README.md"', args=[d.arg(name='volumeID', type=d.T.string)]), withVolumeID(volumeID): { spec+: { source+: { inlineVolumeSpec+: { cinder+: { volumeID: volumeID } } } } }, }, '#claimRef':: d.obj(help='"ObjectReference contains enough information to let you inspect or modify the referred object."'), @@ -153,164 +151,171 @@ csi: { '#controllerExpandSecretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), controllerExpandSecretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), withName(name): { spec+: { source+: { inlineVolumeSpec+: { csi+: { controllerExpandSecretRef+: { name: name } } } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { spec+: { source+: { inlineVolumeSpec+: { csi+: { controllerExpandSecretRef+: { namespace: namespace } } } } } }, }, '#controllerPublishSecretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), controllerPublishSecretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), withName(name): { spec+: { source+: { inlineVolumeSpec+: { csi+: { controllerPublishSecretRef+: { name: name } } } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { spec+: { source+: { inlineVolumeSpec+: { csi+: { controllerPublishSecretRef+: { namespace: namespace } } } } } }, }, + '#nodeExpandSecretRef':: d.obj(help='"SecretReference represents a Secret Reference. 
It has enough information to retrieve secret in any namespace"'), + nodeExpandSecretRef: { + '#withName':: d.fn(help='"name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { spec+: { source+: { inlineVolumeSpec+: { csi+: { nodeExpandSecretRef+: { name: name } } } } } }, + '#withNamespace':: d.fn(help='"namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), + withNamespace(namespace): { spec+: { source+: { inlineVolumeSpec+: { csi+: { nodeExpandSecretRef+: { namespace: namespace } } } } } }, + }, '#nodePublishSecretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), nodePublishSecretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), withName(name): { spec+: { source+: { inlineVolumeSpec+: { csi+: { nodePublishSecretRef+: { name: name } } } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { spec+: { source+: { inlineVolumeSpec+: { csi+: { nodePublishSecretRef+: { namespace: namespace } } } } } }, }, '#nodeStageSecretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), nodeStageSecretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), withName(name): { spec+: { source+: { inlineVolumeSpec+: { csi+: { nodeStageSecretRef+: { name: name } } } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { spec+: { source+: { inlineVolumeSpec+: { csi+: { nodeStageSecretRef+: { namespace: namespace } } } } } }, }, - '#withDriver':: d.fn(help='"Driver is the name of the driver to use for this volume. Required."', args=[d.arg(name='driver', type=d.T.string)]), + '#withDriver':: d.fn(help='"driver is the name of the driver to use for this volume. Required."', args=[d.arg(name='driver', type=d.T.string)]), withDriver(driver): { spec+: { source+: { inlineVolumeSpec+: { csi+: { driver: driver } } } } }, - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\"."', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType to mount. Must be a filesystem type supported by the host operating system. Ex. 
\\"ext4\\", \\"xfs\\", \\"ntfs\\"."', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { spec+: { source+: { inlineVolumeSpec+: { csi+: { fsType: fsType } } } } }, - '#withReadOnly':: d.fn(help='"Optional: The value to pass to ControllerPublishVolumeRequest. Defaults to false (read/write)."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly value to pass to ControllerPublishVolumeRequest. Defaults to false (read/write)."', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { spec+: { source+: { inlineVolumeSpec+: { csi+: { readOnly: readOnly } } } } }, - '#withVolumeAttributes':: d.fn(help='"Attributes of the volume to publish."', args=[d.arg(name='volumeAttributes', type=d.T.object)]), + '#withVolumeAttributes':: d.fn(help='"volumeAttributes of the volume to publish."', args=[d.arg(name='volumeAttributes', type=d.T.object)]), withVolumeAttributes(volumeAttributes): { spec+: { source+: { inlineVolumeSpec+: { csi+: { volumeAttributes: volumeAttributes } } } } }, - '#withVolumeAttributesMixin':: d.fn(help='"Attributes of the volume to publish."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='volumeAttributes', type=d.T.object)]), + '#withVolumeAttributesMixin':: d.fn(help='"volumeAttributes of the volume to publish."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='volumeAttributes', type=d.T.object)]), withVolumeAttributesMixin(volumeAttributes): { spec+: { source+: { inlineVolumeSpec+: { csi+: { volumeAttributes+: volumeAttributes } } } } }, - '#withVolumeHandle':: d.fn(help='"VolumeHandle is the unique volume name returned by the CSI volume plugin’s CreateVolume to refer to the volume on all subsequent calls. Required."', args=[d.arg(name='volumeHandle', type=d.T.string)]), + '#withVolumeHandle':: d.fn(help='"volumeHandle is the unique volume name returned by the CSI volume plugin’s CreateVolume to refer to the volume on all subsequent calls. Required."', args=[d.arg(name='volumeHandle', type=d.T.string)]), withVolumeHandle(volumeHandle): { spec+: { source+: { inlineVolumeSpec+: { csi+: { volumeHandle: volumeHandle } } } } }, }, '#fc':: d.obj(help='"Represents a Fibre Channel volume. Fibre Channel volumes can only be mounted as read/write once. Fibre Channel volumes support ownership management and SELinux relabeling."'), fc: { - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { spec+: { source+: { inlineVolumeSpec+: { fc+: { fsType: fsType } } } } }, - '#withLun':: d.fn(help='"Optional: FC target lun number"', args=[d.arg(name='lun', type=d.T.integer)]), + '#withLun':: d.fn(help='"lun is Optional: FC target lun number"', args=[d.arg(name='lun', type=d.T.integer)]), withLun(lun): { spec+: { source+: { inlineVolumeSpec+: { fc+: { lun: lun } } } } }, - '#withReadOnly':: d.fn(help='"Optional: Defaults to false (read/write). 
ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly is Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { spec+: { source+: { inlineVolumeSpec+: { fc+: { readOnly: readOnly } } } } }, - '#withTargetWWNs':: d.fn(help='"Optional: FC target worldwide names (WWNs)"', args=[d.arg(name='targetWWNs', type=d.T.array)]), + '#withTargetWWNs':: d.fn(help='"targetWWNs is Optional: FC target worldwide names (WWNs)"', args=[d.arg(name='targetWWNs', type=d.T.array)]), withTargetWWNs(targetWWNs): { spec+: { source+: { inlineVolumeSpec+: { fc+: { targetWWNs: if std.isArray(v=targetWWNs) then targetWWNs else [targetWWNs] } } } } }, - '#withTargetWWNsMixin':: d.fn(help='"Optional: FC target worldwide names (WWNs)"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='targetWWNs', type=d.T.array)]), + '#withTargetWWNsMixin':: d.fn(help='"targetWWNs is Optional: FC target worldwide names (WWNs)"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='targetWWNs', type=d.T.array)]), withTargetWWNsMixin(targetWWNs): { spec+: { source+: { inlineVolumeSpec+: { fc+: { targetWWNs+: if std.isArray(v=targetWWNs) then targetWWNs else [targetWWNs] } } } } }, - '#withWwids':: d.fn(help='"Optional: FC volume world wide identifiers (wwids) Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously."', args=[d.arg(name='wwids', type=d.T.array)]), + '#withWwids':: d.fn(help='"wwids Optional: FC volume world wide identifiers (wwids) Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously."', args=[d.arg(name='wwids', type=d.T.array)]), withWwids(wwids): { spec+: { source+: { inlineVolumeSpec+: { fc+: { wwids: if std.isArray(v=wwids) then wwids else [wwids] } } } } }, - '#withWwidsMixin':: d.fn(help='"Optional: FC volume world wide identifiers (wwids) Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='wwids', type=d.T.array)]), + '#withWwidsMixin':: d.fn(help='"wwids Optional: FC volume world wide identifiers (wwids) Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='wwids', type=d.T.array)]), withWwidsMixin(wwids): { spec+: { source+: { inlineVolumeSpec+: { fc+: { wwids+: if std.isArray(v=wwids) then wwids else [wwids] } } } } }, }, '#flexVolume':: d.obj(help='"FlexPersistentVolumeSource represents a generic persistent volume resource that is provisioned/attached using an exec based plugin."'), flexVolume: { '#secretRef':: d.obj(help='"SecretReference represents a Secret Reference. 
It has enough information to retrieve secret in any namespace"'), secretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), withName(name): { spec+: { source+: { inlineVolumeSpec+: { flexVolume+: { secretRef+: { name: name } } } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { spec+: { source+: { inlineVolumeSpec+: { flexVolume+: { secretRef+: { namespace: namespace } } } } } }, }, - '#withDriver':: d.fn(help='"Driver is the name of the driver to use for this volume."', args=[d.arg(name='driver', type=d.T.string)]), + '#withDriver':: d.fn(help='"driver is the name of the driver to use for this volume."', args=[d.arg(name='driver', type=d.T.string)]), withDriver(driver): { spec+: { source+: { inlineVolumeSpec+: { flexVolume+: { driver: driver } } } } }, - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". The default filesystem depends on FlexVolume script."', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType is the Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". The default filesystem depends on FlexVolume script."', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { spec+: { source+: { inlineVolumeSpec+: { flexVolume+: { fsType: fsType } } } } }, - '#withOptions':: d.fn(help='"Optional: Extra command options if any."', args=[d.arg(name='options', type=d.T.object)]), + '#withOptions':: d.fn(help='"options is Optional: this field holds extra command options if any."', args=[d.arg(name='options', type=d.T.object)]), withOptions(options): { spec+: { source+: { inlineVolumeSpec+: { flexVolume+: { options: options } } } } }, - '#withOptionsMixin':: d.fn(help='"Optional: Extra command options if any."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='options', type=d.T.object)]), + '#withOptionsMixin':: d.fn(help='"options is Optional: this field holds extra command options if any."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='options', type=d.T.object)]), withOptionsMixin(options): { spec+: { source+: { inlineVolumeSpec+: { flexVolume+: { options+: options } } } } }, - '#withReadOnly':: d.fn(help='"Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly is Optional: defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { spec+: { source+: { inlineVolumeSpec+: { flexVolume+: { readOnly: readOnly } } } } }, }, '#flocker':: d.obj(help='"Represents a Flocker volume mounted by the Flocker agent. One and only one of datasetName and datasetUUID should be set. 
Flocker volumes do not support ownership management or SELinux relabeling."'), flocker: { - '#withDatasetName':: d.fn(help='"Name of the dataset stored as metadata -> name on the dataset for Flocker should be considered as deprecated"', args=[d.arg(name='datasetName', type=d.T.string)]), + '#withDatasetName':: d.fn(help='"datasetName is Name of the dataset stored as metadata -> name on the dataset for Flocker should be considered as deprecated"', args=[d.arg(name='datasetName', type=d.T.string)]), withDatasetName(datasetName): { spec+: { source+: { inlineVolumeSpec+: { flocker+: { datasetName: datasetName } } } } }, - '#withDatasetUUID':: d.fn(help='"UUID of the dataset. This is unique identifier of a Flocker dataset"', args=[d.arg(name='datasetUUID', type=d.T.string)]), + '#withDatasetUUID':: d.fn(help='"datasetUUID is the UUID of the dataset. This is unique identifier of a Flocker dataset"', args=[d.arg(name='datasetUUID', type=d.T.string)]), withDatasetUUID(datasetUUID): { spec+: { source+: { inlineVolumeSpec+: { flocker+: { datasetUUID: datasetUUID } } } } }, }, '#gcePersistentDisk':: d.obj(help='"Represents a Persistent Disk resource in Google Compute Engine.\\n\\nA GCE PD must exist before mounting to a container. The disk must also be in the same GCE project and zone as the kubelet. A GCE PD can only be mounted as read/write once or read-only many times. GCE PDs support ownership management and SELinux relabeling."'), gcePersistentDisk: { - '#withFsType':: d.fn(help='"Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType is filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { spec+: { source+: { inlineVolumeSpec+: { gcePersistentDisk+: { fsType: fsType } } } } }, - '#withPartition':: d.fn(help='"The partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \\"1\\". Similarly, the volume partition for /dev/sda is \\"0\\" (or you can leave the property empty). More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='partition', type=d.T.integer)]), + '#withPartition':: d.fn(help='"partition is the partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \\"1\\". Similarly, the volume partition for /dev/sda is \\"0\\" (or you can leave the property empty). More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='partition', type=d.T.integer)]), withPartition(partition): { spec+: { source+: { inlineVolumeSpec+: { gcePersistentDisk+: { partition: partition } } } } }, - '#withPdName':: d.fn(help='"Unique name of the PD resource in GCE. Used to identify the disk in GCE. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='pdName', type=d.T.string)]), + '#withPdName':: d.fn(help='"pdName is unique name of the PD resource in GCE. Used to identify the disk in GCE. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='pdName', type=d.T.string)]), withPdName(pdName): { spec+: { source+: { inlineVolumeSpec+: { gcePersistentDisk+: { pdName: pdName } } } } }, - '#withReadOnly':: d.fn(help='"ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { spec+: { source+: { inlineVolumeSpec+: { gcePersistentDisk+: { readOnly: readOnly } } } } }, }, '#glusterfs':: d.obj(help='"Represents a Glusterfs mount that lasts the lifetime of a pod. Glusterfs volumes do not support ownership management or SELinux relabeling."'), glusterfs: { - '#withEndpoints':: d.fn(help='"EndpointsName is the endpoint name that details Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='endpoints', type=d.T.string)]), + '#withEndpoints':: d.fn(help='"endpoints is the endpoint name that details Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='endpoints', type=d.T.string)]), withEndpoints(endpoints): { spec+: { source+: { inlineVolumeSpec+: { glusterfs+: { endpoints: endpoints } } } } }, - '#withEndpointsNamespace':: d.fn(help='"EndpointsNamespace is the namespace that contains Glusterfs endpoint. If this field is empty, the EndpointNamespace defaults to the same namespace as the bound PVC. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='endpointsNamespace', type=d.T.string)]), + '#withEndpointsNamespace':: d.fn(help='"endpointsNamespace is the namespace that contains Glusterfs endpoint. If this field is empty, the EndpointNamespace defaults to the same namespace as the bound PVC. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='endpointsNamespace', type=d.T.string)]), withEndpointsNamespace(endpointsNamespace): { spec+: { source+: { inlineVolumeSpec+: { glusterfs+: { endpointsNamespace: endpointsNamespace } } } } }, - '#withPath':: d.fn(help='"Path is the Glusterfs volume path. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='path', type=d.T.string)]), + '#withPath':: d.fn(help='"path is the Glusterfs volume path. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='path', type=d.T.string)]), withPath(path): { spec+: { source+: { inlineVolumeSpec+: { glusterfs+: { path: path } } } } }, - '#withReadOnly':: d.fn(help='"ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions. Defaults to false. 
More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly here will force the Glusterfs volume to be mounted with read-only permissions. Defaults to false. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { spec+: { source+: { inlineVolumeSpec+: { glusterfs+: { readOnly: readOnly } } } } }, }, '#hostPath':: d.obj(help='"Represents a host path mapped into a pod. Host path volumes do not support ownership management or SELinux relabeling."'), hostPath: { - '#withPath':: d.fn(help='"Path of the directory on the host. If the path is a symlink, it will follow the link to the real path. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath"', args=[d.arg(name='path', type=d.T.string)]), + '#withPath':: d.fn(help='"path of the directory on the host. If the path is a symlink, it will follow the link to the real path. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath"', args=[d.arg(name='path', type=d.T.string)]), withPath(path): { spec+: { source+: { inlineVolumeSpec+: { hostPath+: { path: path } } } } }, - '#withType':: d.fn(help='"Type for HostPath Volume Defaults to \\"\\" More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath"', args=[d.arg(name='type', type=d.T.string)]), + '#withType':: d.fn(help='"type for HostPath Volume Defaults to \\"\\" More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath"', args=[d.arg(name='type', type=d.T.string)]), withType(type): { spec+: { source+: { inlineVolumeSpec+: { hostPath+: { type: type } } } } }, }, '#iscsi':: d.obj(help='"ISCSIPersistentVolumeSource represents an ISCSI disk. ISCSI volumes can only be mounted as read/write once. ISCSI volumes support ownership management and SELinux relabeling."'), iscsi: { '#secretRef':: d.obj(help='"SecretReference represents a Secret Reference. 
It has enough information to retrieve secret in any namespace"'), secretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), withName(name): { spec+: { source+: { inlineVolumeSpec+: { iscsi+: { secretRef+: { name: name } } } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { spec+: { source+: { inlineVolumeSpec+: { iscsi+: { secretRef+: { namespace: namespace } } } } } }, }, - '#withChapAuthDiscovery':: d.fn(help='"whether support iSCSI Discovery CHAP authentication"', args=[d.arg(name='chapAuthDiscovery', type=d.T.boolean)]), + '#withChapAuthDiscovery':: d.fn(help='"chapAuthDiscovery defines whether support iSCSI Discovery CHAP authentication"', args=[d.arg(name='chapAuthDiscovery', type=d.T.boolean)]), withChapAuthDiscovery(chapAuthDiscovery): { spec+: { source+: { inlineVolumeSpec+: { iscsi+: { chapAuthDiscovery: chapAuthDiscovery } } } } }, - '#withChapAuthSession':: d.fn(help='"whether support iSCSI Session CHAP authentication"', args=[d.arg(name='chapAuthSession', type=d.T.boolean)]), + '#withChapAuthSession':: d.fn(help='"chapAuthSession defines whether support iSCSI Session CHAP authentication"', args=[d.arg(name='chapAuthSession', type=d.T.boolean)]), withChapAuthSession(chapAuthSession): { spec+: { source+: { inlineVolumeSpec+: { iscsi+: { chapAuthSession: chapAuthSession } } } } }, - '#withFsType':: d.fn(help='"Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi"', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType is the filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi"', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { spec+: { source+: { inlineVolumeSpec+: { iscsi+: { fsType: fsType } } } } }, - '#withInitiatorName':: d.fn(help='"Custom iSCSI Initiator Name. If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface : will be created for the connection."', args=[d.arg(name='initiatorName', type=d.T.string)]), + '#withInitiatorName':: d.fn(help='"initiatorName is the custom iSCSI Initiator Name. 
If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface : will be created for the connection."', args=[d.arg(name='initiatorName', type=d.T.string)]), withInitiatorName(initiatorName): { spec+: { source+: { inlineVolumeSpec+: { iscsi+: { initiatorName: initiatorName } } } } }, - '#withIqn':: d.fn(help='"Target iSCSI Qualified Name."', args=[d.arg(name='iqn', type=d.T.string)]), + '#withIqn':: d.fn(help='"iqn is Target iSCSI Qualified Name."', args=[d.arg(name='iqn', type=d.T.string)]), withIqn(iqn): { spec+: { source+: { inlineVolumeSpec+: { iscsi+: { iqn: iqn } } } } }, - '#withIscsiInterface':: d.fn(help="\"iSCSI Interface Name that uses an iSCSI transport. Defaults to 'default' (tcp).\"", args=[d.arg(name='iscsiInterface', type=d.T.string)]), + '#withIscsiInterface':: d.fn(help="\"iscsiInterface is the interface Name that uses an iSCSI transport. Defaults to 'default' (tcp).\"", args=[d.arg(name='iscsiInterface', type=d.T.string)]), withIscsiInterface(iscsiInterface): { spec+: { source+: { inlineVolumeSpec+: { iscsi+: { iscsiInterface: iscsiInterface } } } } }, - '#withLun':: d.fn(help='"iSCSI Target Lun number."', args=[d.arg(name='lun', type=d.T.integer)]), + '#withLun':: d.fn(help='"lun is iSCSI Target Lun number."', args=[d.arg(name='lun', type=d.T.integer)]), withLun(lun): { spec+: { source+: { inlineVolumeSpec+: { iscsi+: { lun: lun } } } } }, - '#withPortals':: d.fn(help='"iSCSI Target Portal List. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260)."', args=[d.arg(name='portals', type=d.T.array)]), + '#withPortals':: d.fn(help='"portals is the iSCSI Target Portal List. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260)."', args=[d.arg(name='portals', type=d.T.array)]), withPortals(portals): { spec+: { source+: { inlineVolumeSpec+: { iscsi+: { portals: if std.isArray(v=portals) then portals else [portals] } } } } }, - '#withPortalsMixin':: d.fn(help='"iSCSI Target Portal List. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260)."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='portals', type=d.T.array)]), + '#withPortalsMixin':: d.fn(help='"portals is the iSCSI Target Portal List. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260)."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='portals', type=d.T.array)]), withPortalsMixin(portals): { spec+: { source+: { inlineVolumeSpec+: { iscsi+: { portals+: if std.isArray(v=portals) then portals else [portals] } } } } }, - '#withReadOnly':: d.fn(help='"ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false."', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { spec+: { source+: { inlineVolumeSpec+: { iscsi+: { readOnly: readOnly } } } } }, - '#withTargetPortal':: d.fn(help='"iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260)."', args=[d.arg(name='targetPortal', type=d.T.string)]), + '#withTargetPortal':: d.fn(help='"targetPortal is iSCSI Target Portal. 
The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260)."', args=[d.arg(name='targetPortal', type=d.T.string)]), withTargetPortal(targetPortal): { spec+: { source+: { inlineVolumeSpec+: { iscsi+: { targetPortal: targetPortal } } } } }, }, '#local':: d.obj(help='"Local represents directly-attached storage with node affinity (Beta feature)"'), 'local': { - '#withFsType':: d.fn(help='"Filesystem type to mount. It applies only when the Path is a block device. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". The default value is to auto-select a fileystem if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType is the filesystem type to mount. It applies only when the Path is a block device. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". The default value is to auto-select a filesystem if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { spec+: { source+: { inlineVolumeSpec+: { 'local'+: { fsType: fsType } } } } }, - '#withPath':: d.fn(help='"The full path to the volume on the node. It can be either a directory or block device (disk, partition, ...)."', args=[d.arg(name='path', type=d.T.string)]), + '#withPath':: d.fn(help='"path of the full path to the volume on the node. It can be either a directory or block device (disk, partition, ...)."', args=[d.arg(name='path', type=d.T.string)]), withPath(path): { spec+: { source+: { inlineVolumeSpec+: { 'local'+: { path: path } } } } }, }, '#nfs':: d.obj(help='"Represents an NFS mount that lasts the lifetime of a pod. NFS volumes do not support ownership management or SELinux relabeling."'), nfs: { - '#withPath':: d.fn(help='"Path that is exported by the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs"', args=[d.arg(name='path', type=d.T.string)]), + '#withPath':: d.fn(help='"path that is exported by the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs"', args=[d.arg(name='path', type=d.T.string)]), withPath(path): { spec+: { source+: { inlineVolumeSpec+: { nfs+: { path: path } } } } }, - '#withReadOnly':: d.fn(help='"ReadOnly here will force the NFS export to be mounted with read-only permissions. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs"', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly here will force the NFS export to be mounted with read-only permissions. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs"', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { spec+: { source+: { inlineVolumeSpec+: { nfs+: { readOnly: readOnly } } } } }, - '#withServer':: d.fn(help='"Server is the hostname or IP address of the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs"', args=[d.arg(name='server', type=d.T.string)]), + '#withServer':: d.fn(help='"server is the hostname or IP address of the NFS server. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs"', args=[d.arg(name='server', type=d.T.string)]), withServer(server): { spec+: { source+: { inlineVolumeSpec+: { nfs+: { server: server } } } } }, }, '#nodeAffinity':: d.obj(help='"VolumeNodeAffinity defines constraints that limit what nodes this volume can be accessed from."'), @@ -325,87 +330,87 @@ }, '#photonPersistentDisk':: d.obj(help='"Represents a Photon Controller persistent disk resource."'), photonPersistentDisk: { - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { spec+: { source+: { inlineVolumeSpec+: { photonPersistentDisk+: { fsType: fsType } } } } }, - '#withPdID':: d.fn(help='"ID that identifies Photon Controller persistent disk"', args=[d.arg(name='pdID', type=d.T.string)]), + '#withPdID':: d.fn(help='"pdID is the ID that identifies Photon Controller persistent disk"', args=[d.arg(name='pdID', type=d.T.string)]), withPdID(pdID): { spec+: { source+: { inlineVolumeSpec+: { photonPersistentDisk+: { pdID: pdID } } } } }, }, '#portworxVolume':: d.obj(help='"PortworxVolumeSource represents a Portworx volume resource."'), portworxVolume: { - '#withFsType':: d.fn(help='"FSType represents the filesystem type to mount Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fSType represents the filesystem type to mount Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { spec+: { source+: { inlineVolumeSpec+: { portworxVolume+: { fsType: fsType } } } } }, - '#withReadOnly':: d.fn(help='"Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { spec+: { source+: { inlineVolumeSpec+: { portworxVolume+: { readOnly: readOnly } } } } }, - '#withVolumeID':: d.fn(help='"VolumeID uniquely identifies a Portworx volume"', args=[d.arg(name='volumeID', type=d.T.string)]), + '#withVolumeID':: d.fn(help='"volumeID uniquely identifies a Portworx volume"', args=[d.arg(name='volumeID', type=d.T.string)]), withVolumeID(volumeID): { spec+: { source+: { inlineVolumeSpec+: { portworxVolume+: { volumeID: volumeID } } } } }, }, '#quobyte':: d.obj(help='"Represents a Quobyte mount that lasts the lifetime of a pod. 
Quobyte volumes do not support ownership management or SELinux relabeling."'), quobyte: { - '#withGroup':: d.fn(help='"Group to map volume access to Default is no group"', args=[d.arg(name='group', type=d.T.string)]), + '#withGroup':: d.fn(help='"group to map volume access to Default is no group"', args=[d.arg(name='group', type=d.T.string)]), withGroup(group): { spec+: { source+: { inlineVolumeSpec+: { quobyte+: { group: group } } } } }, - '#withReadOnly':: d.fn(help='"ReadOnly here will force the Quobyte volume to be mounted with read-only permissions. Defaults to false."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly here will force the Quobyte volume to be mounted with read-only permissions. Defaults to false."', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { spec+: { source+: { inlineVolumeSpec+: { quobyte+: { readOnly: readOnly } } } } }, - '#withRegistry':: d.fn(help='"Registry represents a single or multiple Quobyte Registry services specified as a string as host:port pair (multiple entries are separated with commas) which acts as the central registry for volumes"', args=[d.arg(name='registry', type=d.T.string)]), + '#withRegistry':: d.fn(help='"registry represents a single or multiple Quobyte Registry services specified as a string as host:port pair (multiple entries are separated with commas) which acts as the central registry for volumes"', args=[d.arg(name='registry', type=d.T.string)]), withRegistry(registry): { spec+: { source+: { inlineVolumeSpec+: { quobyte+: { registry: registry } } } } }, - '#withTenant':: d.fn(help='"Tenant owning the given Quobyte volume in the Backend Used with dynamically provisioned Quobyte volumes, value is set by the plugin"', args=[d.arg(name='tenant', type=d.T.string)]), + '#withTenant':: d.fn(help='"tenant owning the given Quobyte volume in the Backend Used with dynamically provisioned Quobyte volumes, value is set by the plugin"', args=[d.arg(name='tenant', type=d.T.string)]), withTenant(tenant): { spec+: { source+: { inlineVolumeSpec+: { quobyte+: { tenant: tenant } } } } }, - '#withUser':: d.fn(help='"User to map volume access to Defaults to serivceaccount user"', args=[d.arg(name='user', type=d.T.string)]), + '#withUser':: d.fn(help='"user to map volume access to Defaults to serivceaccount user"', args=[d.arg(name='user', type=d.T.string)]), withUser(user): { spec+: { source+: { inlineVolumeSpec+: { quobyte+: { user: user } } } } }, - '#withVolume':: d.fn(help='"Volume is a string that references an already created Quobyte volume by name."', args=[d.arg(name='volume', type=d.T.string)]), + '#withVolume':: d.fn(help='"volume is a string that references an already created Quobyte volume by name."', args=[d.arg(name='volume', type=d.T.string)]), withVolume(volume): { spec+: { source+: { inlineVolumeSpec+: { quobyte+: { volume: volume } } } } }, }, '#rbd':: d.obj(help='"Represents a Rados Block Device mount that lasts the lifetime of a pod. RBD volumes support ownership management and SELinux relabeling."'), rbd: { '#secretRef':: d.obj(help='"SecretReference represents a Secret Reference. 
It has enough information to retrieve secret in any namespace"'), secretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), withName(name): { spec+: { source+: { inlineVolumeSpec+: { rbd+: { secretRef+: { name: name } } } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { spec+: { source+: { inlineVolumeSpec+: { rbd+: { secretRef+: { namespace: namespace } } } } } }, }, - '#withFsType':: d.fn(help='"Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd"', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType is the filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd"', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { spec+: { source+: { inlineVolumeSpec+: { rbd+: { fsType: fsType } } } } }, - '#withImage':: d.fn(help='"The rados image name. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='image', type=d.T.string)]), + '#withImage':: d.fn(help='"image is the rados image name. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='image', type=d.T.string)]), withImage(image): { spec+: { source+: { inlineVolumeSpec+: { rbd+: { image: image } } } } }, - '#withKeyring':: d.fn(help='"Keyring is the path to key ring for RBDUser. Default is /etc/ceph/keyring. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='keyring', type=d.T.string)]), + '#withKeyring':: d.fn(help='"keyring is the path to key ring for RBDUser. Default is /etc/ceph/keyring. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='keyring', type=d.T.string)]), withKeyring(keyring): { spec+: { source+: { inlineVolumeSpec+: { rbd+: { keyring: keyring } } } } }, - '#withMonitors':: d.fn(help='"A collection of Ceph monitors. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='monitors', type=d.T.array)]), + '#withMonitors':: d.fn(help='"monitors is a collection of Ceph monitors. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='monitors', type=d.T.array)]), withMonitors(monitors): { spec+: { source+: { inlineVolumeSpec+: { rbd+: { monitors: if std.isArray(v=monitors) then monitors else [monitors] } } } } }, - '#withMonitorsMixin':: d.fn(help='"A collection of Ceph monitors. 
More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='monitors', type=d.T.array)]), + '#withMonitorsMixin':: d.fn(help='"monitors is a collection of Ceph monitors. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='monitors', type=d.T.array)]), withMonitorsMixin(monitors): { spec+: { source+: { inlineVolumeSpec+: { rbd+: { monitors+: if std.isArray(v=monitors) then monitors else [monitors] } } } } }, - '#withPool':: d.fn(help='"The rados pool name. Default is rbd. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='pool', type=d.T.string)]), + '#withPool':: d.fn(help='"pool is the rados pool name. Default is rbd. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='pool', type=d.T.string)]), withPool(pool): { spec+: { source+: { inlineVolumeSpec+: { rbd+: { pool: pool } } } } }, - '#withReadOnly':: d.fn(help='"ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { spec+: { source+: { inlineVolumeSpec+: { rbd+: { readOnly: readOnly } } } } }, - '#withUser':: d.fn(help='"The rados user name. Default is admin. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='user', type=d.T.string)]), + '#withUser':: d.fn(help='"user is the rados user name. Default is admin. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='user', type=d.T.string)]), withUser(user): { spec+: { source+: { inlineVolumeSpec+: { rbd+: { user: user } } } } }, }, '#scaleIO':: d.obj(help='"ScaleIOPersistentVolumeSource represents a persistent ScaleIO volume"'), scaleIO: { '#secretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), secretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), withName(name): { spec+: { source+: { inlineVolumeSpec+: { scaleIO+: { secretRef+: { name: name } } } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { spec+: { source+: { inlineVolumeSpec+: { scaleIO+: { secretRef+: { namespace: namespace } } } } } }, }, - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Default is \\"xfs\\', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType is the filesystem type to mount. 
Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Default is \\"xfs\\', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { spec+: { source+: { inlineVolumeSpec+: { scaleIO+: { fsType: fsType } } } } }, - '#withGateway':: d.fn(help='"The host address of the ScaleIO API Gateway."', args=[d.arg(name='gateway', type=d.T.string)]), + '#withGateway':: d.fn(help='"gateway is the host address of the ScaleIO API Gateway."', args=[d.arg(name='gateway', type=d.T.string)]), withGateway(gateway): { spec+: { source+: { inlineVolumeSpec+: { scaleIO+: { gateway: gateway } } } } }, - '#withProtectionDomain':: d.fn(help='"The name of the ScaleIO Protection Domain for the configured storage."', args=[d.arg(name='protectionDomain', type=d.T.string)]), + '#withProtectionDomain':: d.fn(help='"protectionDomain is the name of the ScaleIO Protection Domain for the configured storage."', args=[d.arg(name='protectionDomain', type=d.T.string)]), withProtectionDomain(protectionDomain): { spec+: { source+: { inlineVolumeSpec+: { scaleIO+: { protectionDomain: protectionDomain } } } } }, - '#withReadOnly':: d.fn(help='"Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { spec+: { source+: { inlineVolumeSpec+: { scaleIO+: { readOnly: readOnly } } } } }, - '#withSslEnabled':: d.fn(help='"Flag to enable/disable SSL communication with Gateway, default false"', args=[d.arg(name='sslEnabled', type=d.T.boolean)]), + '#withSslEnabled':: d.fn(help='"sslEnabled is the flag to enable/disable SSL communication with Gateway, default false"', args=[d.arg(name='sslEnabled', type=d.T.boolean)]), withSslEnabled(sslEnabled): { spec+: { source+: { inlineVolumeSpec+: { scaleIO+: { sslEnabled: sslEnabled } } } } }, - '#withStorageMode':: d.fn(help='"Indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. Default is ThinProvisioned."', args=[d.arg(name='storageMode', type=d.T.string)]), + '#withStorageMode':: d.fn(help='"storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. 
Default is ThinProvisioned."', args=[d.arg(name='storageMode', type=d.T.string)]), withStorageMode(storageMode): { spec+: { source+: { inlineVolumeSpec+: { scaleIO+: { storageMode: storageMode } } } } }, - '#withStoragePool':: d.fn(help='"The ScaleIO Storage Pool associated with the protection domain."', args=[d.arg(name='storagePool', type=d.T.string)]), + '#withStoragePool':: d.fn(help='"storagePool is the ScaleIO Storage Pool associated with the protection domain."', args=[d.arg(name='storagePool', type=d.T.string)]), withStoragePool(storagePool): { spec+: { source+: { inlineVolumeSpec+: { scaleIO+: { storagePool: storagePool } } } } }, - '#withSystem':: d.fn(help='"The name of the storage system as configured in ScaleIO."', args=[d.arg(name='system', type=d.T.string)]), + '#withSystem':: d.fn(help='"system is the name of the storage system as configured in ScaleIO."', args=[d.arg(name='system', type=d.T.string)]), withSystem(system): { spec+: { source+: { inlineVolumeSpec+: { scaleIO+: { system: system } } } } }, - '#withVolumeName':: d.fn(help='"The name of a volume already created in the ScaleIO system that is associated with this volume source."', args=[d.arg(name='volumeName', type=d.T.string)]), + '#withVolumeName':: d.fn(help='"volumeName is the name of a volume already created in the ScaleIO system that is associated with this volume source."', args=[d.arg(name='volumeName', type=d.T.string)]), withVolumeName(volumeName): { spec+: { source+: { inlineVolumeSpec+: { scaleIO+: { volumeName: volumeName } } } } }, }, '#storageos':: d.obj(help='"Represents a StorageOS persistent volume resource."'), @@ -427,51 +432,53 @@ '#withUid':: d.fn(help='"UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { spec+: { source+: { inlineVolumeSpec+: { storageos+: { secretRef+: { uid: uid } } } } } }, }, - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { spec+: { source+: { inlineVolumeSpec+: { storageos+: { fsType: fsType } } } } }, - '#withReadOnly':: d.fn(help='"Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { spec+: { source+: { inlineVolumeSpec+: { storageos+: { readOnly: readOnly } } } } }, - '#withVolumeName':: d.fn(help='"VolumeName is the human-readable name of the StorageOS volume. Volume names are only unique within a namespace."', args=[d.arg(name='volumeName', type=d.T.string)]), + '#withVolumeName':: d.fn(help='"volumeName is the human-readable name of the StorageOS volume. 
Volume names are only unique within a namespace."', args=[d.arg(name='volumeName', type=d.T.string)]), withVolumeName(volumeName): { spec+: { source+: { inlineVolumeSpec+: { storageos+: { volumeName: volumeName } } } } }, - '#withVolumeNamespace':: d.fn(help="\"VolumeNamespace specifies the scope of the volume within StorageOS. If no namespace is specified then the Pod's namespace will be used. This allows the Kubernetes name scoping to be mirrored within StorageOS for tighter integration. Set VolumeName to any name to override the default behaviour. Set to \\\"default\\\" if you are not using namespaces within StorageOS. Namespaces that do not pre-exist within StorageOS will be created.\"", args=[d.arg(name='volumeNamespace', type=d.T.string)]), + '#withVolumeNamespace':: d.fn(help="\"volumeNamespace specifies the scope of the volume within StorageOS. If no namespace is specified then the Pod's namespace will be used. This allows the Kubernetes name scoping to be mirrored within StorageOS for tighter integration. Set VolumeName to any name to override the default behaviour. Set to \\\"default\\\" if you are not using namespaces within StorageOS. Namespaces that do not pre-exist within StorageOS will be created.\"", args=[d.arg(name='volumeNamespace', type=d.T.string)]), withVolumeNamespace(volumeNamespace): { spec+: { source+: { inlineVolumeSpec+: { storageos+: { volumeNamespace: volumeNamespace } } } } }, }, '#vsphereVolume':: d.obj(help='"Represents a vSphere volume resource."'), vsphereVolume: { - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType is filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". 
Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { spec+: { source+: { inlineVolumeSpec+: { vsphereVolume+: { fsType: fsType } } } } }, - '#withStoragePolicyID':: d.fn(help='"Storage Policy Based Management (SPBM) profile ID associated with the StoragePolicyName."', args=[d.arg(name='storagePolicyID', type=d.T.string)]), + '#withStoragePolicyID':: d.fn(help='"storagePolicyID is the storage Policy Based Management (SPBM) profile ID associated with the StoragePolicyName."', args=[d.arg(name='storagePolicyID', type=d.T.string)]), withStoragePolicyID(storagePolicyID): { spec+: { source+: { inlineVolumeSpec+: { vsphereVolume+: { storagePolicyID: storagePolicyID } } } } }, - '#withStoragePolicyName':: d.fn(help='"Storage Policy Based Management (SPBM) profile name."', args=[d.arg(name='storagePolicyName', type=d.T.string)]), + '#withStoragePolicyName':: d.fn(help='"storagePolicyName is the storage Policy Based Management (SPBM) profile name."', args=[d.arg(name='storagePolicyName', type=d.T.string)]), withStoragePolicyName(storagePolicyName): { spec+: { source+: { inlineVolumeSpec+: { vsphereVolume+: { storagePolicyName: storagePolicyName } } } } }, - '#withVolumePath':: d.fn(help='"Path that identifies vSphere volume vmdk"', args=[d.arg(name='volumePath', type=d.T.string)]), + '#withVolumePath':: d.fn(help='"volumePath is the path that identifies vSphere volume vmdk"', args=[d.arg(name='volumePath', type=d.T.string)]), withVolumePath(volumePath): { spec+: { source+: { inlineVolumeSpec+: { vsphereVolume+: { volumePath: volumePath } } } } }, }, - '#withAccessModes':: d.fn(help='"AccessModes contains all ways the volume can be mounted. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes"', args=[d.arg(name='accessModes', type=d.T.array)]), + '#withAccessModes':: d.fn(help='"accessModes contains all ways the volume can be mounted. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes"', args=[d.arg(name='accessModes', type=d.T.array)]), withAccessModes(accessModes): { spec+: { source+: { inlineVolumeSpec+: { accessModes: if std.isArray(v=accessModes) then accessModes else [accessModes] } } } }, - '#withAccessModesMixin':: d.fn(help='"AccessModes contains all ways the volume can be mounted. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='accessModes', type=d.T.array)]), + '#withAccessModesMixin':: d.fn(help='"accessModes contains all ways the volume can be mounted. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='accessModes', type=d.T.array)]), withAccessModesMixin(accessModes): { spec+: { source+: { inlineVolumeSpec+: { accessModes+: if std.isArray(v=accessModes) then accessModes else [accessModes] } } } }, - '#withCapacity':: d.fn(help="\"A description of the persistent volume's resources and capacity. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity\"", args=[d.arg(name='capacity', type=d.T.object)]), + '#withCapacity':: d.fn(help="\"capacity is the description of the persistent volume's resources and capacity. 
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity\"", args=[d.arg(name='capacity', type=d.T.object)]), withCapacity(capacity): { spec+: { source+: { inlineVolumeSpec+: { capacity: capacity } } } }, - '#withCapacityMixin':: d.fn(help="\"A description of the persistent volume's resources and capacity. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='capacity', type=d.T.object)]), + '#withCapacityMixin':: d.fn(help="\"capacity is the description of the persistent volume's resources and capacity. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='capacity', type=d.T.object)]), withCapacityMixin(capacity): { spec+: { source+: { inlineVolumeSpec+: { capacity+: capacity } } } }, - '#withMountOptions':: d.fn(help='"A list of mount options, e.g. [\\"ro\\", \\"soft\\"]. Not validated - mount will simply fail if one is invalid. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#mount-options"', args=[d.arg(name='mountOptions', type=d.T.array)]), + '#withMountOptions':: d.fn(help='"mountOptions is the list of mount options, e.g. [\\"ro\\", \\"soft\\"]. Not validated - mount will simply fail if one is invalid. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#mount-options"', args=[d.arg(name='mountOptions', type=d.T.array)]), withMountOptions(mountOptions): { spec+: { source+: { inlineVolumeSpec+: { mountOptions: if std.isArray(v=mountOptions) then mountOptions else [mountOptions] } } } }, - '#withMountOptionsMixin':: d.fn(help='"A list of mount options, e.g. [\\"ro\\", \\"soft\\"]. Not validated - mount will simply fail if one is invalid. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#mount-options"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='mountOptions', type=d.T.array)]), + '#withMountOptionsMixin':: d.fn(help='"mountOptions is the list of mount options, e.g. [\\"ro\\", \\"soft\\"]. Not validated - mount will simply fail if one is invalid. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#mount-options"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='mountOptions', type=d.T.array)]), withMountOptionsMixin(mountOptions): { spec+: { source+: { inlineVolumeSpec+: { mountOptions+: if std.isArray(v=mountOptions) then mountOptions else [mountOptions] } } } }, - '#withPersistentVolumeReclaimPolicy':: d.fn(help='"What happens to a persistent volume when released from its claim. Valid options are Retain (default for manually created PersistentVolumes), Delete (default for dynamically provisioned PersistentVolumes), and Recycle (deprecated). Recycle must be supported by the volume plugin underlying this PersistentVolume. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#reclaiming"', args=[d.arg(name='persistentVolumeReclaimPolicy', type=d.T.string)]), + '#withPersistentVolumeReclaimPolicy':: d.fn(help='"persistentVolumeReclaimPolicy defines what happens to a persistent volume when released from its claim. Valid options are Retain (default for manually created PersistentVolumes), Delete (default for dynamically provisioned PersistentVolumes), and Recycle (deprecated). 
Recycle must be supported by the volume plugin underlying this PersistentVolume. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#reclaiming"', args=[d.arg(name='persistentVolumeReclaimPolicy', type=d.T.string)]), withPersistentVolumeReclaimPolicy(persistentVolumeReclaimPolicy): { spec+: { source+: { inlineVolumeSpec+: { persistentVolumeReclaimPolicy: persistentVolumeReclaimPolicy } } } }, - '#withStorageClassName':: d.fn(help='"Name of StorageClass to which this persistent volume belongs. Empty value means that this volume does not belong to any StorageClass."', args=[d.arg(name='storageClassName', type=d.T.string)]), + '#withStorageClassName':: d.fn(help='"storageClassName is the name of StorageClass to which this persistent volume belongs. Empty value means that this volume does not belong to any StorageClass."', args=[d.arg(name='storageClassName', type=d.T.string)]), withStorageClassName(storageClassName): { spec+: { source+: { inlineVolumeSpec+: { storageClassName: storageClassName } } } }, + '#withVolumeAttributesClassName':: d.fn(help='"Name of VolumeAttributesClass to which this persistent volume belongs. Empty value is not allowed. When this field is not set, it indicates that this volume does not belong to any VolumeAttributesClass. This field is mutable and can be changed by the CSI driver after a volume has been updated successfully to a new class. For an unbound PersistentVolume, the volumeAttributesClassName will be matched with unbound PersistentVolumeClaims during the binding process. This is an alpha field and requires enabling VolumeAttributesClass feature."', args=[d.arg(name='volumeAttributesClassName', type=d.T.string)]), + withVolumeAttributesClassName(volumeAttributesClassName): { spec+: { source+: { inlineVolumeSpec+: { volumeAttributesClassName: volumeAttributesClassName } } } }, '#withVolumeMode':: d.fn(help='"volumeMode defines if a volume is intended to be used with a formatted filesystem or to remain in raw block state. Value of Filesystem is implied when not included in spec."', args=[d.arg(name='volumeMode', type=d.T.string)]), withVolumeMode(volumeMode): { spec+: { source+: { inlineVolumeSpec+: { volumeMode: volumeMode } } } }, }, - '#withPersistentVolumeName':: d.fn(help='"Name of the persistent volume to attach."', args=[d.arg(name='persistentVolumeName', type=d.T.string)]), + '#withPersistentVolumeName':: d.fn(help='"persistentVolumeName represents the name of the persistent volume to attach."', args=[d.arg(name='persistentVolumeName', type=d.T.string)]), withPersistentVolumeName(persistentVolumeName): { spec+: { source+: { persistentVolumeName: persistentVolumeName } } }, }, - '#withAttacher':: d.fn(help='"Attacher indicates the name of the volume driver that MUST handle this request. This is the name returned by GetPluginName()."', args=[d.arg(name='attacher', type=d.T.string)]), + '#withAttacher':: d.fn(help='"attacher indicates the name of the volume driver that MUST handle this request. 
This is the name returned by GetPluginName()."', args=[d.arg(name='attacher', type=d.T.string)]), withAttacher(attacher): { spec+: { attacher: attacher } }, - '#withNodeName':: d.fn(help='"The node that the volume should be attached to."', args=[d.arg(name='nodeName', type=d.T.string)]), + '#withNodeName':: d.fn(help='"nodeName represents the node that the volume should be attached to."', args=[d.arg(name='nodeName', type=d.T.string)]), withNodeName(nodeName): { spec+: { nodeName: nodeName } }, }, '#mixin': 'ignore', diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1beta1/volumeAttachmentSource.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/storage/v1/volumeAttachmentSource.libsonnet similarity index 57% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1beta1/volumeAttachmentSource.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/storage/v1/volumeAttachmentSource.libsonnet index 47ca4b20576..6c46e4d512c 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1beta1/volumeAttachmentSource.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/storage/v1/volumeAttachmentSource.libsonnet @@ -5,77 +5,77 @@ inlineVolumeSpec: { '#awsElasticBlockStore':: d.obj(help='"Represents a Persistent Disk resource in AWS.\\n\\nAn AWS EBS disk must exist before mounting to a container. The disk must also be in the same AWS zone as the kubelet. An AWS EBS disk can only be mounted as read/write once. AWS EBS volumes support ownership management and SELinux relabeling."'), awsElasticBlockStore: { - '#withFsType':: d.fn(help='"Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore"', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType is the filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore"', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { inlineVolumeSpec+: { awsElasticBlockStore+: { fsType: fsType } } }, - '#withPartition':: d.fn(help='"The partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \\"1\\". Similarly, the volume partition for /dev/sda is \\"0\\" (or you can leave the property empty)."', args=[d.arg(name='partition', type=d.T.integer)]), + '#withPartition':: d.fn(help='"partition is the partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \\"1\\". Similarly, the volume partition for /dev/sda is \\"0\\" (or you can leave the property empty)."', args=[d.arg(name='partition', type=d.T.integer)]), withPartition(partition): { inlineVolumeSpec+: { awsElasticBlockStore+: { partition: partition } } }, - '#withReadOnly':: d.fn(help='"Specify \\"true\\" to force and set the ReadOnly property in VolumeMounts to \\"true\\". 
If omitted, the default is \\"false\\". More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore"', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly value true will force the readOnly setting in VolumeMounts. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore"', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { inlineVolumeSpec+: { awsElasticBlockStore+: { readOnly: readOnly } } }, - '#withVolumeID':: d.fn(help='"Unique ID of the persistent disk resource in AWS (Amazon EBS volume). More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore"', args=[d.arg(name='volumeID', type=d.T.string)]), + '#withVolumeID':: d.fn(help='"volumeID is unique ID of the persistent disk resource in AWS (Amazon EBS volume). More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore"', args=[d.arg(name='volumeID', type=d.T.string)]), withVolumeID(volumeID): { inlineVolumeSpec+: { awsElasticBlockStore+: { volumeID: volumeID } } }, }, '#azureDisk':: d.obj(help='"AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod."'), azureDisk: { - '#withCachingMode':: d.fn(help='"Host Caching mode: None, Read Only, Read Write."', args=[d.arg(name='cachingMode', type=d.T.string)]), + '#withCachingMode':: d.fn(help='"cachingMode is the Host Caching mode: None, Read Only, Read Write."', args=[d.arg(name='cachingMode', type=d.T.string)]), withCachingMode(cachingMode): { inlineVolumeSpec+: { azureDisk+: { cachingMode: cachingMode } } }, - '#withDiskName':: d.fn(help='"The Name of the data disk in the blob storage"', args=[d.arg(name='diskName', type=d.T.string)]), + '#withDiskName':: d.fn(help='"diskName is the Name of the data disk in the blob storage"', args=[d.arg(name='diskName', type=d.T.string)]), withDiskName(diskName): { inlineVolumeSpec+: { azureDisk+: { diskName: diskName } } }, - '#withDiskURI':: d.fn(help='"The URI the data disk in the blob storage"', args=[d.arg(name='diskURI', type=d.T.string)]), + '#withDiskURI':: d.fn(help='"diskURI is the URI of data disk in the blob storage"', args=[d.arg(name='diskURI', type=d.T.string)]), withDiskURI(diskURI): { inlineVolumeSpec+: { azureDisk+: { diskURI: diskURI } } }, - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType is Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { inlineVolumeSpec+: { azureDisk+: { fsType: fsType } } }, - '#withKind':: d.fn(help='"Expected values Shared: multiple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared"', args=[d.arg(name='kind', type=d.T.string)]), + '#withKind':: d.fn(help='"kind expected values are Shared: multiple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). 
defaults to shared"', args=[d.arg(name='kind', type=d.T.string)]), withKind(kind): { inlineVolumeSpec+: { azureDisk+: { kind: kind } } }, - '#withReadOnly':: d.fn(help='"Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { inlineVolumeSpec+: { azureDisk+: { readOnly: readOnly } } }, }, '#azureFile':: d.obj(help='"AzureFile represents an Azure File Service mount on the host and bind mount to the pod."'), azureFile: { - '#withReadOnly':: d.fn(help='"Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { inlineVolumeSpec+: { azureFile+: { readOnly: readOnly } } }, - '#withSecretName':: d.fn(help='"the name of secret that contains Azure Storage Account Name and Key"', args=[d.arg(name='secretName', type=d.T.string)]), + '#withSecretName':: d.fn(help='"secretName is the name of secret that contains Azure Storage Account Name and Key"', args=[d.arg(name='secretName', type=d.T.string)]), withSecretName(secretName): { inlineVolumeSpec+: { azureFile+: { secretName: secretName } } }, - '#withSecretNamespace':: d.fn(help='"the namespace of the secret that contains Azure Storage Account Name and Key default is the same as the Pod"', args=[d.arg(name='secretNamespace', type=d.T.string)]), + '#withSecretNamespace':: d.fn(help='"secretNamespace is the namespace of the secret that contains Azure Storage Account Name and Key default is the same as the Pod"', args=[d.arg(name='secretNamespace', type=d.T.string)]), withSecretNamespace(secretNamespace): { inlineVolumeSpec+: { azureFile+: { secretNamespace: secretNamespace } } }, - '#withShareName':: d.fn(help='"Share Name"', args=[d.arg(name='shareName', type=d.T.string)]), + '#withShareName':: d.fn(help='"shareName is the azure Share Name"', args=[d.arg(name='shareName', type=d.T.string)]), withShareName(shareName): { inlineVolumeSpec+: { azureFile+: { shareName: shareName } } }, }, '#cephfs':: d.obj(help='"Represents a Ceph Filesystem mount that lasts the lifetime of a pod Cephfs volumes do not support ownership management or SELinux relabeling."'), cephfs: { '#secretRef':: d.obj(help='"SecretReference represents a Secret Reference. 
It has enough information to retrieve secret in any namespace"'), secretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), withName(name): { inlineVolumeSpec+: { cephfs+: { secretRef+: { name: name } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { inlineVolumeSpec+: { cephfs+: { secretRef+: { namespace: namespace } } } }, }, - '#withMonitors':: d.fn(help='"Required: Monitors is a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='monitors', type=d.T.array)]), + '#withMonitors':: d.fn(help='"monitors is Required: Monitors is a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='monitors', type=d.T.array)]), withMonitors(monitors): { inlineVolumeSpec+: { cephfs+: { monitors: if std.isArray(v=monitors) then monitors else [monitors] } } }, - '#withMonitorsMixin':: d.fn(help='"Required: Monitors is a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='monitors', type=d.T.array)]), + '#withMonitorsMixin':: d.fn(help='"monitors is Required: Monitors is a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='monitors', type=d.T.array)]), withMonitorsMixin(monitors): { inlineVolumeSpec+: { cephfs+: { monitors+: if std.isArray(v=monitors) then monitors else [monitors] } } }, - '#withPath':: d.fn(help='"Optional: Used as the mounted root, rather than the full Ceph tree, default is /"', args=[d.arg(name='path', type=d.T.string)]), + '#withPath':: d.fn(help='"path is Optional: Used as the mounted root, rather than the full Ceph tree, default is /"', args=[d.arg(name='path', type=d.T.string)]), withPath(path): { inlineVolumeSpec+: { cephfs+: { path: path } } }, - '#withReadOnly':: d.fn(help='"Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly is Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. 
More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { inlineVolumeSpec+: { cephfs+: { readOnly: readOnly } } }, - '#withSecretFile':: d.fn(help='"Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='secretFile', type=d.T.string)]), + '#withSecretFile':: d.fn(help='"secretFile is Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='secretFile', type=d.T.string)]), withSecretFile(secretFile): { inlineVolumeSpec+: { cephfs+: { secretFile: secretFile } } }, - '#withUser':: d.fn(help='"Optional: User is the rados user name, default is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='user', type=d.T.string)]), + '#withUser':: d.fn(help='"user is Optional: User is the rados user name, default is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='user', type=d.T.string)]), withUser(user): { inlineVolumeSpec+: { cephfs+: { user: user } } }, }, '#cinder':: d.obj(help='"Represents a cinder volume resource in Openstack. A Cinder volume must exist before mounting to a container. The volume must also be in the same region as the kubelet. Cinder volumes support ownership management and SELinux relabeling."'), cinder: { '#secretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), secretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), withName(name): { inlineVolumeSpec+: { cinder+: { secretRef+: { name: name } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { inlineVolumeSpec+: { cinder+: { secretRef+: { namespace: namespace } } } }, }, - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://examples.k8s.io/mysql-cinder-pd/README.md"', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType Filesystem type to mount. Must be a filesystem type supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://examples.k8s.io/mysql-cinder-pd/README.md"', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { inlineVolumeSpec+: { cinder+: { fsType: fsType } } }, - '#withReadOnly':: d.fn(help='"Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. 
More info: https://examples.k8s.io/mysql-cinder-pd/README.md"', args=[d.arg(name='readOnly', type=d.T.boolean)]),
+ '#withReadOnly':: d.fn(help='"readOnly is Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/mysql-cinder-pd/README.md"', args=[d.arg(name='readOnly', type=d.T.boolean)]),
withReadOnly(readOnly): { inlineVolumeSpec+: { cinder+: { readOnly: readOnly } } },
- '#withVolumeID':: d.fn(help='"volume id used to identify the volume in cinder. More info: https://examples.k8s.io/mysql-cinder-pd/README.md"', args=[d.arg(name='volumeID', type=d.T.string)]),
+ '#withVolumeID':: d.fn(help='"volumeID used to identify the volume in cinder. More info: https://examples.k8s.io/mysql-cinder-pd/README.md"', args=[d.arg(name='volumeID', type=d.T.string)]),
withVolumeID(volumeID): { inlineVolumeSpec+: { cinder+: { volumeID: volumeID } } },
},
'#claimRef':: d.obj(help='"ObjectReference contains enough information to let you inspect or modify the referred object."'),
@@ -99,164 +99,171 @@
csi: {
'#controllerExpandSecretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'),
controllerExpandSecretRef: {
- '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]),
+ '#withName':: d.fn(help='"name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]),
withName(name): { inlineVolumeSpec+: { csi+: { controllerExpandSecretRef+: { name: name } } } },
- '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]),
+ '#withNamespace':: d.fn(help='"namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]),
withNamespace(namespace): { inlineVolumeSpec+: { csi+: { controllerExpandSecretRef+: { namespace: namespace } } } },
},
'#controllerPublishSecretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'),
controllerPublishSecretRef: {
- '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]),
+ '#withName':: d.fn(help='"name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]),
withName(name): { inlineVolumeSpec+: { csi+: { controllerPublishSecretRef+: { name: name } } } },
- '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]),
+ '#withNamespace':: d.fn(help='"namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]),
withNamespace(namespace): { inlineVolumeSpec+: { csi+: { controllerPublishSecretRef+: { namespace: namespace } } } },
},
+ '#nodeExpandSecretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'),
+ nodeExpandSecretRef: {
+ '#withName':: d.fn(help='"name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]),
+ withName(name): { inlineVolumeSpec+: { csi+: { nodeExpandSecretRef+: { name: name } } } },
+ '#withNamespace':: d.fn(help='"namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]),
+ withNamespace(namespace): { inlineVolumeSpec+: { csi+: { nodeExpandSecretRef+: { namespace: namespace } } } },
+ },
'#nodePublishSecretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'),
nodePublishSecretRef: {
- '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]),
+ '#withName':: d.fn(help='"name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]),
withName(name): { inlineVolumeSpec+: { csi+: { nodePublishSecretRef+: { name: name } } } },
- '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]),
+ '#withNamespace':: d.fn(help='"namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]),
withNamespace(namespace): { inlineVolumeSpec+: { csi+: { nodePublishSecretRef+: { namespace: namespace } } } },
},
'#nodeStageSecretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'),
nodeStageSecretRef: {
- '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]),
+ '#withName':: d.fn(help='"name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]),
withName(name): { inlineVolumeSpec+: { csi+: { nodeStageSecretRef+: { name: name } } } },
- '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]),
+ '#withNamespace':: d.fn(help='"namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]),
withNamespace(namespace): { inlineVolumeSpec+: { csi+: { nodeStageSecretRef+: { namespace: namespace } } } },
},
- '#withDriver':: d.fn(help='"Driver is the name of the driver to use for this volume. Required."', args=[d.arg(name='driver', type=d.T.string)]),
+ '#withDriver':: d.fn(help='"driver is the name of the driver to use for this volume. Required."', args=[d.arg(name='driver', type=d.T.string)]),
withDriver(driver): { inlineVolumeSpec+: { csi+: { driver: driver } } },
- '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\"."', args=[d.arg(name='fsType', type=d.T.string)]),
+ '#withFsType':: d.fn(help='"fsType to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\"."', args=[d.arg(name='fsType', type=d.T.string)]),
withFsType(fsType): { inlineVolumeSpec+: { csi+: { fsType: fsType } } },
- '#withReadOnly':: d.fn(help='"Optional: The value to pass to ControllerPublishVolumeRequest.
Defaults to false (read/write)."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly value to pass to ControllerPublishVolumeRequest. Defaults to false (read/write)."', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { inlineVolumeSpec+: { csi+: { readOnly: readOnly } } }, - '#withVolumeAttributes':: d.fn(help='"Attributes of the volume to publish."', args=[d.arg(name='volumeAttributes', type=d.T.object)]), + '#withVolumeAttributes':: d.fn(help='"volumeAttributes of the volume to publish."', args=[d.arg(name='volumeAttributes', type=d.T.object)]), withVolumeAttributes(volumeAttributes): { inlineVolumeSpec+: { csi+: { volumeAttributes: volumeAttributes } } }, - '#withVolumeAttributesMixin':: d.fn(help='"Attributes of the volume to publish."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='volumeAttributes', type=d.T.object)]), + '#withVolumeAttributesMixin':: d.fn(help='"volumeAttributes of the volume to publish."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='volumeAttributes', type=d.T.object)]), withVolumeAttributesMixin(volumeAttributes): { inlineVolumeSpec+: { csi+: { volumeAttributes+: volumeAttributes } } }, - '#withVolumeHandle':: d.fn(help='"VolumeHandle is the unique volume name returned by the CSI volume plugin’s CreateVolume to refer to the volume on all subsequent calls. Required."', args=[d.arg(name='volumeHandle', type=d.T.string)]), + '#withVolumeHandle':: d.fn(help='"volumeHandle is the unique volume name returned by the CSI volume plugin’s CreateVolume to refer to the volume on all subsequent calls. Required."', args=[d.arg(name='volumeHandle', type=d.T.string)]), withVolumeHandle(volumeHandle): { inlineVolumeSpec+: { csi+: { volumeHandle: volumeHandle } } }, }, '#fc':: d.obj(help='"Represents a Fibre Channel volume. Fibre Channel volumes can only be mounted as read/write once. Fibre Channel volumes support ownership management and SELinux relabeling."'), fc: { - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { inlineVolumeSpec+: { fc+: { fsType: fsType } } }, - '#withLun':: d.fn(help='"Optional: FC target lun number"', args=[d.arg(name='lun', type=d.T.integer)]), + '#withLun':: d.fn(help='"lun is Optional: FC target lun number"', args=[d.arg(name='lun', type=d.T.integer)]), withLun(lun): { inlineVolumeSpec+: { fc+: { lun: lun } } }, - '#withReadOnly':: d.fn(help='"Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly is Optional: Defaults to false (read/write). 
ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { inlineVolumeSpec+: { fc+: { readOnly: readOnly } } }, - '#withTargetWWNs':: d.fn(help='"Optional: FC target worldwide names (WWNs)"', args=[d.arg(name='targetWWNs', type=d.T.array)]), + '#withTargetWWNs':: d.fn(help='"targetWWNs is Optional: FC target worldwide names (WWNs)"', args=[d.arg(name='targetWWNs', type=d.T.array)]), withTargetWWNs(targetWWNs): { inlineVolumeSpec+: { fc+: { targetWWNs: if std.isArray(v=targetWWNs) then targetWWNs else [targetWWNs] } } }, - '#withTargetWWNsMixin':: d.fn(help='"Optional: FC target worldwide names (WWNs)"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='targetWWNs', type=d.T.array)]), + '#withTargetWWNsMixin':: d.fn(help='"targetWWNs is Optional: FC target worldwide names (WWNs)"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='targetWWNs', type=d.T.array)]), withTargetWWNsMixin(targetWWNs): { inlineVolumeSpec+: { fc+: { targetWWNs+: if std.isArray(v=targetWWNs) then targetWWNs else [targetWWNs] } } }, - '#withWwids':: d.fn(help='"Optional: FC volume world wide identifiers (wwids) Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously."', args=[d.arg(name='wwids', type=d.T.array)]), + '#withWwids':: d.fn(help='"wwids Optional: FC volume world wide identifiers (wwids) Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously."', args=[d.arg(name='wwids', type=d.T.array)]), withWwids(wwids): { inlineVolumeSpec+: { fc+: { wwids: if std.isArray(v=wwids) then wwids else [wwids] } } }, - '#withWwidsMixin':: d.fn(help='"Optional: FC volume world wide identifiers (wwids) Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='wwids', type=d.T.array)]), + '#withWwidsMixin':: d.fn(help='"wwids Optional: FC volume world wide identifiers (wwids) Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='wwids', type=d.T.array)]), withWwidsMixin(wwids): { inlineVolumeSpec+: { fc+: { wwids+: if std.isArray(v=wwids) then wwids else [wwids] } } }, }, '#flexVolume':: d.obj(help='"FlexPersistentVolumeSource represents a generic persistent volume resource that is provisioned/attached using an exec based plugin."'), flexVolume: { '#secretRef':: d.obj(help='"SecretReference represents a Secret Reference. 
It has enough information to retrieve secret in any namespace"'), secretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), withName(name): { inlineVolumeSpec+: { flexVolume+: { secretRef+: { name: name } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { inlineVolumeSpec+: { flexVolume+: { secretRef+: { namespace: namespace } } } }, }, - '#withDriver':: d.fn(help='"Driver is the name of the driver to use for this volume."', args=[d.arg(name='driver', type=d.T.string)]), + '#withDriver':: d.fn(help='"driver is the name of the driver to use for this volume."', args=[d.arg(name='driver', type=d.T.string)]), withDriver(driver): { inlineVolumeSpec+: { flexVolume+: { driver: driver } } }, - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". The default filesystem depends on FlexVolume script."', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType is the Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". The default filesystem depends on FlexVolume script."', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { inlineVolumeSpec+: { flexVolume+: { fsType: fsType } } }, - '#withOptions':: d.fn(help='"Optional: Extra command options if any."', args=[d.arg(name='options', type=d.T.object)]), + '#withOptions':: d.fn(help='"options is Optional: this field holds extra command options if any."', args=[d.arg(name='options', type=d.T.object)]), withOptions(options): { inlineVolumeSpec+: { flexVolume+: { options: options } } }, - '#withOptionsMixin':: d.fn(help='"Optional: Extra command options if any."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='options', type=d.T.object)]), + '#withOptionsMixin':: d.fn(help='"options is Optional: this field holds extra command options if any."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='options', type=d.T.object)]), withOptionsMixin(options): { inlineVolumeSpec+: { flexVolume+: { options+: options } } }, - '#withReadOnly':: d.fn(help='"Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly is Optional: defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { inlineVolumeSpec+: { flexVolume+: { readOnly: readOnly } } }, }, '#flocker':: d.obj(help='"Represents a Flocker volume mounted by the Flocker agent. One and only one of datasetName and datasetUUID should be set. 
Flocker volumes do not support ownership management or SELinux relabeling."'), flocker: { - '#withDatasetName':: d.fn(help='"Name of the dataset stored as metadata -> name on the dataset for Flocker should be considered as deprecated"', args=[d.arg(name='datasetName', type=d.T.string)]), + '#withDatasetName':: d.fn(help='"datasetName is Name of the dataset stored as metadata -> name on the dataset for Flocker should be considered as deprecated"', args=[d.arg(name='datasetName', type=d.T.string)]), withDatasetName(datasetName): { inlineVolumeSpec+: { flocker+: { datasetName: datasetName } } }, - '#withDatasetUUID':: d.fn(help='"UUID of the dataset. This is unique identifier of a Flocker dataset"', args=[d.arg(name='datasetUUID', type=d.T.string)]), + '#withDatasetUUID':: d.fn(help='"datasetUUID is the UUID of the dataset. This is unique identifier of a Flocker dataset"', args=[d.arg(name='datasetUUID', type=d.T.string)]), withDatasetUUID(datasetUUID): { inlineVolumeSpec+: { flocker+: { datasetUUID: datasetUUID } } }, }, '#gcePersistentDisk':: d.obj(help='"Represents a Persistent Disk resource in Google Compute Engine.\\n\\nA GCE PD must exist before mounting to a container. The disk must also be in the same GCE project and zone as the kubelet. A GCE PD can only be mounted as read/write once or read-only many times. GCE PDs support ownership management and SELinux relabeling."'), gcePersistentDisk: { - '#withFsType':: d.fn(help='"Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType is filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { inlineVolumeSpec+: { gcePersistentDisk+: { fsType: fsType } } }, - '#withPartition':: d.fn(help='"The partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \\"1\\". Similarly, the volume partition for /dev/sda is \\"0\\" (or you can leave the property empty). More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='partition', type=d.T.integer)]), + '#withPartition':: d.fn(help='"partition is the partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \\"1\\". Similarly, the volume partition for /dev/sda is \\"0\\" (or you can leave the property empty). More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='partition', type=d.T.integer)]), withPartition(partition): { inlineVolumeSpec+: { gcePersistentDisk+: { partition: partition } } }, - '#withPdName':: d.fn(help='"Unique name of the PD resource in GCE. Used to identify the disk in GCE. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='pdName', type=d.T.string)]), + '#withPdName':: d.fn(help='"pdName is unique name of the PD resource in GCE. Used to identify the disk in GCE. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='pdName', type=d.T.string)]), withPdName(pdName): { inlineVolumeSpec+: { gcePersistentDisk+: { pdName: pdName } } }, - '#withReadOnly':: d.fn(help='"ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { inlineVolumeSpec+: { gcePersistentDisk+: { readOnly: readOnly } } }, }, '#glusterfs':: d.obj(help='"Represents a Glusterfs mount that lasts the lifetime of a pod. Glusterfs volumes do not support ownership management or SELinux relabeling."'), glusterfs: { - '#withEndpoints':: d.fn(help='"EndpointsName is the endpoint name that details Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='endpoints', type=d.T.string)]), + '#withEndpoints':: d.fn(help='"endpoints is the endpoint name that details Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='endpoints', type=d.T.string)]), withEndpoints(endpoints): { inlineVolumeSpec+: { glusterfs+: { endpoints: endpoints } } }, - '#withEndpointsNamespace':: d.fn(help='"EndpointsNamespace is the namespace that contains Glusterfs endpoint. If this field is empty, the EndpointNamespace defaults to the same namespace as the bound PVC. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='endpointsNamespace', type=d.T.string)]), + '#withEndpointsNamespace':: d.fn(help='"endpointsNamespace is the namespace that contains Glusterfs endpoint. If this field is empty, the EndpointNamespace defaults to the same namespace as the bound PVC. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='endpointsNamespace', type=d.T.string)]), withEndpointsNamespace(endpointsNamespace): { inlineVolumeSpec+: { glusterfs+: { endpointsNamespace: endpointsNamespace } } }, - '#withPath':: d.fn(help='"Path is the Glusterfs volume path. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='path', type=d.T.string)]), + '#withPath':: d.fn(help='"path is the Glusterfs volume path. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='path', type=d.T.string)]), withPath(path): { inlineVolumeSpec+: { glusterfs+: { path: path } } }, - '#withReadOnly':: d.fn(help='"ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions. Defaults to false. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly here will force the Glusterfs volume to be mounted with read-only permissions. Defaults to false. 
More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { inlineVolumeSpec+: { glusterfs+: { readOnly: readOnly } } }, }, '#hostPath':: d.obj(help='"Represents a host path mapped into a pod. Host path volumes do not support ownership management or SELinux relabeling."'), hostPath: { - '#withPath':: d.fn(help='"Path of the directory on the host. If the path is a symlink, it will follow the link to the real path. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath"', args=[d.arg(name='path', type=d.T.string)]), + '#withPath':: d.fn(help='"path of the directory on the host. If the path is a symlink, it will follow the link to the real path. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath"', args=[d.arg(name='path', type=d.T.string)]), withPath(path): { inlineVolumeSpec+: { hostPath+: { path: path } } }, - '#withType':: d.fn(help='"Type for HostPath Volume Defaults to \\"\\" More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath"', args=[d.arg(name='type', type=d.T.string)]), + '#withType':: d.fn(help='"type for HostPath Volume Defaults to \\"\\" More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath"', args=[d.arg(name='type', type=d.T.string)]), withType(type): { inlineVolumeSpec+: { hostPath+: { type: type } } }, }, '#iscsi':: d.obj(help='"ISCSIPersistentVolumeSource represents an ISCSI disk. ISCSI volumes can only be mounted as read/write once. ISCSI volumes support ownership management and SELinux relabeling."'), iscsi: { '#secretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), secretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), withName(name): { inlineVolumeSpec+: { iscsi+: { secretRef+: { name: name } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { inlineVolumeSpec+: { iscsi+: { secretRef+: { namespace: namespace } } } }, }, - '#withChapAuthDiscovery':: d.fn(help='"whether support iSCSI Discovery CHAP authentication"', args=[d.arg(name='chapAuthDiscovery', type=d.T.boolean)]), + '#withChapAuthDiscovery':: d.fn(help='"chapAuthDiscovery defines whether support iSCSI Discovery CHAP authentication"', args=[d.arg(name='chapAuthDiscovery', type=d.T.boolean)]), withChapAuthDiscovery(chapAuthDiscovery): { inlineVolumeSpec+: { iscsi+: { chapAuthDiscovery: chapAuthDiscovery } } }, - '#withChapAuthSession':: d.fn(help='"whether support iSCSI Session CHAP authentication"', args=[d.arg(name='chapAuthSession', type=d.T.boolean)]), + '#withChapAuthSession':: d.fn(help='"chapAuthSession defines whether support iSCSI Session CHAP authentication"', args=[d.arg(name='chapAuthSession', type=d.T.boolean)]), withChapAuthSession(chapAuthSession): { inlineVolumeSpec+: { iscsi+: { chapAuthSession: chapAuthSession } } }, - '#withFsType':: d.fn(help='"Filesystem type of the volume that you want to mount. 
Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi"', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType is the filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi"', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { inlineVolumeSpec+: { iscsi+: { fsType: fsType } } }, - '#withInitiatorName':: d.fn(help='"Custom iSCSI Initiator Name. If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface : will be created for the connection."', args=[d.arg(name='initiatorName', type=d.T.string)]), + '#withInitiatorName':: d.fn(help='"initiatorName is the custom iSCSI Initiator Name. If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface : will be created for the connection."', args=[d.arg(name='initiatorName', type=d.T.string)]), withInitiatorName(initiatorName): { inlineVolumeSpec+: { iscsi+: { initiatorName: initiatorName } } }, - '#withIqn':: d.fn(help='"Target iSCSI Qualified Name."', args=[d.arg(name='iqn', type=d.T.string)]), + '#withIqn':: d.fn(help='"iqn is Target iSCSI Qualified Name."', args=[d.arg(name='iqn', type=d.T.string)]), withIqn(iqn): { inlineVolumeSpec+: { iscsi+: { iqn: iqn } } }, - '#withIscsiInterface':: d.fn(help="\"iSCSI Interface Name that uses an iSCSI transport. Defaults to 'default' (tcp).\"", args=[d.arg(name='iscsiInterface', type=d.T.string)]), + '#withIscsiInterface':: d.fn(help="\"iscsiInterface is the interface Name that uses an iSCSI transport. Defaults to 'default' (tcp).\"", args=[d.arg(name='iscsiInterface', type=d.T.string)]), withIscsiInterface(iscsiInterface): { inlineVolumeSpec+: { iscsi+: { iscsiInterface: iscsiInterface } } }, - '#withLun':: d.fn(help='"iSCSI Target Lun number."', args=[d.arg(name='lun', type=d.T.integer)]), + '#withLun':: d.fn(help='"lun is iSCSI Target Lun number."', args=[d.arg(name='lun', type=d.T.integer)]), withLun(lun): { inlineVolumeSpec+: { iscsi+: { lun: lun } } }, - '#withPortals':: d.fn(help='"iSCSI Target Portal List. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260)."', args=[d.arg(name='portals', type=d.T.array)]), + '#withPortals':: d.fn(help='"portals is the iSCSI Target Portal List. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260)."', args=[d.arg(name='portals', type=d.T.array)]), withPortals(portals): { inlineVolumeSpec+: { iscsi+: { portals: if std.isArray(v=portals) then portals else [portals] } } }, - '#withPortalsMixin':: d.fn(help='"iSCSI Target Portal List. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260)."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='portals', type=d.T.array)]), + '#withPortalsMixin':: d.fn(help='"portals is the iSCSI Target Portal List. 
The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260)."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='portals', type=d.T.array)]), withPortalsMixin(portals): { inlineVolumeSpec+: { iscsi+: { portals+: if std.isArray(v=portals) then portals else [portals] } } }, - '#withReadOnly':: d.fn(help='"ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false."', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { inlineVolumeSpec+: { iscsi+: { readOnly: readOnly } } }, - '#withTargetPortal':: d.fn(help='"iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260)."', args=[d.arg(name='targetPortal', type=d.T.string)]), + '#withTargetPortal':: d.fn(help='"targetPortal is iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260)."', args=[d.arg(name='targetPortal', type=d.T.string)]), withTargetPortal(targetPortal): { inlineVolumeSpec+: { iscsi+: { targetPortal: targetPortal } } }, }, '#local':: d.obj(help='"Local represents directly-attached storage with node affinity (Beta feature)"'), 'local': { - '#withFsType':: d.fn(help='"Filesystem type to mount. It applies only when the Path is a block device. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". The default value is to auto-select a fileystem if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType is the filesystem type to mount. It applies only when the Path is a block device. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". The default value is to auto-select a filesystem if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { inlineVolumeSpec+: { 'local'+: { fsType: fsType } } }, - '#withPath':: d.fn(help='"The full path to the volume on the node. It can be either a directory or block device (disk, partition, ...)."', args=[d.arg(name='path', type=d.T.string)]), + '#withPath':: d.fn(help='"path of the full path to the volume on the node. It can be either a directory or block device (disk, partition, ...)."', args=[d.arg(name='path', type=d.T.string)]), withPath(path): { inlineVolumeSpec+: { 'local'+: { path: path } } }, }, '#nfs':: d.obj(help='"Represents an NFS mount that lasts the lifetime of a pod. NFS volumes do not support ownership management or SELinux relabeling."'), nfs: { - '#withPath':: d.fn(help='"Path that is exported by the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs"', args=[d.arg(name='path', type=d.T.string)]), + '#withPath':: d.fn(help='"path that is exported by the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs"', args=[d.arg(name='path', type=d.T.string)]), withPath(path): { inlineVolumeSpec+: { nfs+: { path: path } } }, - '#withReadOnly':: d.fn(help='"ReadOnly here will force the NFS export to be mounted with read-only permissions. Defaults to false. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs"', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly here will force the NFS export to be mounted with read-only permissions. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs"', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { inlineVolumeSpec+: { nfs+: { readOnly: readOnly } } }, - '#withServer':: d.fn(help='"Server is the hostname or IP address of the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs"', args=[d.arg(name='server', type=d.T.string)]), + '#withServer':: d.fn(help='"server is the hostname or IP address of the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs"', args=[d.arg(name='server', type=d.T.string)]), withServer(server): { inlineVolumeSpec+: { nfs+: { server: server } } }, }, '#nodeAffinity':: d.obj(help='"VolumeNodeAffinity defines constraints that limit what nodes this volume can be accessed from."'), @@ -271,87 +278,87 @@ }, '#photonPersistentDisk':: d.obj(help='"Represents a Photon Controller persistent disk resource."'), photonPersistentDisk: { - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { inlineVolumeSpec+: { photonPersistentDisk+: { fsType: fsType } } }, - '#withPdID':: d.fn(help='"ID that identifies Photon Controller persistent disk"', args=[d.arg(name='pdID', type=d.T.string)]), + '#withPdID':: d.fn(help='"pdID is the ID that identifies Photon Controller persistent disk"', args=[d.arg(name='pdID', type=d.T.string)]), withPdID(pdID): { inlineVolumeSpec+: { photonPersistentDisk+: { pdID: pdID } } }, }, '#portworxVolume':: d.obj(help='"PortworxVolumeSource represents a Portworx volume resource."'), portworxVolume: { - '#withFsType':: d.fn(help='"FSType represents the filesystem type to mount Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fSType represents the filesystem type to mount Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { inlineVolumeSpec+: { portworxVolume+: { fsType: fsType } } }, - '#withReadOnly':: d.fn(help='"Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly defaults to false (read/write). 
ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { inlineVolumeSpec+: { portworxVolume+: { readOnly: readOnly } } }, - '#withVolumeID':: d.fn(help='"VolumeID uniquely identifies a Portworx volume"', args=[d.arg(name='volumeID', type=d.T.string)]), + '#withVolumeID':: d.fn(help='"volumeID uniquely identifies a Portworx volume"', args=[d.arg(name='volumeID', type=d.T.string)]), withVolumeID(volumeID): { inlineVolumeSpec+: { portworxVolume+: { volumeID: volumeID } } }, }, '#quobyte':: d.obj(help='"Represents a Quobyte mount that lasts the lifetime of a pod. Quobyte volumes do not support ownership management or SELinux relabeling."'), quobyte: { - '#withGroup':: d.fn(help='"Group to map volume access to Default is no group"', args=[d.arg(name='group', type=d.T.string)]), + '#withGroup':: d.fn(help='"group to map volume access to Default is no group"', args=[d.arg(name='group', type=d.T.string)]), withGroup(group): { inlineVolumeSpec+: { quobyte+: { group: group } } }, - '#withReadOnly':: d.fn(help='"ReadOnly here will force the Quobyte volume to be mounted with read-only permissions. Defaults to false."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly here will force the Quobyte volume to be mounted with read-only permissions. Defaults to false."', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { inlineVolumeSpec+: { quobyte+: { readOnly: readOnly } } }, - '#withRegistry':: d.fn(help='"Registry represents a single or multiple Quobyte Registry services specified as a string as host:port pair (multiple entries are separated with commas) which acts as the central registry for volumes"', args=[d.arg(name='registry', type=d.T.string)]), + '#withRegistry':: d.fn(help='"registry represents a single or multiple Quobyte Registry services specified as a string as host:port pair (multiple entries are separated with commas) which acts as the central registry for volumes"', args=[d.arg(name='registry', type=d.T.string)]), withRegistry(registry): { inlineVolumeSpec+: { quobyte+: { registry: registry } } }, - '#withTenant':: d.fn(help='"Tenant owning the given Quobyte volume in the Backend Used with dynamically provisioned Quobyte volumes, value is set by the plugin"', args=[d.arg(name='tenant', type=d.T.string)]), + '#withTenant':: d.fn(help='"tenant owning the given Quobyte volume in the Backend Used with dynamically provisioned Quobyte volumes, value is set by the plugin"', args=[d.arg(name='tenant', type=d.T.string)]), withTenant(tenant): { inlineVolumeSpec+: { quobyte+: { tenant: tenant } } }, - '#withUser':: d.fn(help='"User to map volume access to Defaults to serivceaccount user"', args=[d.arg(name='user', type=d.T.string)]), + '#withUser':: d.fn(help='"user to map volume access to Defaults to serivceaccount user"', args=[d.arg(name='user', type=d.T.string)]), withUser(user): { inlineVolumeSpec+: { quobyte+: { user: user } } }, - '#withVolume':: d.fn(help='"Volume is a string that references an already created Quobyte volume by name."', args=[d.arg(name='volume', type=d.T.string)]), + '#withVolume':: d.fn(help='"volume is a string that references an already created Quobyte volume by name."', args=[d.arg(name='volume', type=d.T.string)]), withVolume(volume): { inlineVolumeSpec+: { quobyte+: { volume: volume } } }, }, '#rbd':: d.obj(help='"Represents a Rados Block Device mount that lasts the lifetime of a pod. 
RBD volumes support ownership management and SELinux relabeling."'), rbd: { '#secretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), secretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), withName(name): { inlineVolumeSpec+: { rbd+: { secretRef+: { name: name } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { inlineVolumeSpec+: { rbd+: { secretRef+: { namespace: namespace } } } }, }, - '#withFsType':: d.fn(help='"Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd"', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType is the filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd"', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { inlineVolumeSpec+: { rbd+: { fsType: fsType } } }, - '#withImage':: d.fn(help='"The rados image name. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='image', type=d.T.string)]), + '#withImage':: d.fn(help='"image is the rados image name. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='image', type=d.T.string)]), withImage(image): { inlineVolumeSpec+: { rbd+: { image: image } } }, - '#withKeyring':: d.fn(help='"Keyring is the path to key ring for RBDUser. Default is /etc/ceph/keyring. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='keyring', type=d.T.string)]), + '#withKeyring':: d.fn(help='"keyring is the path to key ring for RBDUser. Default is /etc/ceph/keyring. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='keyring', type=d.T.string)]), withKeyring(keyring): { inlineVolumeSpec+: { rbd+: { keyring: keyring } } }, - '#withMonitors':: d.fn(help='"A collection of Ceph monitors. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='monitors', type=d.T.array)]), + '#withMonitors':: d.fn(help='"monitors is a collection of Ceph monitors. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='monitors', type=d.T.array)]), withMonitors(monitors): { inlineVolumeSpec+: { rbd+: { monitors: if std.isArray(v=monitors) then monitors else [monitors] } } }, - '#withMonitorsMixin':: d.fn(help='"A collection of Ceph monitors. 
More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='monitors', type=d.T.array)]), + '#withMonitorsMixin':: d.fn(help='"monitors is a collection of Ceph monitors. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='monitors', type=d.T.array)]), withMonitorsMixin(monitors): { inlineVolumeSpec+: { rbd+: { monitors+: if std.isArray(v=monitors) then monitors else [monitors] } } }, - '#withPool':: d.fn(help='"The rados pool name. Default is rbd. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='pool', type=d.T.string)]), + '#withPool':: d.fn(help='"pool is the rados pool name. Default is rbd. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='pool', type=d.T.string)]), withPool(pool): { inlineVolumeSpec+: { rbd+: { pool: pool } } }, - '#withReadOnly':: d.fn(help='"ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { inlineVolumeSpec+: { rbd+: { readOnly: readOnly } } }, - '#withUser':: d.fn(help='"The rados user name. Default is admin. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='user', type=d.T.string)]), + '#withUser':: d.fn(help='"user is the rados user name. Default is admin. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='user', type=d.T.string)]), withUser(user): { inlineVolumeSpec+: { rbd+: { user: user } } }, }, '#scaleIO':: d.obj(help='"ScaleIOPersistentVolumeSource represents a persistent ScaleIO volume"'), scaleIO: { '#secretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), secretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), withName(name): { inlineVolumeSpec+: { scaleIO+: { secretRef+: { name: name } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { inlineVolumeSpec+: { scaleIO+: { secretRef+: { namespace: namespace } } } }, }, - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Default is \\"xfs\\', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". 
Default is \\"xfs\\', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { inlineVolumeSpec+: { scaleIO+: { fsType: fsType } } }, - '#withGateway':: d.fn(help='"The host address of the ScaleIO API Gateway."', args=[d.arg(name='gateway', type=d.T.string)]), + '#withGateway':: d.fn(help='"gateway is the host address of the ScaleIO API Gateway."', args=[d.arg(name='gateway', type=d.T.string)]), withGateway(gateway): { inlineVolumeSpec+: { scaleIO+: { gateway: gateway } } }, - '#withProtectionDomain':: d.fn(help='"The name of the ScaleIO Protection Domain for the configured storage."', args=[d.arg(name='protectionDomain', type=d.T.string)]), + '#withProtectionDomain':: d.fn(help='"protectionDomain is the name of the ScaleIO Protection Domain for the configured storage."', args=[d.arg(name='protectionDomain', type=d.T.string)]), withProtectionDomain(protectionDomain): { inlineVolumeSpec+: { scaleIO+: { protectionDomain: protectionDomain } } }, - '#withReadOnly':: d.fn(help='"Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { inlineVolumeSpec+: { scaleIO+: { readOnly: readOnly } } }, - '#withSslEnabled':: d.fn(help='"Flag to enable/disable SSL communication with Gateway, default false"', args=[d.arg(name='sslEnabled', type=d.T.boolean)]), + '#withSslEnabled':: d.fn(help='"sslEnabled is the flag to enable/disable SSL communication with Gateway, default false"', args=[d.arg(name='sslEnabled', type=d.T.boolean)]), withSslEnabled(sslEnabled): { inlineVolumeSpec+: { scaleIO+: { sslEnabled: sslEnabled } } }, - '#withStorageMode':: d.fn(help='"Indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. Default is ThinProvisioned."', args=[d.arg(name='storageMode', type=d.T.string)]), + '#withStorageMode':: d.fn(help='"storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. 
Default is ThinProvisioned."', args=[d.arg(name='storageMode', type=d.T.string)]), withStorageMode(storageMode): { inlineVolumeSpec+: { scaleIO+: { storageMode: storageMode } } }, - '#withStoragePool':: d.fn(help='"The ScaleIO Storage Pool associated with the protection domain."', args=[d.arg(name='storagePool', type=d.T.string)]), + '#withStoragePool':: d.fn(help='"storagePool is the ScaleIO Storage Pool associated with the protection domain."', args=[d.arg(name='storagePool', type=d.T.string)]), withStoragePool(storagePool): { inlineVolumeSpec+: { scaleIO+: { storagePool: storagePool } } }, - '#withSystem':: d.fn(help='"The name of the storage system as configured in ScaleIO."', args=[d.arg(name='system', type=d.T.string)]), + '#withSystem':: d.fn(help='"system is the name of the storage system as configured in ScaleIO."', args=[d.arg(name='system', type=d.T.string)]), withSystem(system): { inlineVolumeSpec+: { scaleIO+: { system: system } } }, - '#withVolumeName':: d.fn(help='"The name of a volume already created in the ScaleIO system that is associated with this volume source."', args=[d.arg(name='volumeName', type=d.T.string)]), + '#withVolumeName':: d.fn(help='"volumeName is the name of a volume already created in the ScaleIO system that is associated with this volume source."', args=[d.arg(name='volumeName', type=d.T.string)]), withVolumeName(volumeName): { inlineVolumeSpec+: { scaleIO+: { volumeName: volumeName } } }, }, '#storageos':: d.obj(help='"Represents a StorageOS persistent volume resource."'), @@ -373,46 +380,48 @@ '#withUid':: d.fn(help='"UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { inlineVolumeSpec+: { storageos+: { secretRef+: { uid: uid } } } }, }, - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { inlineVolumeSpec+: { storageos+: { fsType: fsType } } }, - '#withReadOnly':: d.fn(help='"Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { inlineVolumeSpec+: { storageos+: { readOnly: readOnly } } }, - '#withVolumeName':: d.fn(help='"VolumeName is the human-readable name of the StorageOS volume. Volume names are only unique within a namespace."', args=[d.arg(name='volumeName', type=d.T.string)]), + '#withVolumeName':: d.fn(help='"volumeName is the human-readable name of the StorageOS volume. Volume names are only unique within a namespace."', args=[d.arg(name='volumeName', type=d.T.string)]), withVolumeName(volumeName): { inlineVolumeSpec+: { storageos+: { volumeName: volumeName } } }, - '#withVolumeNamespace':: d.fn(help="\"VolumeNamespace specifies the scope of the volume within StorageOS. 
If no namespace is specified then the Pod's namespace will be used. This allows the Kubernetes name scoping to be mirrored within StorageOS for tighter integration. Set VolumeName to any name to override the default behaviour. Set to \\\"default\\\" if you are not using namespaces within StorageOS. Namespaces that do not pre-exist within StorageOS will be created.\"", args=[d.arg(name='volumeNamespace', type=d.T.string)]), + '#withVolumeNamespace':: d.fn(help="\"volumeNamespace specifies the scope of the volume within StorageOS. If no namespace is specified then the Pod's namespace will be used. This allows the Kubernetes name scoping to be mirrored within StorageOS for tighter integration. Set VolumeName to any name to override the default behaviour. Set to \\\"default\\\" if you are not using namespaces within StorageOS. Namespaces that do not pre-exist within StorageOS will be created.\"", args=[d.arg(name='volumeNamespace', type=d.T.string)]), withVolumeNamespace(volumeNamespace): { inlineVolumeSpec+: { storageos+: { volumeNamespace: volumeNamespace } } }, }, '#vsphereVolume':: d.obj(help='"Represents a vSphere volume resource."'), vsphereVolume: { - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType is filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { inlineVolumeSpec+: { vsphereVolume+: { fsType: fsType } } }, - '#withStoragePolicyID':: d.fn(help='"Storage Policy Based Management (SPBM) profile ID associated with the StoragePolicyName."', args=[d.arg(name='storagePolicyID', type=d.T.string)]), + '#withStoragePolicyID':: d.fn(help='"storagePolicyID is the storage Policy Based Management (SPBM) profile ID associated with the StoragePolicyName."', args=[d.arg(name='storagePolicyID', type=d.T.string)]), withStoragePolicyID(storagePolicyID): { inlineVolumeSpec+: { vsphereVolume+: { storagePolicyID: storagePolicyID } } }, - '#withStoragePolicyName':: d.fn(help='"Storage Policy Based Management (SPBM) profile name."', args=[d.arg(name='storagePolicyName', type=d.T.string)]), + '#withStoragePolicyName':: d.fn(help='"storagePolicyName is the storage Policy Based Management (SPBM) profile name."', args=[d.arg(name='storagePolicyName', type=d.T.string)]), withStoragePolicyName(storagePolicyName): { inlineVolumeSpec+: { vsphereVolume+: { storagePolicyName: storagePolicyName } } }, - '#withVolumePath':: d.fn(help='"Path that identifies vSphere volume vmdk"', args=[d.arg(name='volumePath', type=d.T.string)]), + '#withVolumePath':: d.fn(help='"volumePath is the path that identifies vSphere volume vmdk"', args=[d.arg(name='volumePath', type=d.T.string)]), withVolumePath(volumePath): { inlineVolumeSpec+: { vsphereVolume+: { volumePath: volumePath } } }, }, - '#withAccessModes':: d.fn(help='"AccessModes contains all ways the volume can be mounted. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes"', args=[d.arg(name='accessModes', type=d.T.array)]), + '#withAccessModes':: d.fn(help='"accessModes contains all ways the volume can be mounted. 
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes"', args=[d.arg(name='accessModes', type=d.T.array)]), withAccessModes(accessModes): { inlineVolumeSpec+: { accessModes: if std.isArray(v=accessModes) then accessModes else [accessModes] } }, - '#withAccessModesMixin':: d.fn(help='"AccessModes contains all ways the volume can be mounted. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='accessModes', type=d.T.array)]), + '#withAccessModesMixin':: d.fn(help='"accessModes contains all ways the volume can be mounted. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='accessModes', type=d.T.array)]), withAccessModesMixin(accessModes): { inlineVolumeSpec+: { accessModes+: if std.isArray(v=accessModes) then accessModes else [accessModes] } }, - '#withCapacity':: d.fn(help="\"A description of the persistent volume's resources and capacity. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity\"", args=[d.arg(name='capacity', type=d.T.object)]), + '#withCapacity':: d.fn(help="\"capacity is the description of the persistent volume's resources and capacity. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity\"", args=[d.arg(name='capacity', type=d.T.object)]), withCapacity(capacity): { inlineVolumeSpec+: { capacity: capacity } }, - '#withCapacityMixin':: d.fn(help="\"A description of the persistent volume's resources and capacity. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='capacity', type=d.T.object)]), + '#withCapacityMixin':: d.fn(help="\"capacity is the description of the persistent volume's resources and capacity. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='capacity', type=d.T.object)]), withCapacityMixin(capacity): { inlineVolumeSpec+: { capacity+: capacity } }, - '#withMountOptions':: d.fn(help='"A list of mount options, e.g. [\\"ro\\", \\"soft\\"]. Not validated - mount will simply fail if one is invalid. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#mount-options"', args=[d.arg(name='mountOptions', type=d.T.array)]), + '#withMountOptions':: d.fn(help='"mountOptions is the list of mount options, e.g. [\\"ro\\", \\"soft\\"]. Not validated - mount will simply fail if one is invalid. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#mount-options"', args=[d.arg(name='mountOptions', type=d.T.array)]), withMountOptions(mountOptions): { inlineVolumeSpec+: { mountOptions: if std.isArray(v=mountOptions) then mountOptions else [mountOptions] } }, - '#withMountOptionsMixin':: d.fn(help='"A list of mount options, e.g. [\\"ro\\", \\"soft\\"]. Not validated - mount will simply fail if one is invalid. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#mount-options"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='mountOptions', type=d.T.array)]), + '#withMountOptionsMixin':: d.fn(help='"mountOptions is the list of mount options, e.g. [\\"ro\\", \\"soft\\"]. 
Not validated - mount will simply fail if one is invalid. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#mount-options"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='mountOptions', type=d.T.array)]), withMountOptionsMixin(mountOptions): { inlineVolumeSpec+: { mountOptions+: if std.isArray(v=mountOptions) then mountOptions else [mountOptions] } }, - '#withPersistentVolumeReclaimPolicy':: d.fn(help='"What happens to a persistent volume when released from its claim. Valid options are Retain (default for manually created PersistentVolumes), Delete (default for dynamically provisioned PersistentVolumes), and Recycle (deprecated). Recycle must be supported by the volume plugin underlying this PersistentVolume. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#reclaiming"', args=[d.arg(name='persistentVolumeReclaimPolicy', type=d.T.string)]), + '#withPersistentVolumeReclaimPolicy':: d.fn(help='"persistentVolumeReclaimPolicy defines what happens to a persistent volume when released from its claim. Valid options are Retain (default for manually created PersistentVolumes), Delete (default for dynamically provisioned PersistentVolumes), and Recycle (deprecated). Recycle must be supported by the volume plugin underlying this PersistentVolume. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#reclaiming"', args=[d.arg(name='persistentVolumeReclaimPolicy', type=d.T.string)]), withPersistentVolumeReclaimPolicy(persistentVolumeReclaimPolicy): { inlineVolumeSpec+: { persistentVolumeReclaimPolicy: persistentVolumeReclaimPolicy } }, - '#withStorageClassName':: d.fn(help='"Name of StorageClass to which this persistent volume belongs. Empty value means that this volume does not belong to any StorageClass."', args=[d.arg(name='storageClassName', type=d.T.string)]), + '#withStorageClassName':: d.fn(help='"storageClassName is the name of StorageClass to which this persistent volume belongs. Empty value means that this volume does not belong to any StorageClass."', args=[d.arg(name='storageClassName', type=d.T.string)]), withStorageClassName(storageClassName): { inlineVolumeSpec+: { storageClassName: storageClassName } }, + '#withVolumeAttributesClassName':: d.fn(help='"Name of VolumeAttributesClass to which this persistent volume belongs. Empty value is not allowed. When this field is not set, it indicates that this volume does not belong to any VolumeAttributesClass. This field is mutable and can be changed by the CSI driver after a volume has been updated successfully to a new class. For an unbound PersistentVolume, the volumeAttributesClassName will be matched with unbound PersistentVolumeClaims during the binding process. This is an alpha field and requires enabling VolumeAttributesClass feature."', args=[d.arg(name='volumeAttributesClassName', type=d.T.string)]), + withVolumeAttributesClassName(volumeAttributesClassName): { inlineVolumeSpec+: { volumeAttributesClassName: volumeAttributesClassName } }, '#withVolumeMode':: d.fn(help='"volumeMode defines if a volume is intended to be used with a formatted filesystem or to remain in raw block state. 
Value of Filesystem is implied when not included in spec."', args=[d.arg(name='volumeMode', type=d.T.string)]), withVolumeMode(volumeMode): { inlineVolumeSpec+: { volumeMode: volumeMode } }, }, - '#withPersistentVolumeName':: d.fn(help='"Name of the persistent volume to attach."', args=[d.arg(name='persistentVolumeName', type=d.T.string)]), + '#withPersistentVolumeName':: d.fn(help='"persistentVolumeName represents the name of the persistent volume to attach."', args=[d.arg(name='persistentVolumeName', type=d.T.string)]), withPersistentVolumeName(persistentVolumeName): { persistentVolumeName: persistentVolumeName }, '#mixin': 'ignore', mixin: self, diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1beta1/volumeAttachmentSpec.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/storage/v1/volumeAttachmentSpec.libsonnet similarity index 59% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1beta1/volumeAttachmentSpec.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/storage/v1/volumeAttachmentSpec.libsonnet index 8b092052a56..7baf41f75d9 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1beta1/volumeAttachmentSpec.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/storage/v1/volumeAttachmentSpec.libsonnet @@ -7,77 +7,77 @@ inlineVolumeSpec: { '#awsElasticBlockStore':: d.obj(help='"Represents a Persistent Disk resource in AWS.\\n\\nAn AWS EBS disk must exist before mounting to a container. The disk must also be in the same AWS zone as the kubelet. An AWS EBS disk can only be mounted as read/write once. AWS EBS volumes support ownership management and SELinux relabeling."'), awsElasticBlockStore: { - '#withFsType':: d.fn(help='"Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore"', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType is the filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore"', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { source+: { inlineVolumeSpec+: { awsElasticBlockStore+: { fsType: fsType } } } }, - '#withPartition':: d.fn(help='"The partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \\"1\\". Similarly, the volume partition for /dev/sda is \\"0\\" (or you can leave the property empty)."', args=[d.arg(name='partition', type=d.T.integer)]), + '#withPartition':: d.fn(help='"partition is the partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \\"1\\". 
Similarly, the volume partition for /dev/sda is \\"0\\" (or you can leave the property empty)."', args=[d.arg(name='partition', type=d.T.integer)]), withPartition(partition): { source+: { inlineVolumeSpec+: { awsElasticBlockStore+: { partition: partition } } } }, - '#withReadOnly':: d.fn(help='"Specify \\"true\\" to force and set the ReadOnly property in VolumeMounts to \\"true\\". If omitted, the default is \\"false\\". More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore"', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly value true will force the readOnly setting in VolumeMounts. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore"', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { source+: { inlineVolumeSpec+: { awsElasticBlockStore+: { readOnly: readOnly } } } }, - '#withVolumeID':: d.fn(help='"Unique ID of the persistent disk resource in AWS (Amazon EBS volume). More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore"', args=[d.arg(name='volumeID', type=d.T.string)]), + '#withVolumeID':: d.fn(help='"volumeID is unique ID of the persistent disk resource in AWS (Amazon EBS volume). More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore"', args=[d.arg(name='volumeID', type=d.T.string)]), withVolumeID(volumeID): { source+: { inlineVolumeSpec+: { awsElasticBlockStore+: { volumeID: volumeID } } } }, }, '#azureDisk':: d.obj(help='"AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod."'), azureDisk: { - '#withCachingMode':: d.fn(help='"Host Caching mode: None, Read Only, Read Write."', args=[d.arg(name='cachingMode', type=d.T.string)]), + '#withCachingMode':: d.fn(help='"cachingMode is the Host Caching mode: None, Read Only, Read Write."', args=[d.arg(name='cachingMode', type=d.T.string)]), withCachingMode(cachingMode): { source+: { inlineVolumeSpec+: { azureDisk+: { cachingMode: cachingMode } } } }, - '#withDiskName':: d.fn(help='"The Name of the data disk in the blob storage"', args=[d.arg(name='diskName', type=d.T.string)]), + '#withDiskName':: d.fn(help='"diskName is the Name of the data disk in the blob storage"', args=[d.arg(name='diskName', type=d.T.string)]), withDiskName(diskName): { source+: { inlineVolumeSpec+: { azureDisk+: { diskName: diskName } } } }, - '#withDiskURI':: d.fn(help='"The URI the data disk in the blob storage"', args=[d.arg(name='diskURI', type=d.T.string)]), + '#withDiskURI':: d.fn(help='"diskURI is the URI of data disk in the blob storage"', args=[d.arg(name='diskURI', type=d.T.string)]), withDiskURI(diskURI): { source+: { inlineVolumeSpec+: { azureDisk+: { diskURI: diskURI } } } }, - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType is Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". 
Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { source+: { inlineVolumeSpec+: { azureDisk+: { fsType: fsType } } } }, - '#withKind':: d.fn(help='"Expected values Shared: multiple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared"', args=[d.arg(name='kind', type=d.T.string)]), + '#withKind':: d.fn(help='"kind expected values are Shared: multiple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared"', args=[d.arg(name='kind', type=d.T.string)]), withKind(kind): { source+: { inlineVolumeSpec+: { azureDisk+: { kind: kind } } } }, - '#withReadOnly':: d.fn(help='"Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { source+: { inlineVolumeSpec+: { azureDisk+: { readOnly: readOnly } } } }, }, '#azureFile':: d.obj(help='"AzureFile represents an Azure File Service mount on the host and bind mount to the pod."'), azureFile: { - '#withReadOnly':: d.fn(help='"Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { source+: { inlineVolumeSpec+: { azureFile+: { readOnly: readOnly } } } }, - '#withSecretName':: d.fn(help='"the name of secret that contains Azure Storage Account Name and Key"', args=[d.arg(name='secretName', type=d.T.string)]), + '#withSecretName':: d.fn(help='"secretName is the name of secret that contains Azure Storage Account Name and Key"', args=[d.arg(name='secretName', type=d.T.string)]), withSecretName(secretName): { source+: { inlineVolumeSpec+: { azureFile+: { secretName: secretName } } } }, - '#withSecretNamespace':: d.fn(help='"the namespace of the secret that contains Azure Storage Account Name and Key default is the same as the Pod"', args=[d.arg(name='secretNamespace', type=d.T.string)]), + '#withSecretNamespace':: d.fn(help='"secretNamespace is the namespace of the secret that contains Azure Storage Account Name and Key default is the same as the Pod"', args=[d.arg(name='secretNamespace', type=d.T.string)]), withSecretNamespace(secretNamespace): { source+: { inlineVolumeSpec+: { azureFile+: { secretNamespace: secretNamespace } } } }, - '#withShareName':: d.fn(help='"Share Name"', args=[d.arg(name='shareName', type=d.T.string)]), + '#withShareName':: d.fn(help='"shareName is the azure Share Name"', args=[d.arg(name='shareName', type=d.T.string)]), withShareName(shareName): { source+: { inlineVolumeSpec+: { azureFile+: { shareName: shareName } } } }, }, '#cephfs':: d.obj(help='"Represents a Ceph Filesystem mount that lasts the lifetime of a pod Cephfs volumes do not support ownership management or SELinux relabeling."'), cephfs: { '#secretRef':: d.obj(help='"SecretReference represents a Secret Reference. 
It has enough information to retrieve secret in any namespace"'), secretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), withName(name): { source+: { inlineVolumeSpec+: { cephfs+: { secretRef+: { name: name } } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { source+: { inlineVolumeSpec+: { cephfs+: { secretRef+: { namespace: namespace } } } } }, }, - '#withMonitors':: d.fn(help='"Required: Monitors is a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='monitors', type=d.T.array)]), + '#withMonitors':: d.fn(help='"monitors is Required: Monitors is a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='monitors', type=d.T.array)]), withMonitors(monitors): { source+: { inlineVolumeSpec+: { cephfs+: { monitors: if std.isArray(v=monitors) then monitors else [monitors] } } } }, - '#withMonitorsMixin':: d.fn(help='"Required: Monitors is a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='monitors', type=d.T.array)]), + '#withMonitorsMixin':: d.fn(help='"monitors is Required: Monitors is a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='monitors', type=d.T.array)]), withMonitorsMixin(monitors): { source+: { inlineVolumeSpec+: { cephfs+: { monitors+: if std.isArray(v=monitors) then monitors else [monitors] } } } }, - '#withPath':: d.fn(help='"Optional: Used as the mounted root, rather than the full Ceph tree, default is /"', args=[d.arg(name='path', type=d.T.string)]), + '#withPath':: d.fn(help='"path is Optional: Used as the mounted root, rather than the full Ceph tree, default is /"', args=[d.arg(name='path', type=d.T.string)]), withPath(path): { source+: { inlineVolumeSpec+: { cephfs+: { path: path } } } }, - '#withReadOnly':: d.fn(help='"Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly is Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. 
More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { source+: { inlineVolumeSpec+: { cephfs+: { readOnly: readOnly } } } }, - '#withSecretFile':: d.fn(help='"Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='secretFile', type=d.T.string)]), + '#withSecretFile':: d.fn(help='"secretFile is Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='secretFile', type=d.T.string)]), withSecretFile(secretFile): { source+: { inlineVolumeSpec+: { cephfs+: { secretFile: secretFile } } } }, - '#withUser':: d.fn(help='"Optional: User is the rados user name, default is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='user', type=d.T.string)]), + '#withUser':: d.fn(help='"user is Optional: User is the rados user name, default is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='user', type=d.T.string)]), withUser(user): { source+: { inlineVolumeSpec+: { cephfs+: { user: user } } } }, }, '#cinder':: d.obj(help='"Represents a cinder volume resource in Openstack. A Cinder volume must exist before mounting to a container. The volume must also be in the same region as the kubelet. Cinder volumes support ownership management and SELinux relabeling."'), cinder: { '#secretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), secretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), withName(name): { source+: { inlineVolumeSpec+: { cinder+: { secretRef+: { name: name } } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { source+: { inlineVolumeSpec+: { cinder+: { secretRef+: { namespace: namespace } } } } }, }, - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://examples.k8s.io/mysql-cinder-pd/README.md"', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType Filesystem type to mount. Must be a filesystem type supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://examples.k8s.io/mysql-cinder-pd/README.md"', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { source+: { inlineVolumeSpec+: { cinder+: { fsType: fsType } } } }, - '#withReadOnly':: d.fn(help='"Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. 
More info: https://examples.k8s.io/mysql-cinder-pd/README.md"', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly is Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/mysql-cinder-pd/README.md"', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { source+: { inlineVolumeSpec+: { cinder+: { readOnly: readOnly } } } }, - '#withVolumeID':: d.fn(help='"volume id used to identify the volume in cinder. More info: https://examples.k8s.io/mysql-cinder-pd/README.md"', args=[d.arg(name='volumeID', type=d.T.string)]), + '#withVolumeID':: d.fn(help='"volumeID used to identify the volume in cinder. More info: https://examples.k8s.io/mysql-cinder-pd/README.md"', args=[d.arg(name='volumeID', type=d.T.string)]), withVolumeID(volumeID): { source+: { inlineVolumeSpec+: { cinder+: { volumeID: volumeID } } } }, }, '#claimRef':: d.obj(help='"ObjectReference contains enough information to let you inspect or modify the referred object."'), @@ -101,164 +101,171 @@ csi: { '#controllerExpandSecretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), controllerExpandSecretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), withName(name): { source+: { inlineVolumeSpec+: { csi+: { controllerExpandSecretRef+: { name: name } } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { source+: { inlineVolumeSpec+: { csi+: { controllerExpandSecretRef+: { namespace: namespace } } } } }, }, '#controllerPublishSecretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), controllerPublishSecretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), withName(name): { source+: { inlineVolumeSpec+: { csi+: { controllerPublishSecretRef+: { name: name } } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { source+: { inlineVolumeSpec+: { csi+: { controllerPublishSecretRef+: { namespace: namespace } } } } }, }, + '#nodeExpandSecretRef':: d.obj(help='"SecretReference represents a Secret Reference. 
It has enough information to retrieve secret in any namespace"'), + nodeExpandSecretRef: { + '#withName':: d.fn(help='"name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { source+: { inlineVolumeSpec+: { csi+: { nodeExpandSecretRef+: { name: name } } } } }, + '#withNamespace':: d.fn(help='"namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), + withNamespace(namespace): { source+: { inlineVolumeSpec+: { csi+: { nodeExpandSecretRef+: { namespace: namespace } } } } }, + }, '#nodePublishSecretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), nodePublishSecretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), withName(name): { source+: { inlineVolumeSpec+: { csi+: { nodePublishSecretRef+: { name: name } } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { source+: { inlineVolumeSpec+: { csi+: { nodePublishSecretRef+: { namespace: namespace } } } } }, }, '#nodeStageSecretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), nodeStageSecretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), withName(name): { source+: { inlineVolumeSpec+: { csi+: { nodeStageSecretRef+: { name: name } } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { source+: { inlineVolumeSpec+: { csi+: { nodeStageSecretRef+: { namespace: namespace } } } } }, }, - '#withDriver':: d.fn(help='"Driver is the name of the driver to use for this volume. Required."', args=[d.arg(name='driver', type=d.T.string)]), + '#withDriver':: d.fn(help='"driver is the name of the driver to use for this volume. Required."', args=[d.arg(name='driver', type=d.T.string)]), withDriver(driver): { source+: { inlineVolumeSpec+: { csi+: { driver: driver } } } }, - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\"."', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType to mount. Must be a filesystem type supported by the host operating system. Ex. 
\\"ext4\\", \\"xfs\\", \\"ntfs\\"."', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { source+: { inlineVolumeSpec+: { csi+: { fsType: fsType } } } }, - '#withReadOnly':: d.fn(help='"Optional: The value to pass to ControllerPublishVolumeRequest. Defaults to false (read/write)."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly value to pass to ControllerPublishVolumeRequest. Defaults to false (read/write)."', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { source+: { inlineVolumeSpec+: { csi+: { readOnly: readOnly } } } }, - '#withVolumeAttributes':: d.fn(help='"Attributes of the volume to publish."', args=[d.arg(name='volumeAttributes', type=d.T.object)]), + '#withVolumeAttributes':: d.fn(help='"volumeAttributes of the volume to publish."', args=[d.arg(name='volumeAttributes', type=d.T.object)]), withVolumeAttributes(volumeAttributes): { source+: { inlineVolumeSpec+: { csi+: { volumeAttributes: volumeAttributes } } } }, - '#withVolumeAttributesMixin':: d.fn(help='"Attributes of the volume to publish."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='volumeAttributes', type=d.T.object)]), + '#withVolumeAttributesMixin':: d.fn(help='"volumeAttributes of the volume to publish."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='volumeAttributes', type=d.T.object)]), withVolumeAttributesMixin(volumeAttributes): { source+: { inlineVolumeSpec+: { csi+: { volumeAttributes+: volumeAttributes } } } }, - '#withVolumeHandle':: d.fn(help='"VolumeHandle is the unique volume name returned by the CSI volume plugin’s CreateVolume to refer to the volume on all subsequent calls. Required."', args=[d.arg(name='volumeHandle', type=d.T.string)]), + '#withVolumeHandle':: d.fn(help='"volumeHandle is the unique volume name returned by the CSI volume plugin’s CreateVolume to refer to the volume on all subsequent calls. Required."', args=[d.arg(name='volumeHandle', type=d.T.string)]), withVolumeHandle(volumeHandle): { source+: { inlineVolumeSpec+: { csi+: { volumeHandle: volumeHandle } } } }, }, '#fc':: d.obj(help='"Represents a Fibre Channel volume. Fibre Channel volumes can only be mounted as read/write once. Fibre Channel volumes support ownership management and SELinux relabeling."'), fc: { - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { source+: { inlineVolumeSpec+: { fc+: { fsType: fsType } } } }, - '#withLun':: d.fn(help='"Optional: FC target lun number"', args=[d.arg(name='lun', type=d.T.integer)]), + '#withLun':: d.fn(help='"lun is Optional: FC target lun number"', args=[d.arg(name='lun', type=d.T.integer)]), withLun(lun): { source+: { inlineVolumeSpec+: { fc+: { lun: lun } } } }, - '#withReadOnly':: d.fn(help='"Optional: Defaults to false (read/write). 
ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly is Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { source+: { inlineVolumeSpec+: { fc+: { readOnly: readOnly } } } }, - '#withTargetWWNs':: d.fn(help='"Optional: FC target worldwide names (WWNs)"', args=[d.arg(name='targetWWNs', type=d.T.array)]), + '#withTargetWWNs':: d.fn(help='"targetWWNs is Optional: FC target worldwide names (WWNs)"', args=[d.arg(name='targetWWNs', type=d.T.array)]), withTargetWWNs(targetWWNs): { source+: { inlineVolumeSpec+: { fc+: { targetWWNs: if std.isArray(v=targetWWNs) then targetWWNs else [targetWWNs] } } } }, - '#withTargetWWNsMixin':: d.fn(help='"Optional: FC target worldwide names (WWNs)"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='targetWWNs', type=d.T.array)]), + '#withTargetWWNsMixin':: d.fn(help='"targetWWNs is Optional: FC target worldwide names (WWNs)"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='targetWWNs', type=d.T.array)]), withTargetWWNsMixin(targetWWNs): { source+: { inlineVolumeSpec+: { fc+: { targetWWNs+: if std.isArray(v=targetWWNs) then targetWWNs else [targetWWNs] } } } }, - '#withWwids':: d.fn(help='"Optional: FC volume world wide identifiers (wwids) Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously."', args=[d.arg(name='wwids', type=d.T.array)]), + '#withWwids':: d.fn(help='"wwids Optional: FC volume world wide identifiers (wwids) Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously."', args=[d.arg(name='wwids', type=d.T.array)]), withWwids(wwids): { source+: { inlineVolumeSpec+: { fc+: { wwids: if std.isArray(v=wwids) then wwids else [wwids] } } } }, - '#withWwidsMixin':: d.fn(help='"Optional: FC volume world wide identifiers (wwids) Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='wwids', type=d.T.array)]), + '#withWwidsMixin':: d.fn(help='"wwids Optional: FC volume world wide identifiers (wwids) Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='wwids', type=d.T.array)]), withWwidsMixin(wwids): { source+: { inlineVolumeSpec+: { fc+: { wwids+: if std.isArray(v=wwids) then wwids else [wwids] } } } }, }, '#flexVolume':: d.obj(help='"FlexPersistentVolumeSource represents a generic persistent volume resource that is provisioned/attached using an exec based plugin."'), flexVolume: { '#secretRef':: d.obj(help='"SecretReference represents a Secret Reference. 
It has enough information to retrieve secret in any namespace"'), secretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), withName(name): { source+: { inlineVolumeSpec+: { flexVolume+: { secretRef+: { name: name } } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { source+: { inlineVolumeSpec+: { flexVolume+: { secretRef+: { namespace: namespace } } } } }, }, - '#withDriver':: d.fn(help='"Driver is the name of the driver to use for this volume."', args=[d.arg(name='driver', type=d.T.string)]), + '#withDriver':: d.fn(help='"driver is the name of the driver to use for this volume."', args=[d.arg(name='driver', type=d.T.string)]), withDriver(driver): { source+: { inlineVolumeSpec+: { flexVolume+: { driver: driver } } } }, - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". The default filesystem depends on FlexVolume script."', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType is the Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". The default filesystem depends on FlexVolume script."', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { source+: { inlineVolumeSpec+: { flexVolume+: { fsType: fsType } } } }, - '#withOptions':: d.fn(help='"Optional: Extra command options if any."', args=[d.arg(name='options', type=d.T.object)]), + '#withOptions':: d.fn(help='"options is Optional: this field holds extra command options if any."', args=[d.arg(name='options', type=d.T.object)]), withOptions(options): { source+: { inlineVolumeSpec+: { flexVolume+: { options: options } } } }, - '#withOptionsMixin':: d.fn(help='"Optional: Extra command options if any."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='options', type=d.T.object)]), + '#withOptionsMixin':: d.fn(help='"options is Optional: this field holds extra command options if any."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='options', type=d.T.object)]), withOptionsMixin(options): { source+: { inlineVolumeSpec+: { flexVolume+: { options+: options } } } }, - '#withReadOnly':: d.fn(help='"Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly is Optional: defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { source+: { inlineVolumeSpec+: { flexVolume+: { readOnly: readOnly } } } }, }, '#flocker':: d.obj(help='"Represents a Flocker volume mounted by the Flocker agent. One and only one of datasetName and datasetUUID should be set. 
Flocker volumes do not support ownership management or SELinux relabeling."'), flocker: { - '#withDatasetName':: d.fn(help='"Name of the dataset stored as metadata -> name on the dataset for Flocker should be considered as deprecated"', args=[d.arg(name='datasetName', type=d.T.string)]), + '#withDatasetName':: d.fn(help='"datasetName is Name of the dataset stored as metadata -> name on the dataset for Flocker should be considered as deprecated"', args=[d.arg(name='datasetName', type=d.T.string)]), withDatasetName(datasetName): { source+: { inlineVolumeSpec+: { flocker+: { datasetName: datasetName } } } }, - '#withDatasetUUID':: d.fn(help='"UUID of the dataset. This is unique identifier of a Flocker dataset"', args=[d.arg(name='datasetUUID', type=d.T.string)]), + '#withDatasetUUID':: d.fn(help='"datasetUUID is the UUID of the dataset. This is unique identifier of a Flocker dataset"', args=[d.arg(name='datasetUUID', type=d.T.string)]), withDatasetUUID(datasetUUID): { source+: { inlineVolumeSpec+: { flocker+: { datasetUUID: datasetUUID } } } }, }, '#gcePersistentDisk':: d.obj(help='"Represents a Persistent Disk resource in Google Compute Engine.\\n\\nA GCE PD must exist before mounting to a container. The disk must also be in the same GCE project and zone as the kubelet. A GCE PD can only be mounted as read/write once or read-only many times. GCE PDs support ownership management and SELinux relabeling."'), gcePersistentDisk: { - '#withFsType':: d.fn(help='"Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType is filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { source+: { inlineVolumeSpec+: { gcePersistentDisk+: { fsType: fsType } } } }, - '#withPartition':: d.fn(help='"The partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \\"1\\". Similarly, the volume partition for /dev/sda is \\"0\\" (or you can leave the property empty). More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='partition', type=d.T.integer)]), + '#withPartition':: d.fn(help='"partition is the partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \\"1\\". Similarly, the volume partition for /dev/sda is \\"0\\" (or you can leave the property empty). More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='partition', type=d.T.integer)]), withPartition(partition): { source+: { inlineVolumeSpec+: { gcePersistentDisk+: { partition: partition } } } }, - '#withPdName':: d.fn(help='"Unique name of the PD resource in GCE. Used to identify the disk in GCE. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='pdName', type=d.T.string)]), + '#withPdName':: d.fn(help='"pdName is unique name of the PD resource in GCE. Used to identify the disk in GCE. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='pdName', type=d.T.string)]), withPdName(pdName): { source+: { inlineVolumeSpec+: { gcePersistentDisk+: { pdName: pdName } } } }, - '#withReadOnly':: d.fn(help='"ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { source+: { inlineVolumeSpec+: { gcePersistentDisk+: { readOnly: readOnly } } } }, }, '#glusterfs':: d.obj(help='"Represents a Glusterfs mount that lasts the lifetime of a pod. Glusterfs volumes do not support ownership management or SELinux relabeling."'), glusterfs: { - '#withEndpoints':: d.fn(help='"EndpointsName is the endpoint name that details Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='endpoints', type=d.T.string)]), + '#withEndpoints':: d.fn(help='"endpoints is the endpoint name that details Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='endpoints', type=d.T.string)]), withEndpoints(endpoints): { source+: { inlineVolumeSpec+: { glusterfs+: { endpoints: endpoints } } } }, - '#withEndpointsNamespace':: d.fn(help='"EndpointsNamespace is the namespace that contains Glusterfs endpoint. If this field is empty, the EndpointNamespace defaults to the same namespace as the bound PVC. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='endpointsNamespace', type=d.T.string)]), + '#withEndpointsNamespace':: d.fn(help='"endpointsNamespace is the namespace that contains Glusterfs endpoint. If this field is empty, the EndpointNamespace defaults to the same namespace as the bound PVC. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='endpointsNamespace', type=d.T.string)]), withEndpointsNamespace(endpointsNamespace): { source+: { inlineVolumeSpec+: { glusterfs+: { endpointsNamespace: endpointsNamespace } } } }, - '#withPath':: d.fn(help='"Path is the Glusterfs volume path. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='path', type=d.T.string)]), + '#withPath':: d.fn(help='"path is the Glusterfs volume path. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='path', type=d.T.string)]), withPath(path): { source+: { inlineVolumeSpec+: { glusterfs+: { path: path } } } }, - '#withReadOnly':: d.fn(help='"ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions. Defaults to false. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly here will force the Glusterfs volume to be mounted with read-only permissions. Defaults to false. 
More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { source+: { inlineVolumeSpec+: { glusterfs+: { readOnly: readOnly } } } }, }, '#hostPath':: d.obj(help='"Represents a host path mapped into a pod. Host path volumes do not support ownership management or SELinux relabeling."'), hostPath: { - '#withPath':: d.fn(help='"Path of the directory on the host. If the path is a symlink, it will follow the link to the real path. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath"', args=[d.arg(name='path', type=d.T.string)]), + '#withPath':: d.fn(help='"path of the directory on the host. If the path is a symlink, it will follow the link to the real path. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath"', args=[d.arg(name='path', type=d.T.string)]), withPath(path): { source+: { inlineVolumeSpec+: { hostPath+: { path: path } } } }, - '#withType':: d.fn(help='"Type for HostPath Volume Defaults to \\"\\" More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath"', args=[d.arg(name='type', type=d.T.string)]), + '#withType':: d.fn(help='"type for HostPath Volume Defaults to \\"\\" More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath"', args=[d.arg(name='type', type=d.T.string)]), withType(type): { source+: { inlineVolumeSpec+: { hostPath+: { type: type } } } }, }, '#iscsi':: d.obj(help='"ISCSIPersistentVolumeSource represents an ISCSI disk. ISCSI volumes can only be mounted as read/write once. ISCSI volumes support ownership management and SELinux relabeling."'), iscsi: { '#secretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), secretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), withName(name): { source+: { inlineVolumeSpec+: { iscsi+: { secretRef+: { name: name } } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { source+: { inlineVolumeSpec+: { iscsi+: { secretRef+: { namespace: namespace } } } } }, }, - '#withChapAuthDiscovery':: d.fn(help='"whether support iSCSI Discovery CHAP authentication"', args=[d.arg(name='chapAuthDiscovery', type=d.T.boolean)]), + '#withChapAuthDiscovery':: d.fn(help='"chapAuthDiscovery defines whether support iSCSI Discovery CHAP authentication"', args=[d.arg(name='chapAuthDiscovery', type=d.T.boolean)]), withChapAuthDiscovery(chapAuthDiscovery): { source+: { inlineVolumeSpec+: { iscsi+: { chapAuthDiscovery: chapAuthDiscovery } } } }, - '#withChapAuthSession':: d.fn(help='"whether support iSCSI Session CHAP authentication"', args=[d.arg(name='chapAuthSession', type=d.T.boolean)]), + '#withChapAuthSession':: d.fn(help='"chapAuthSession defines whether support iSCSI Session CHAP authentication"', args=[d.arg(name='chapAuthSession', type=d.T.boolean)]), withChapAuthSession(chapAuthSession): { source+: { inlineVolumeSpec+: { iscsi+: { chapAuthSession: chapAuthSession } } } }, - 
'#withFsType':: d.fn(help='"Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi"', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType is the filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi"', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { source+: { inlineVolumeSpec+: { iscsi+: { fsType: fsType } } } }, - '#withInitiatorName':: d.fn(help='"Custom iSCSI Initiator Name. If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface : will be created for the connection."', args=[d.arg(name='initiatorName', type=d.T.string)]), + '#withInitiatorName':: d.fn(help='"initiatorName is the custom iSCSI Initiator Name. If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface : will be created for the connection."', args=[d.arg(name='initiatorName', type=d.T.string)]), withInitiatorName(initiatorName): { source+: { inlineVolumeSpec+: { iscsi+: { initiatorName: initiatorName } } } }, - '#withIqn':: d.fn(help='"Target iSCSI Qualified Name."', args=[d.arg(name='iqn', type=d.T.string)]), + '#withIqn':: d.fn(help='"iqn is Target iSCSI Qualified Name."', args=[d.arg(name='iqn', type=d.T.string)]), withIqn(iqn): { source+: { inlineVolumeSpec+: { iscsi+: { iqn: iqn } } } }, - '#withIscsiInterface':: d.fn(help="\"iSCSI Interface Name that uses an iSCSI transport. Defaults to 'default' (tcp).\"", args=[d.arg(name='iscsiInterface', type=d.T.string)]), + '#withIscsiInterface':: d.fn(help="\"iscsiInterface is the interface Name that uses an iSCSI transport. Defaults to 'default' (tcp).\"", args=[d.arg(name='iscsiInterface', type=d.T.string)]), withIscsiInterface(iscsiInterface): { source+: { inlineVolumeSpec+: { iscsi+: { iscsiInterface: iscsiInterface } } } }, - '#withLun':: d.fn(help='"iSCSI Target Lun number."', args=[d.arg(name='lun', type=d.T.integer)]), + '#withLun':: d.fn(help='"lun is iSCSI Target Lun number."', args=[d.arg(name='lun', type=d.T.integer)]), withLun(lun): { source+: { inlineVolumeSpec+: { iscsi+: { lun: lun } } } }, - '#withPortals':: d.fn(help='"iSCSI Target Portal List. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260)."', args=[d.arg(name='portals', type=d.T.array)]), + '#withPortals':: d.fn(help='"portals is the iSCSI Target Portal List. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260)."', args=[d.arg(name='portals', type=d.T.array)]), withPortals(portals): { source+: { inlineVolumeSpec+: { iscsi+: { portals: if std.isArray(v=portals) then portals else [portals] } } } }, - '#withPortalsMixin':: d.fn(help='"iSCSI Target Portal List. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260)."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='portals', type=d.T.array)]), + '#withPortalsMixin':: d.fn(help='"portals is the iSCSI Target Portal List. 
The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260)."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='portals', type=d.T.array)]), withPortalsMixin(portals): { source+: { inlineVolumeSpec+: { iscsi+: { portals+: if std.isArray(v=portals) then portals else [portals] } } } }, - '#withReadOnly':: d.fn(help='"ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false."', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { source+: { inlineVolumeSpec+: { iscsi+: { readOnly: readOnly } } } }, - '#withTargetPortal':: d.fn(help='"iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260)."', args=[d.arg(name='targetPortal', type=d.T.string)]), + '#withTargetPortal':: d.fn(help='"targetPortal is iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260)."', args=[d.arg(name='targetPortal', type=d.T.string)]), withTargetPortal(targetPortal): { source+: { inlineVolumeSpec+: { iscsi+: { targetPortal: targetPortal } } } }, }, '#local':: d.obj(help='"Local represents directly-attached storage with node affinity (Beta feature)"'), 'local': { - '#withFsType':: d.fn(help='"Filesystem type to mount. It applies only when the Path is a block device. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". The default value is to auto-select a fileystem if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType is the filesystem type to mount. It applies only when the Path is a block device. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". The default value is to auto-select a filesystem if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { source+: { inlineVolumeSpec+: { 'local'+: { fsType: fsType } } } }, - '#withPath':: d.fn(help='"The full path to the volume on the node. It can be either a directory or block device (disk, partition, ...)."', args=[d.arg(name='path', type=d.T.string)]), + '#withPath':: d.fn(help='"path of the full path to the volume on the node. It can be either a directory or block device (disk, partition, ...)."', args=[d.arg(name='path', type=d.T.string)]), withPath(path): { source+: { inlineVolumeSpec+: { 'local'+: { path: path } } } }, }, '#nfs':: d.obj(help='"Represents an NFS mount that lasts the lifetime of a pod. NFS volumes do not support ownership management or SELinux relabeling."'), nfs: { - '#withPath':: d.fn(help='"Path that is exported by the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs"', args=[d.arg(name='path', type=d.T.string)]), + '#withPath':: d.fn(help='"path that is exported by the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs"', args=[d.arg(name='path', type=d.T.string)]), withPath(path): { source+: { inlineVolumeSpec+: { nfs+: { path: path } } } }, - '#withReadOnly':: d.fn(help='"ReadOnly here will force the NFS export to be mounted with read-only permissions. Defaults to false. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs"', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly here will force the NFS export to be mounted with read-only permissions. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs"', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { source+: { inlineVolumeSpec+: { nfs+: { readOnly: readOnly } } } }, - '#withServer':: d.fn(help='"Server is the hostname or IP address of the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs"', args=[d.arg(name='server', type=d.T.string)]), + '#withServer':: d.fn(help='"server is the hostname or IP address of the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs"', args=[d.arg(name='server', type=d.T.string)]), withServer(server): { source+: { inlineVolumeSpec+: { nfs+: { server: server } } } }, }, '#nodeAffinity':: d.obj(help='"VolumeNodeAffinity defines constraints that limit what nodes this volume can be accessed from."'), @@ -273,87 +280,87 @@ }, '#photonPersistentDisk':: d.obj(help='"Represents a Photon Controller persistent disk resource."'), photonPersistentDisk: { - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { source+: { inlineVolumeSpec+: { photonPersistentDisk+: { fsType: fsType } } } }, - '#withPdID':: d.fn(help='"ID that identifies Photon Controller persistent disk"', args=[d.arg(name='pdID', type=d.T.string)]), + '#withPdID':: d.fn(help='"pdID is the ID that identifies Photon Controller persistent disk"', args=[d.arg(name='pdID', type=d.T.string)]), withPdID(pdID): { source+: { inlineVolumeSpec+: { photonPersistentDisk+: { pdID: pdID } } } }, }, '#portworxVolume':: d.obj(help='"PortworxVolumeSource represents a Portworx volume resource."'), portworxVolume: { - '#withFsType':: d.fn(help='"FSType represents the filesystem type to mount Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fSType represents the filesystem type to mount Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { source+: { inlineVolumeSpec+: { portworxVolume+: { fsType: fsType } } } }, - '#withReadOnly':: d.fn(help='"Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly defaults to false (read/write). 
ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { source+: { inlineVolumeSpec+: { portworxVolume+: { readOnly: readOnly } } } }, - '#withVolumeID':: d.fn(help='"VolumeID uniquely identifies a Portworx volume"', args=[d.arg(name='volumeID', type=d.T.string)]), + '#withVolumeID':: d.fn(help='"volumeID uniquely identifies a Portworx volume"', args=[d.arg(name='volumeID', type=d.T.string)]), withVolumeID(volumeID): { source+: { inlineVolumeSpec+: { portworxVolume+: { volumeID: volumeID } } } }, }, '#quobyte':: d.obj(help='"Represents a Quobyte mount that lasts the lifetime of a pod. Quobyte volumes do not support ownership management or SELinux relabeling."'), quobyte: { - '#withGroup':: d.fn(help='"Group to map volume access to Default is no group"', args=[d.arg(name='group', type=d.T.string)]), + '#withGroup':: d.fn(help='"group to map volume access to Default is no group"', args=[d.arg(name='group', type=d.T.string)]), withGroup(group): { source+: { inlineVolumeSpec+: { quobyte+: { group: group } } } }, - '#withReadOnly':: d.fn(help='"ReadOnly here will force the Quobyte volume to be mounted with read-only permissions. Defaults to false."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly here will force the Quobyte volume to be mounted with read-only permissions. Defaults to false."', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { source+: { inlineVolumeSpec+: { quobyte+: { readOnly: readOnly } } } }, - '#withRegistry':: d.fn(help='"Registry represents a single or multiple Quobyte Registry services specified as a string as host:port pair (multiple entries are separated with commas) which acts as the central registry for volumes"', args=[d.arg(name='registry', type=d.T.string)]), + '#withRegistry':: d.fn(help='"registry represents a single or multiple Quobyte Registry services specified as a string as host:port pair (multiple entries are separated with commas) which acts as the central registry for volumes"', args=[d.arg(name='registry', type=d.T.string)]), withRegistry(registry): { source+: { inlineVolumeSpec+: { quobyte+: { registry: registry } } } }, - '#withTenant':: d.fn(help='"Tenant owning the given Quobyte volume in the Backend Used with dynamically provisioned Quobyte volumes, value is set by the plugin"', args=[d.arg(name='tenant', type=d.T.string)]), + '#withTenant':: d.fn(help='"tenant owning the given Quobyte volume in the Backend Used with dynamically provisioned Quobyte volumes, value is set by the plugin"', args=[d.arg(name='tenant', type=d.T.string)]), withTenant(tenant): { source+: { inlineVolumeSpec+: { quobyte+: { tenant: tenant } } } }, - '#withUser':: d.fn(help='"User to map volume access to Defaults to serivceaccount user"', args=[d.arg(name='user', type=d.T.string)]), + '#withUser':: d.fn(help='"user to map volume access to Defaults to serivceaccount user"', args=[d.arg(name='user', type=d.T.string)]), withUser(user): { source+: { inlineVolumeSpec+: { quobyte+: { user: user } } } }, - '#withVolume':: d.fn(help='"Volume is a string that references an already created Quobyte volume by name."', args=[d.arg(name='volume', type=d.T.string)]), + '#withVolume':: d.fn(help='"volume is a string that references an already created Quobyte volume by name."', args=[d.arg(name='volume', type=d.T.string)]), withVolume(volume): { source+: { inlineVolumeSpec+: { quobyte+: { volume: volume } } } }, }, '#rbd':: 
d.obj(help='"Represents a Rados Block Device mount that lasts the lifetime of a pod. RBD volumes support ownership management and SELinux relabeling."'), rbd: { '#secretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), secretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), withName(name): { source+: { inlineVolumeSpec+: { rbd+: { secretRef+: { name: name } } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { source+: { inlineVolumeSpec+: { rbd+: { secretRef+: { namespace: namespace } } } } }, }, - '#withFsType':: d.fn(help='"Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd"', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType is the filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd"', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { source+: { inlineVolumeSpec+: { rbd+: { fsType: fsType } } } }, - '#withImage':: d.fn(help='"The rados image name. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='image', type=d.T.string)]), + '#withImage':: d.fn(help='"image is the rados image name. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='image', type=d.T.string)]), withImage(image): { source+: { inlineVolumeSpec+: { rbd+: { image: image } } } }, - '#withKeyring':: d.fn(help='"Keyring is the path to key ring for RBDUser. Default is /etc/ceph/keyring. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='keyring', type=d.T.string)]), + '#withKeyring':: d.fn(help='"keyring is the path to key ring for RBDUser. Default is /etc/ceph/keyring. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='keyring', type=d.T.string)]), withKeyring(keyring): { source+: { inlineVolumeSpec+: { rbd+: { keyring: keyring } } } }, - '#withMonitors':: d.fn(help='"A collection of Ceph monitors. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='monitors', type=d.T.array)]), + '#withMonitors':: d.fn(help='"monitors is a collection of Ceph monitors. 
More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='monitors', type=d.T.array)]), withMonitors(monitors): { source+: { inlineVolumeSpec+: { rbd+: { monitors: if std.isArray(v=monitors) then monitors else [monitors] } } } }, - '#withMonitorsMixin':: d.fn(help='"A collection of Ceph monitors. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='monitors', type=d.T.array)]), + '#withMonitorsMixin':: d.fn(help='"monitors is a collection of Ceph monitors. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='monitors', type=d.T.array)]), withMonitorsMixin(monitors): { source+: { inlineVolumeSpec+: { rbd+: { monitors+: if std.isArray(v=monitors) then monitors else [monitors] } } } }, - '#withPool':: d.fn(help='"The rados pool name. Default is rbd. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='pool', type=d.T.string)]), + '#withPool':: d.fn(help='"pool is the rados pool name. Default is rbd. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='pool', type=d.T.string)]), withPool(pool): { source+: { inlineVolumeSpec+: { rbd+: { pool: pool } } } }, - '#withReadOnly':: d.fn(help='"ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { source+: { inlineVolumeSpec+: { rbd+: { readOnly: readOnly } } } }, - '#withUser':: d.fn(help='"The rados user name. Default is admin. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='user', type=d.T.string)]), + '#withUser':: d.fn(help='"user is the rados user name. Default is admin. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='user', type=d.T.string)]), withUser(user): { source+: { inlineVolumeSpec+: { rbd+: { user: user } } } }, }, '#scaleIO':: d.obj(help='"ScaleIOPersistentVolumeSource represents a persistent ScaleIO volume"'), scaleIO: { '#secretRef':: d.obj(help='"SecretReference represents a Secret Reference. 
It has enough information to retrieve secret in any namespace"'), secretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), withName(name): { source+: { inlineVolumeSpec+: { scaleIO+: { secretRef+: { name: name } } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { source+: { inlineVolumeSpec+: { scaleIO+: { secretRef+: { namespace: namespace } } } } }, }, - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Default is \\"xfs\\', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Default is \\"xfs\\', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { source+: { inlineVolumeSpec+: { scaleIO+: { fsType: fsType } } } }, - '#withGateway':: d.fn(help='"The host address of the ScaleIO API Gateway."', args=[d.arg(name='gateway', type=d.T.string)]), + '#withGateway':: d.fn(help='"gateway is the host address of the ScaleIO API Gateway."', args=[d.arg(name='gateway', type=d.T.string)]), withGateway(gateway): { source+: { inlineVolumeSpec+: { scaleIO+: { gateway: gateway } } } }, - '#withProtectionDomain':: d.fn(help='"The name of the ScaleIO Protection Domain for the configured storage."', args=[d.arg(name='protectionDomain', type=d.T.string)]), + '#withProtectionDomain':: d.fn(help='"protectionDomain is the name of the ScaleIO Protection Domain for the configured storage."', args=[d.arg(name='protectionDomain', type=d.T.string)]), withProtectionDomain(protectionDomain): { source+: { inlineVolumeSpec+: { scaleIO+: { protectionDomain: protectionDomain } } } }, - '#withReadOnly':: d.fn(help='"Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { source+: { inlineVolumeSpec+: { scaleIO+: { readOnly: readOnly } } } }, - '#withSslEnabled':: d.fn(help='"Flag to enable/disable SSL communication with Gateway, default false"', args=[d.arg(name='sslEnabled', type=d.T.boolean)]), + '#withSslEnabled':: d.fn(help='"sslEnabled is the flag to enable/disable SSL communication with Gateway, default false"', args=[d.arg(name='sslEnabled', type=d.T.boolean)]), withSslEnabled(sslEnabled): { source+: { inlineVolumeSpec+: { scaleIO+: { sslEnabled: sslEnabled } } } }, - '#withStorageMode':: d.fn(help='"Indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. 
Default is ThinProvisioned."', args=[d.arg(name='storageMode', type=d.T.string)]), + '#withStorageMode':: d.fn(help='"storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. Default is ThinProvisioned."', args=[d.arg(name='storageMode', type=d.T.string)]), withStorageMode(storageMode): { source+: { inlineVolumeSpec+: { scaleIO+: { storageMode: storageMode } } } }, - '#withStoragePool':: d.fn(help='"The ScaleIO Storage Pool associated with the protection domain."', args=[d.arg(name='storagePool', type=d.T.string)]), + '#withStoragePool':: d.fn(help='"storagePool is the ScaleIO Storage Pool associated with the protection domain."', args=[d.arg(name='storagePool', type=d.T.string)]), withStoragePool(storagePool): { source+: { inlineVolumeSpec+: { scaleIO+: { storagePool: storagePool } } } }, - '#withSystem':: d.fn(help='"The name of the storage system as configured in ScaleIO."', args=[d.arg(name='system', type=d.T.string)]), + '#withSystem':: d.fn(help='"system is the name of the storage system as configured in ScaleIO."', args=[d.arg(name='system', type=d.T.string)]), withSystem(system): { source+: { inlineVolumeSpec+: { scaleIO+: { system: system } } } }, - '#withVolumeName':: d.fn(help='"The name of a volume already created in the ScaleIO system that is associated with this volume source."', args=[d.arg(name='volumeName', type=d.T.string)]), + '#withVolumeName':: d.fn(help='"volumeName is the name of a volume already created in the ScaleIO system that is associated with this volume source."', args=[d.arg(name='volumeName', type=d.T.string)]), withVolumeName(volumeName): { source+: { inlineVolumeSpec+: { scaleIO+: { volumeName: volumeName } } } }, }, '#storageos':: d.obj(help='"Represents a StorageOS persistent volume resource."'), @@ -375,51 +382,53 @@ '#withUid':: d.fn(help='"UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { source+: { inlineVolumeSpec+: { storageos+: { secretRef+: { uid: uid } } } } }, }, - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { source+: { inlineVolumeSpec+: { storageos+: { fsType: fsType } } } }, - '#withReadOnly':: d.fn(help='"Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { source+: { inlineVolumeSpec+: { storageos+: { readOnly: readOnly } } } }, - '#withVolumeName':: d.fn(help='"VolumeName is the human-readable name of the StorageOS volume. Volume names are only unique within a namespace."', args=[d.arg(name='volumeName', type=d.T.string)]), + '#withVolumeName':: d.fn(help='"volumeName is the human-readable name of the StorageOS volume. 
Volume names are only unique within a namespace."', args=[d.arg(name='volumeName', type=d.T.string)]), withVolumeName(volumeName): { source+: { inlineVolumeSpec+: { storageos+: { volumeName: volumeName } } } }, - '#withVolumeNamespace':: d.fn(help="\"VolumeNamespace specifies the scope of the volume within StorageOS. If no namespace is specified then the Pod's namespace will be used. This allows the Kubernetes name scoping to be mirrored within StorageOS for tighter integration. Set VolumeName to any name to override the default behaviour. Set to \\\"default\\\" if you are not using namespaces within StorageOS. Namespaces that do not pre-exist within StorageOS will be created.\"", args=[d.arg(name='volumeNamespace', type=d.T.string)]), + '#withVolumeNamespace':: d.fn(help="\"volumeNamespace specifies the scope of the volume within StorageOS. If no namespace is specified then the Pod's namespace will be used. This allows the Kubernetes name scoping to be mirrored within StorageOS for tighter integration. Set VolumeName to any name to override the default behaviour. Set to \\\"default\\\" if you are not using namespaces within StorageOS. Namespaces that do not pre-exist within StorageOS will be created.\"", args=[d.arg(name='volumeNamespace', type=d.T.string)]), withVolumeNamespace(volumeNamespace): { source+: { inlineVolumeSpec+: { storageos+: { volumeNamespace: volumeNamespace } } } }, }, '#vsphereVolume':: d.obj(help='"Represents a vSphere volume resource."'), vsphereVolume: { - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType is filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". 
Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { source+: { inlineVolumeSpec+: { vsphereVolume+: { fsType: fsType } } } }, - '#withStoragePolicyID':: d.fn(help='"Storage Policy Based Management (SPBM) profile ID associated with the StoragePolicyName."', args=[d.arg(name='storagePolicyID', type=d.T.string)]), + '#withStoragePolicyID':: d.fn(help='"storagePolicyID is the storage Policy Based Management (SPBM) profile ID associated with the StoragePolicyName."', args=[d.arg(name='storagePolicyID', type=d.T.string)]), withStoragePolicyID(storagePolicyID): { source+: { inlineVolumeSpec+: { vsphereVolume+: { storagePolicyID: storagePolicyID } } } }, - '#withStoragePolicyName':: d.fn(help='"Storage Policy Based Management (SPBM) profile name."', args=[d.arg(name='storagePolicyName', type=d.T.string)]), + '#withStoragePolicyName':: d.fn(help='"storagePolicyName is the storage Policy Based Management (SPBM) profile name."', args=[d.arg(name='storagePolicyName', type=d.T.string)]), withStoragePolicyName(storagePolicyName): { source+: { inlineVolumeSpec+: { vsphereVolume+: { storagePolicyName: storagePolicyName } } } }, - '#withVolumePath':: d.fn(help='"Path that identifies vSphere volume vmdk"', args=[d.arg(name='volumePath', type=d.T.string)]), + '#withVolumePath':: d.fn(help='"volumePath is the path that identifies vSphere volume vmdk"', args=[d.arg(name='volumePath', type=d.T.string)]), withVolumePath(volumePath): { source+: { inlineVolumeSpec+: { vsphereVolume+: { volumePath: volumePath } } } }, }, - '#withAccessModes':: d.fn(help='"AccessModes contains all ways the volume can be mounted. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes"', args=[d.arg(name='accessModes', type=d.T.array)]), + '#withAccessModes':: d.fn(help='"accessModes contains all ways the volume can be mounted. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes"', args=[d.arg(name='accessModes', type=d.T.array)]), withAccessModes(accessModes): { source+: { inlineVolumeSpec+: { accessModes: if std.isArray(v=accessModes) then accessModes else [accessModes] } } }, - '#withAccessModesMixin':: d.fn(help='"AccessModes contains all ways the volume can be mounted. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='accessModes', type=d.T.array)]), + '#withAccessModesMixin':: d.fn(help='"accessModes contains all ways the volume can be mounted. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='accessModes', type=d.T.array)]), withAccessModesMixin(accessModes): { source+: { inlineVolumeSpec+: { accessModes+: if std.isArray(v=accessModes) then accessModes else [accessModes] } } }, - '#withCapacity':: d.fn(help="\"A description of the persistent volume's resources and capacity. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity\"", args=[d.arg(name='capacity', type=d.T.object)]), + '#withCapacity':: d.fn(help="\"capacity is the description of the persistent volume's resources and capacity. 
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity\"", args=[d.arg(name='capacity', type=d.T.object)]), withCapacity(capacity): { source+: { inlineVolumeSpec+: { capacity: capacity } } }, - '#withCapacityMixin':: d.fn(help="\"A description of the persistent volume's resources and capacity. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='capacity', type=d.T.object)]), + '#withCapacityMixin':: d.fn(help="\"capacity is the description of the persistent volume's resources and capacity. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='capacity', type=d.T.object)]), withCapacityMixin(capacity): { source+: { inlineVolumeSpec+: { capacity+: capacity } } }, - '#withMountOptions':: d.fn(help='"A list of mount options, e.g. [\\"ro\\", \\"soft\\"]. Not validated - mount will simply fail if one is invalid. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#mount-options"', args=[d.arg(name='mountOptions', type=d.T.array)]), + '#withMountOptions':: d.fn(help='"mountOptions is the list of mount options, e.g. [\\"ro\\", \\"soft\\"]. Not validated - mount will simply fail if one is invalid. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#mount-options"', args=[d.arg(name='mountOptions', type=d.T.array)]), withMountOptions(mountOptions): { source+: { inlineVolumeSpec+: { mountOptions: if std.isArray(v=mountOptions) then mountOptions else [mountOptions] } } }, - '#withMountOptionsMixin':: d.fn(help='"A list of mount options, e.g. [\\"ro\\", \\"soft\\"]. Not validated - mount will simply fail if one is invalid. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#mount-options"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='mountOptions', type=d.T.array)]), + '#withMountOptionsMixin':: d.fn(help='"mountOptions is the list of mount options, e.g. [\\"ro\\", \\"soft\\"]. Not validated - mount will simply fail if one is invalid. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#mount-options"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='mountOptions', type=d.T.array)]), withMountOptionsMixin(mountOptions): { source+: { inlineVolumeSpec+: { mountOptions+: if std.isArray(v=mountOptions) then mountOptions else [mountOptions] } } }, - '#withPersistentVolumeReclaimPolicy':: d.fn(help='"What happens to a persistent volume when released from its claim. Valid options are Retain (default for manually created PersistentVolumes), Delete (default for dynamically provisioned PersistentVolumes), and Recycle (deprecated). Recycle must be supported by the volume plugin underlying this PersistentVolume. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#reclaiming"', args=[d.arg(name='persistentVolumeReclaimPolicy', type=d.T.string)]), + '#withPersistentVolumeReclaimPolicy':: d.fn(help='"persistentVolumeReclaimPolicy defines what happens to a persistent volume when released from its claim. Valid options are Retain (default for manually created PersistentVolumes), Delete (default for dynamically provisioned PersistentVolumes), and Recycle (deprecated). Recycle must be supported by the volume plugin underlying this PersistentVolume. 
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#reclaiming"', args=[d.arg(name='persistentVolumeReclaimPolicy', type=d.T.string)]), withPersistentVolumeReclaimPolicy(persistentVolumeReclaimPolicy): { source+: { inlineVolumeSpec+: { persistentVolumeReclaimPolicy: persistentVolumeReclaimPolicy } } }, - '#withStorageClassName':: d.fn(help='"Name of StorageClass to which this persistent volume belongs. Empty value means that this volume does not belong to any StorageClass."', args=[d.arg(name='storageClassName', type=d.T.string)]), + '#withStorageClassName':: d.fn(help='"storageClassName is the name of StorageClass to which this persistent volume belongs. Empty value means that this volume does not belong to any StorageClass."', args=[d.arg(name='storageClassName', type=d.T.string)]), withStorageClassName(storageClassName): { source+: { inlineVolumeSpec+: { storageClassName: storageClassName } } }, + '#withVolumeAttributesClassName':: d.fn(help='"Name of VolumeAttributesClass to which this persistent volume belongs. Empty value is not allowed. When this field is not set, it indicates that this volume does not belong to any VolumeAttributesClass. This field is mutable and can be changed by the CSI driver after a volume has been updated successfully to a new class. For an unbound PersistentVolume, the volumeAttributesClassName will be matched with unbound PersistentVolumeClaims during the binding process. This is an alpha field and requires enabling VolumeAttributesClass feature."', args=[d.arg(name='volumeAttributesClassName', type=d.T.string)]), + withVolumeAttributesClassName(volumeAttributesClassName): { source+: { inlineVolumeSpec+: { volumeAttributesClassName: volumeAttributesClassName } } }, '#withVolumeMode':: d.fn(help='"volumeMode defines if a volume is intended to be used with a formatted filesystem or to remain in raw block state. Value of Filesystem is implied when not included in spec."', args=[d.arg(name='volumeMode', type=d.T.string)]), withVolumeMode(volumeMode): { source+: { inlineVolumeSpec+: { volumeMode: volumeMode } } }, }, - '#withPersistentVolumeName':: d.fn(help='"Name of the persistent volume to attach."', args=[d.arg(name='persistentVolumeName', type=d.T.string)]), + '#withPersistentVolumeName':: d.fn(help='"persistentVolumeName represents the name of the persistent volume to attach."', args=[d.arg(name='persistentVolumeName', type=d.T.string)]), withPersistentVolumeName(persistentVolumeName): { source+: { persistentVolumeName: persistentVolumeName } }, }, - '#withAttacher':: d.fn(help='"Attacher indicates the name of the volume driver that MUST handle this request. This is the name returned by GetPluginName()."', args=[d.arg(name='attacher', type=d.T.string)]), + '#withAttacher':: d.fn(help='"attacher indicates the name of the volume driver that MUST handle this request. 
This is the name returned by GetPluginName()."', args=[d.arg(name='attacher', type=d.T.string)]), withAttacher(attacher): { attacher: attacher }, - '#withNodeName':: d.fn(help='"The node that the volume should be attached to."', args=[d.arg(name='nodeName', type=d.T.string)]), + '#withNodeName':: d.fn(help='"nodeName represents the node that the volume should be attached to."', args=[d.arg(name='nodeName', type=d.T.string)]), withNodeName(nodeName): { nodeName: nodeName }, '#mixin': 'ignore', mixin: self, diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/storage/v1/volumeAttachmentStatus.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/storage/v1/volumeAttachmentStatus.libsonnet new file mode 100644 index 00000000000..758a4409e9d --- /dev/null +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/storage/v1/volumeAttachmentStatus.libsonnet @@ -0,0 +1,26 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='volumeAttachmentStatus', url='', help='"VolumeAttachmentStatus is the status of a VolumeAttachment request."'), + '#attachError':: d.obj(help='"VolumeError captures an error encountered during a volume operation."'), + attachError: { + '#withMessage':: d.fn(help='"message represents the error encountered during Attach or Detach operation. This string may be logged, so it should not contain sensitive information."', args=[d.arg(name='message', type=d.T.string)]), + withMessage(message): { attachError+: { message: message } }, + '#withTime':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='time', type=d.T.string)]), + withTime(time): { attachError+: { time: time } }, + }, + '#detachError':: d.obj(help='"VolumeError captures an error encountered during a volume operation."'), + detachError: { + '#withMessage':: d.fn(help='"message represents the error encountered during Attach or Detach operation. This string may be logged, so it should not contain sensitive information."', args=[d.arg(name='message', type=d.T.string)]), + withMessage(message): { detachError+: { message: message } }, + '#withTime':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='time', type=d.T.string)]), + withTime(time): { detachError+: { time: time } }, + }, + '#withAttached':: d.fn(help='"attached indicates the volume is successfully attached. This field must only be set by the entity completing the attach operation, i.e. the external-attacher."', args=[d.arg(name='attached', type=d.T.boolean)]), + withAttached(attached): { attached: attached }, + '#withAttachmentMetadata':: d.fn(help='"attachmentMetadata is populated with any information returned by the attach operation, upon successful attach, that must be passed into subsequent WaitForAttach or Mount calls. This field must only be set by the entity completing the attach operation, i.e. 
the external-attacher."', args=[d.arg(name='attachmentMetadata', type=d.T.object)]), + withAttachmentMetadata(attachmentMetadata): { attachmentMetadata: attachmentMetadata }, + '#withAttachmentMetadataMixin':: d.fn(help='"attachmentMetadata is populated with any information returned by the attach operation, upon successful attach, that must be passed into subsequent WaitForAttach or Mount calls. This field must only be set by the entity completing the attach operation, i.e. the external-attacher."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='attachmentMetadata', type=d.T.object)]), + withAttachmentMetadataMixin(attachmentMetadata): { attachmentMetadata+: attachmentMetadata }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1/volumeError.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/storage/v1/volumeError.libsonnet similarity index 69% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1/volumeError.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/storage/v1/volumeError.libsonnet index 87f1e4e18b8..dab32f4fc87 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1/volumeError.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/storage/v1/volumeError.libsonnet @@ -1,7 +1,7 @@ { local d = (import 'doc-util/main.libsonnet'), '#':: d.pkg(name='volumeError', url='', help='"VolumeError captures an error encountered during a volume operation."'), - '#withMessage':: d.fn(help='"String detailing the error encountered during Attach or Detach operation. This string may be logged, so it should not contain sensitive information."', args=[d.arg(name='message', type=d.T.string)]), + '#withMessage':: d.fn(help='"message represents the error encountered during Attach or Detach operation. This string may be logged, so it should not contain sensitive information."', args=[d.arg(name='message', type=d.T.string)]), withMessage(message): { message: message }, '#withTime':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='time', type=d.T.string)]), withTime(time): { time: time }, diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/storage/v1/volumeNodeResources.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/storage/v1/volumeNodeResources.libsonnet new file mode 100644 index 00000000000..223de65ed1d --- /dev/null +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/storage/v1/volumeNodeResources.libsonnet @@ -0,0 +1,8 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='volumeNodeResources', url='', help='"VolumeNodeResources is a set of resource limits for scheduling of volumes."'), + '#withCount':: d.fn(help='"count indicates the maximum number of unique volumes managed by the CSI driver that can be used on a node. A volume that is both attached and mounted on a node is considered to be used once, not twice. The same rule applies for a unique volume that is shared among multiple pods on the same node. 
If this field is not specified, then the supported number of volumes on this node is unbounded."', args=[d.arg(name='count', type=d.T.integer)]), + withCount(count): { count: count }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/scheduling/v1alpha1/main.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/storage/v1alpha1/main.libsonnet similarity index 59% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/scheduling/v1alpha1/main.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/storage/v1alpha1/main.libsonnet index 1b042817725..b477f335984 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/scheduling/v1alpha1/main.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/storage/v1alpha1/main.libsonnet @@ -1,5 +1,5 @@ { local d = (import 'doc-util/main.libsonnet'), '#':: d.pkg(name='v1alpha1', url='', help=''), - priorityClass: (import 'priorityClass.libsonnet'), + volumeAttributesClass: (import 'volumeAttributesClass.libsonnet'), } diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1alpha1/roleBinding.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/storage/v1alpha1/volumeAttributesClass.libsonnet similarity index 71% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1alpha1/roleBinding.libsonnet rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/storage/v1alpha1/volumeAttributesClass.libsonnet index c1e906e19e8..62cc081b0de 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1alpha1/roleBinding.libsonnet +++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/storage/v1alpha1/volumeAttributesClass.libsonnet @@ -1,14 +1,12 @@ { local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='roleBinding', url='', help='"RoleBinding references a role, but does not contain it. It can reference a Role in the same namespace or a ClusterRole in the global namespace. It adds who information via Subjects and namespace information by which namespace it exists in. RoleBindings in a given namespace only have effect in that namespace. Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 RoleBinding, and will no longer be served in v1.22."'), + '#':: d.pkg(name='volumeAttributesClass', url='', help='"VolumeAttributesClass represents a specification of mutable volume attributes defined by the CSI driver. The class can be specified during dynamic provisioning of PersistentVolumeClaims, and changed in the PersistentVolumeClaim spec after provisioning."'), '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -19,21 +17,21 @@ withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. 
Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { metadata+: { generateName: generateName } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. 
More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { metadata+: { labels+: labels } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. 
An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { metadata+: { namespace: namespace } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, @@ -41,29 +39,22 @@ withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { metadata+: { uid: uid } }, }, - '#new':: d.fn(help='new returns an instance of RoleBinding', args=[d.arg(name='name', type=d.T.string)]), + '#new':: d.fn(help='new returns an instance of VolumeAttributesClass', args=[d.arg(name='name', type=d.T.string)]), new(name): { - apiVersion: 'rbac.authorization.k8s.io/v1alpha1', - kind: 'RoleBinding', + apiVersion: 'storage.k8s.io/v1alpha1', + kind: 'VolumeAttributesClass', } + self.metadata.withName(name=name), - '#roleRef':: d.obj(help='"RoleRef contains information that points to the role being used"'), - roleRef: { - '#withApiGroup':: d.fn(help='"APIGroup is the group for the resource being referenced"', args=[d.arg(name='apiGroup', type=d.T.string)]), - withApiGroup(apiGroup): { roleRef+: { apiGroup: apiGroup } }, - '#withKind':: d.fn(help='"Kind is the type of resource being referenced"', args=[d.arg(name='kind', type=d.T.string)]), - withKind(kind): { roleRef+: { kind: kind } }, - '#withName':: d.fn(help='"Name is the name of resource being referenced"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { roleRef+: { name: name } }, - }, - '#withSubjects':: d.fn(help='"Subjects holds references to the objects the role applies to."', args=[d.arg(name='subjects', type=d.T.array)]), - withSubjects(subjects): { subjects: if std.isArray(v=subjects) then subjects else [subjects] }, - '#withSubjectsMixin':: d.fn(help='"Subjects holds references to the objects the role applies to."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='subjects', type=d.T.array)]), - withSubjectsMixin(subjects): { subjects+: if std.isArray(v=subjects) then subjects else [subjects] }, + '#withDriverName':: d.fn(help='"Name of the CSI driver This field is immutable."', args=[d.arg(name='driverName', type=d.T.string)]), + withDriverName(driverName): { driverName: driverName }, + '#withParameters':: d.fn(help='"parameters hold volume attributes defined by the CSI driver. These values are opaque to the Kubernetes and are passed directly to the CSI driver. The underlying storage provider supports changing these attributes on an existing volume, however the parameters field itself is immutable. To invoke a volume update, a new VolumeAttributesClass should be created with new parameters, and the PersistentVolumeClaim should be updated to reference the new VolumeAttributesClass.\\n\\nThis field is required and must contain at least one key/value pair. The keys cannot be empty, and the maximum number of parameters is 512, with a cumulative max size of 256K. If the CSI driver rejects invalid parameters, the target PersistentVolumeClaim will be set to an \\"Infeasible\\" state in the modifyVolumeStatus field."', args=[d.arg(name='parameters', type=d.T.object)]), + withParameters(parameters): { parameters: parameters }, + '#withParametersMixin':: d.fn(help='"parameters hold volume attributes defined by the CSI driver. These values are opaque to the Kubernetes and are passed directly to the CSI driver. The underlying storage provider supports changing these attributes on an existing volume, however the parameters field itself is immutable. 
To invoke a volume update, a new VolumeAttributesClass should be created with new parameters, and the PersistentVolumeClaim should be updated to reference the new VolumeAttributesClass.\\n\\nThis field is required and must contain at least one key/value pair. The keys cannot be empty, and the maximum number of parameters is 512, with a cumulative max size of 256K. If the CSI driver rejects invalid parameters, the target PersistentVolumeClaim will be set to an \\"Infeasible\\" state in the modifyVolumeStatus field."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='parameters', type=d.T.object)]),
+ withParametersMixin(parameters): { parameters+: parameters },
 '#mixin': 'ignore',
 mixin: self,
 }
diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/gen.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/gen.libsonnet
similarity index 90%
rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/gen.libsonnet
rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/gen.libsonnet
index 930daad260a..ba0d522a799 100644
--- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/gen.libsonnet
+++ b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/gen.libsonnet
@@ -1,6 +1,6 @@
 {
 local d = (import 'doc-util/main.libsonnet'),
- '#':: d.pkg(name='k', url='github.com/jsonnet-libs/k8s-libsonnet/1.21/main.libsonnet', help='Generated Jsonnet library for Kubernetes v1.21'),
+ '#':: d.pkg(name='k', url='github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet', help='Generated Jsonnet library for Kubernetes v1.29'),
 admissionregistration:: (import '_gen/admissionregistration/main.libsonnet'),
 apiregistration:: (import '_gen/apiregistration/main.libsonnet'),
 apiserverinternal:: (import '_gen/apiserverinternal/main.libsonnet'),
@@ -14,13 +14,13 @@
 core:: (import '_gen/core/main.libsonnet'),
 discovery:: (import '_gen/discovery/main.libsonnet'),
 events:: (import '_gen/events/main.libsonnet'),
- extensions:: (import '_gen/extensions/main.libsonnet'),
 flowcontrol:: (import '_gen/flowcontrol/main.libsonnet'),
 meta:: (import '_gen/meta/main.libsonnet'),
 networking:: (import '_gen/networking/main.libsonnet'),
 node:: (import '_gen/node/main.libsonnet'),
 policy:: (import '_gen/policy/main.libsonnet'),
 rbac:: (import '_gen/rbac/main.libsonnet'),
+ resource:: (import '_gen/resource/main.libsonnet'),
 scheduling:: (import '_gen/scheduling/main.libsonnet'),
 storage:: (import '_gen/storage/main.libsonnet'),
 }
diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/main.libsonnet b/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet
similarity index 100%
rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/main.libsonnet
rename to example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet
diff --git a/operations/jsonnet-compiled/util/jsonnetfile.json b/operations/jsonnet-compiled/util/jsonnetfile.json
index c04d5b3e88f..05e164e1227 100644
--- a/operations/jsonnet-compiled/util/jsonnetfile.json
+++ b/operations/jsonnet-compiled/util/jsonnetfile.json
@@ -23,7 +23,7 @@
 "source": {
 "git": {
 "remote": "https://github.com/jsonnet-libs/k8s-libsonnet.git",
- "subdir": "1.21"
+ "subdir": "1.29"
 }
 },
 "version": "main"
diff --git a/operations/jsonnet-compiled/util/jsonnetfile.lock.json b/operations/jsonnet-compiled/util/jsonnetfile.lock.json
index 43a34ab6189..94e15607596 100644
--- a/operations/jsonnet-compiled/util/jsonnetfile.lock.json
+++ b/operations/jsonnet-compiled/util/jsonnetfile.lock.json
@@ -8,7 +8,7 @@
 "subdir": "ksonnet-util"
 }
 },
- "version": "3d58bd591c278f3f342bc1e25399806c49ace104",
+ "version": "84e49c8549fa472c963862f233422c8b368afabe",
 "sum": "0y3AFX9LQSpfWTxWKSwoLgbt0Wc9nnCwhMH2szKzHv0="
 },
 {
@@ -18,18 +18,18 @@
 "subdir": "memcached"
 }
 },
- "version": "3d58bd591c278f3f342bc1e25399806c49ace104",
+ "version": "84e49c8549fa472c963862f233422c8b368afabe",
 "sum": "Cc715Y3rgTuimgDFIw+FaKzXSJGRYwt1pFTMbdrNBD8="
 },
 {
 "source": {
 "git": {
 "remote": "https://github.com/jsonnet-libs/k8s-libsonnet.git",
- "subdir": "1.21"
+ "subdir": "1.29"
 }
 },
- "version": "3e32f80d1493d1579d273d1522af1fae2cc7c97f",
- "sum": "b8GtKWztbpnnMojHt8A9sfkEgs+2t2rtvZcpDteuLFo="
+ "version": "bf9a62cfd32a58c071b8410bfcdec058475dd25e",
+ "sum": "i2w3hGbgQmaB73t5LJHSioPOVdYv8ZBvivHiDwZJVyI="
 },
 {
 "source": {
diff --git a/operations/jsonnet-compiled/util/lib/k.libsonnet b/operations/jsonnet-compiled/util/lib/k.libsonnet
index da3161d5f53..62873e17a81 100644
--- a/operations/jsonnet-compiled/util/lib/k.libsonnet
+++ b/operations/jsonnet-compiled/util/lib/k.libsonnet
@@ -1 +1 @@
-import 'github.com/jsonnet-libs/k8s-libsonnet/1.21/main.libsonnet'
+import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet'
diff --git a/operations/jsonnet-compiled/util/vendor/1.21 b/operations/jsonnet-compiled/util/vendor/1.21
deleted file mode 120000
index 406ebf32b16..00000000000
--- a/operations/jsonnet-compiled/util/vendor/1.21
+++ /dev/null
@@ -1 +0,0 @@
-github.com/jsonnet-libs/k8s-libsonnet/1.21
\ No newline at end of file
diff --git a/operations/jsonnet-compiled/util/vendor/1.29 b/operations/jsonnet-compiled/util/vendor/1.29
new file mode 120000
index 00000000000..70e86069e0f
--- /dev/null
+++ b/operations/jsonnet-compiled/util/vendor/1.29
@@ -0,0 +1 @@
+github.com/jsonnet-libs/k8s-libsonnet/1.29
\ No newline at end of file
diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/admissionregistration/v1/serviceReference.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/admissionregistration/v1/serviceReference.libsonnet
deleted file mode 100644
index b4f76e5d51f..00000000000
--- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/admissionregistration/v1/serviceReference.libsonnet
+++ /dev/null
@@ -1,14 +0,0 @@
-{
- local d = (import 'doc-util/main.libsonnet'),
- '#':: d.pkg(name='serviceReference', url='', help='"ServiceReference holds a reference to Service.legacy.k8s.io"'),
- '#withName':: d.fn(help='"`name` is the name of the service. Required"', args=[d.arg(name='name', type=d.T.string)]),
- withName(name): { name: name },
- '#withNamespace':: d.fn(help='"`namespace` is the namespace of the service. Required"', args=[d.arg(name='namespace', type=d.T.string)]),
- withNamespace(namespace): { namespace: namespace },
- '#withPath':: d.fn(help='"`path` is an optional URL path which will be sent in any request to this service."', args=[d.arg(name='path', type=d.T.string)]),
- withPath(path): { path: path },
- '#withPort':: d.fn(help='"If specified, the port on the service that hosting webhook. Default to 443 for backward compatibility.
`port` should be a valid port number (1-65535, inclusive)."', args=[d.arg(name='port', type=d.T.integer)]), - withPort(port): { port: port }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/admissionregistration/v1/webhookClientConfig.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/admissionregistration/v1/webhookClientConfig.libsonnet deleted file mode 100644 index f6c6a97136d..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/admissionregistration/v1/webhookClientConfig.libsonnet +++ /dev/null @@ -1,21 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='webhookClientConfig', url='', help='"WebhookClientConfig contains the information to make a TLS connection with the webhook"'), - '#service':: d.obj(help='"ServiceReference holds a reference to Service.legacy.k8s.io"'), - service: { - '#withName':: d.fn(help='"`name` is the name of the service. Required"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { service+: { name: name } }, - '#withNamespace':: d.fn(help='"`namespace` is the namespace of the service. Required"', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { service+: { namespace: namespace } }, - '#withPath':: d.fn(help='"`path` is an optional URL path which will be sent in any request to this service."', args=[d.arg(name='path', type=d.T.string)]), - withPath(path): { service+: { path: path } }, - '#withPort':: d.fn(help='"If specified, the port on the service that hosting webhook. Default to 443 for backward compatibility. `port` should be a valid port number (1-65535, inclusive)."', args=[d.arg(name='port', type=d.T.integer)]), - withPort(port): { service+: { port: port } }, - }, - '#withCaBundle':: d.fn(help="\"`caBundle` is a PEM encoded CA bundle which will be used to validate the webhook's server certificate. If unspecified, system trust roots on the apiserver are used.\"", args=[d.arg(name='caBundle', type=d.T.string)]), - withCaBundle(caBundle): { caBundle: caBundle }, - '#withUrl':: d.fn(help='"`url` gives the location of the webhook, in standard URL form (`scheme://host:port/path`). Exactly one of `url` or `service` must be specified.\\n\\nThe `host` should not refer to a service running in the cluster; use the `service` field instead. The host might be resolved via external DNS in some apiservers (e.g., `kube-apiserver` cannot resolve in-cluster DNS as that would be a layering violation). `host` may also be an IP address.\\n\\nPlease note that using `localhost` or `127.0.0.1` as a `host` is risky unless you take great care to run this webhook on all hosts which run an apiserver which might need to make calls to this webhook. Such installs are likely to be non-portable, i.e., not easy to turn up in a new cluster.\\n\\nThe scheme must be \\"https\\"; the URL must begin with \\"https://\\".\\n\\nA path is optional, and if present may be any string permissible in a URL. You may use the path to pass an arbitrary string to the webhook, for example, a cluster identifier.\\n\\nAttempting to use a user or basic auth e.g. \\"user:password@\\" is not allowed. 
Fragments (\\"#...\\") and query parameters (\\"?...\\") are not allowed, either."', args=[d.arg(name='url', type=d.T.string)]), - withUrl(url): { url: url }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/admissionregistration/v1beta1/main.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/admissionregistration/v1beta1/main.libsonnet deleted file mode 100644 index 8802f84a51d..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/admissionregistration/v1beta1/main.libsonnet +++ /dev/null @@ -1,11 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='v1beta1', url='', help=''), - mutatingWebhook: (import 'mutatingWebhook.libsonnet'), - mutatingWebhookConfiguration: (import 'mutatingWebhookConfiguration.libsonnet'), - ruleWithOperations: (import 'ruleWithOperations.libsonnet'), - serviceReference: (import 'serviceReference.libsonnet'), - validatingWebhook: (import 'validatingWebhook.libsonnet'), - validatingWebhookConfiguration: (import 'validatingWebhookConfiguration.libsonnet'), - webhookClientConfig: (import 'webhookClientConfig.libsonnet'), -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/admissionregistration/v1beta1/mutatingWebhook.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/admissionregistration/v1beta1/mutatingWebhook.libsonnet deleted file mode 100644 index 911d7310348..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/admissionregistration/v1beta1/mutatingWebhook.libsonnet +++ /dev/null @@ -1,66 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='mutatingWebhook', url='', help='"MutatingWebhook describes an admission webhook and the resources and operations it applies to."'), - '#clientConfig':: d.obj(help='"WebhookClientConfig contains the information to make a TLS connection with the webhook"'), - clientConfig: { - '#service':: d.obj(help='"ServiceReference holds a reference to Service.legacy.k8s.io"'), - service: { - '#withName':: d.fn(help='"`name` is the name of the service. Required"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { clientConfig+: { service+: { name: name } } }, - '#withNamespace':: d.fn(help='"`namespace` is the namespace of the service. Required"', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { clientConfig+: { service+: { namespace: namespace } } }, - '#withPath':: d.fn(help='"`path` is an optional URL path which will be sent in any request to this service."', args=[d.arg(name='path', type=d.T.string)]), - withPath(path): { clientConfig+: { service+: { path: path } } }, - '#withPort':: d.fn(help='"If specified, the port on the service that hosting webhook. Default to 443 for backward compatibility. `port` should be a valid port number (1-65535, inclusive)."', args=[d.arg(name='port', type=d.T.integer)]), - withPort(port): { clientConfig+: { service+: { port: port } } }, - }, - '#withCaBundle':: d.fn(help="\"`caBundle` is a PEM encoded CA bundle which will be used to validate the webhook's server certificate. 
If unspecified, system trust roots on the apiserver are used.\"", args=[d.arg(name='caBundle', type=d.T.string)]), - withCaBundle(caBundle): { clientConfig+: { caBundle: caBundle } }, - '#withUrl':: d.fn(help='"`url` gives the location of the webhook, in standard URL form (`scheme://host:port/path`). Exactly one of `url` or `service` must be specified.\\n\\nThe `host` should not refer to a service running in the cluster; use the `service` field instead. The host might be resolved via external DNS in some apiservers (e.g., `kube-apiserver` cannot resolve in-cluster DNS as that would be a layering violation). `host` may also be an IP address.\\n\\nPlease note that using `localhost` or `127.0.0.1` as a `host` is risky unless you take great care to run this webhook on all hosts which run an apiserver which might need to make calls to this webhook. Such installs are likely to be non-portable, i.e., not easy to turn up in a new cluster.\\n\\nThe scheme must be \\"https\\"; the URL must begin with \\"https://\\".\\n\\nA path is optional, and if present may be any string permissible in a URL. You may use the path to pass an arbitrary string to the webhook, for example, a cluster identifier.\\n\\nAttempting to use a user or basic auth e.g. \\"user:password@\\" is not allowed. Fragments (\\"#...\\") and query parameters (\\"?...\\") are not allowed, either."', args=[d.arg(name='url', type=d.T.string)]), - withUrl(url): { clientConfig+: { url: url } }, - }, - '#namespaceSelector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), - namespaceSelector: { - '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), - withMatchExpressions(matchExpressions): { namespaceSelector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } }, - '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), - withMatchExpressionsMixin(matchExpressions): { namespaceSelector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } }, - '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), - withMatchLabels(matchLabels): { namespaceSelector+: { matchLabels: matchLabels } }, - '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". 
The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), - withMatchLabelsMixin(matchLabels): { namespaceSelector+: { matchLabels+: matchLabels } }, - }, - '#objectSelector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), - objectSelector: { - '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), - withMatchExpressions(matchExpressions): { objectSelector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } }, - '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), - withMatchExpressionsMixin(matchExpressions): { objectSelector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } }, - '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), - withMatchLabels(matchLabels): { objectSelector+: { matchLabels: matchLabels } }, - '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), - withMatchLabelsMixin(matchLabels): { objectSelector+: { matchLabels+: matchLabels } }, - }, - '#withAdmissionReviewVersions':: d.fn(help="\"AdmissionReviewVersions is an ordered list of preferred `AdmissionReview` versions the Webhook expects. API server will try to use first version in the list which it supports. If none of the versions specified in this list supported by API server, validation will fail for this object. If a persisted webhook configuration specifies allowed versions and does not include any versions known to the API Server, calls to the webhook will fail and be subject to the failure policy. Default to `['v1beta1']`.\"", args=[d.arg(name='admissionReviewVersions', type=d.T.array)]), - withAdmissionReviewVersions(admissionReviewVersions): { admissionReviewVersions: if std.isArray(v=admissionReviewVersions) then admissionReviewVersions else [admissionReviewVersions] }, - '#withAdmissionReviewVersionsMixin':: d.fn(help="\"AdmissionReviewVersions is an ordered list of preferred `AdmissionReview` versions the Webhook expects. API server will try to use first version in the list which it supports. If none of the versions specified in this list supported by API server, validation will fail for this object. 
If a persisted webhook configuration specifies allowed versions and does not include any versions known to the API Server, calls to the webhook will fail and be subject to the failure policy. Default to `['v1beta1']`.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='admissionReviewVersions', type=d.T.array)]), - withAdmissionReviewVersionsMixin(admissionReviewVersions): { admissionReviewVersions+: if std.isArray(v=admissionReviewVersions) then admissionReviewVersions else [admissionReviewVersions] }, - '#withFailurePolicy':: d.fn(help='"FailurePolicy defines how unrecognized errors from the admission endpoint are handled - allowed values are Ignore or Fail. Defaults to Ignore."', args=[d.arg(name='failurePolicy', type=d.T.string)]), - withFailurePolicy(failurePolicy): { failurePolicy: failurePolicy }, - '#withMatchPolicy':: d.fn(help='"matchPolicy defines how the \\"rules\\" list is used to match incoming requests. Allowed values are \\"Exact\\" or \\"Equivalent\\".\\n\\n- Exact: match a request only if it exactly matches a specified rule. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, but \\"rules\\" only included `apiGroups:[\\"apps\\"], apiVersions:[\\"v1\\"], resources: [\\"deployments\\"]`, a request to apps/v1beta1 or extensions/v1beta1 would not be sent to the webhook.\\n\\n- Equivalent: match a request if modifies a resource listed in rules, even via another API group or version. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, and \\"rules\\" only included `apiGroups:[\\"apps\\"], apiVersions:[\\"v1\\"], resources: [\\"deployments\\"]`, a request to apps/v1beta1 or extensions/v1beta1 would be converted to apps/v1 and sent to the webhook.\\n\\nDefaults to \\"Exact\\', args=[d.arg(name='matchPolicy', type=d.T.string)]), - withMatchPolicy(matchPolicy): { matchPolicy: matchPolicy }, - '#withName':: d.fn(help='"The name of the admission webhook. Name should be fully qualified, e.g., imagepolicy.kubernetes.io, where \\"imagepolicy\\" is the name of the webhook, and kubernetes.io is the name of the organization. Required."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { name: name }, - '#withReinvocationPolicy':: d.fn(help='"reinvocationPolicy indicates whether this webhook should be called multiple times as part of a single admission evaluation. Allowed values are \\"Never\\" and \\"IfNeeded\\".\\n\\nNever: the webhook will not be called more than once in a single admission evaluation.\\n\\nIfNeeded: the webhook will be called at least one additional time as part of the admission evaluation if the object being admitted is modified by other admission plugins after the initial webhook call. Webhooks that specify this option *must* be idempotent, able to process objects they previously admitted. Note: * the number of additional invocations is not guaranteed to be exactly one. * if additional invocations result in further modifications to the object, webhooks are not guaranteed to be invoked again. * webhooks that use this option may be reordered to minimize the number of additional invocations. 
* to validate an object after all mutations are guaranteed complete, use a validating admission webhook instead.\\n\\nDefaults to \\"Never\\"."', args=[d.arg(name='reinvocationPolicy', type=d.T.string)]), - withReinvocationPolicy(reinvocationPolicy): { reinvocationPolicy: reinvocationPolicy }, - '#withRules':: d.fn(help='"Rules describes what operations on what resources/subresources the webhook cares about. The webhook cares about an operation if it matches _any_ Rule. However, in order to prevent ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks from putting the cluster in a state which cannot be recovered from without completely disabling the plugin, ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks are never called on admission requests for ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects."', args=[d.arg(name='rules', type=d.T.array)]), - withRules(rules): { rules: if std.isArray(v=rules) then rules else [rules] }, - '#withRulesMixin':: d.fn(help='"Rules describes what operations on what resources/subresources the webhook cares about. The webhook cares about an operation if it matches _any_ Rule. However, in order to prevent ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks from putting the cluster in a state which cannot be recovered from without completely disabling the plugin, ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks are never called on admission requests for ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='rules', type=d.T.array)]), - withRulesMixin(rules): { rules+: if std.isArray(v=rules) then rules else [rules] }, - '#withSideEffects':: d.fn(help='"SideEffects states whether this webhook has side effects. Acceptable values are: Unknown, None, Some, NoneOnDryRun Webhooks with side effects MUST implement a reconciliation system, since a request may be rejected by a future step in the admission chain and the side effects therefore need to be undone. Requests with the dryRun attribute will be auto-rejected if they match a webhook with sideEffects == Unknown or Some. Defaults to Unknown."', args=[d.arg(name='sideEffects', type=d.T.string)]), - withSideEffects(sideEffects): { sideEffects: sideEffects }, - '#withTimeoutSeconds':: d.fn(help='"TimeoutSeconds specifies the timeout for this webhook. After the timeout passes, the webhook call will be ignored or the API call will fail based on the failure policy. The timeout value must be between 1 and 30 seconds. 
Default to 30 seconds."', args=[d.arg(name='timeoutSeconds', type=d.T.integer)]), - withTimeoutSeconds(timeoutSeconds): { timeoutSeconds: timeoutSeconds }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/admissionregistration/v1beta1/mutatingWebhookConfiguration.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/admissionregistration/v1beta1/mutatingWebhookConfiguration.libsonnet deleted file mode 100644 index ed5a67a18bf..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/admissionregistration/v1beta1/mutatingWebhookConfiguration.libsonnet +++ /dev/null @@ -1,60 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='mutatingWebhookConfiguration', url='', help='"MutatingWebhookConfiguration describes the configuration of and admission webhook that accept or reject and may change the object. Deprecated in v1.16, planned for removal in v1.19. Use admissionregistration.k8s.io/v1 MutatingWebhookConfiguration instead."'), - '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), - metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), - withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), - withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, - '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), - withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, - '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. 
Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), - withDeletionGracePeriodSeconds(deletionGracePeriodSeconds): { metadata+: { deletionGracePeriodSeconds: deletionGracePeriodSeconds } }, - '#withDeletionTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='deletionTimestamp', type=d.T.string)]), - withDeletionTimestamp(deletionTimestamp): { metadata+: { deletionTimestamp: deletionTimestamp } }, - '#withFinalizers':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. 
The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), - withGenerateName(generateName): { metadata+: { generateName: generateName } }, - '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), - withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), - withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), - withLabelsMixin(labels): { metadata+: { labels+: labels } }, - '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. 
More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { metadata+: { namespace: namespace } }, - '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, - '#withOwnerReferencesMixin':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, - '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), - withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), - withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. 
More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), - withUid(uid): { metadata+: { uid: uid } }, - }, - '#new':: d.fn(help='new returns an instance of MutatingWebhookConfiguration', args=[d.arg(name='name', type=d.T.string)]), - new(name): { - apiVersion: 'admissionregistration.k8s.io/v1beta1', - kind: 'MutatingWebhookConfiguration', - } + self.metadata.withName(name=name), - '#withWebhooks':: d.fn(help='"Webhooks is a list of webhooks and the affected resources and operations."', args=[d.arg(name='webhooks', type=d.T.array)]), - withWebhooks(webhooks): { webhooks: if std.isArray(v=webhooks) then webhooks else [webhooks] }, - '#withWebhooksMixin':: d.fn(help='"Webhooks is a list of webhooks and the affected resources and operations."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='webhooks', type=d.T.array)]), - withWebhooksMixin(webhooks): { webhooks+: if std.isArray(v=webhooks) then webhooks else [webhooks] }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/admissionregistration/v1beta1/serviceReference.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/admissionregistration/v1beta1/serviceReference.libsonnet deleted file mode 100644 index b4f76e5d51f..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/admissionregistration/v1beta1/serviceReference.libsonnet +++ /dev/null @@ -1,14 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='serviceReference', url='', help='"ServiceReference holds a reference to Service.legacy.k8s.io"'), - '#withName':: d.fn(help='"`name` is the name of the service. Required"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { name: name }, - '#withNamespace':: d.fn(help='"`namespace` is the namespace of the service. Required"', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { namespace: namespace }, - '#withPath':: d.fn(help='"`path` is an optional URL path which will be sent in any request to this service."', args=[d.arg(name='path', type=d.T.string)]), - withPath(path): { path: path }, - '#withPort':: d.fn(help='"If specified, the port on the service that hosting webhook. Default to 443 for backward compatibility. 
`port` should be a valid port number (1-65535, inclusive)."', args=[d.arg(name='port', type=d.T.integer)]), - withPort(port): { port: port }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/admissionregistration/v1beta1/validatingWebhook.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/admissionregistration/v1beta1/validatingWebhook.libsonnet deleted file mode 100644 index fb2074edb84..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/admissionregistration/v1beta1/validatingWebhook.libsonnet +++ /dev/null @@ -1,64 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='validatingWebhook', url='', help='"ValidatingWebhook describes an admission webhook and the resources and operations it applies to."'), - '#clientConfig':: d.obj(help='"WebhookClientConfig contains the information to make a TLS connection with the webhook"'), - clientConfig: { - '#service':: d.obj(help='"ServiceReference holds a reference to Service.legacy.k8s.io"'), - service: { - '#withName':: d.fn(help='"`name` is the name of the service. Required"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { clientConfig+: { service+: { name: name } } }, - '#withNamespace':: d.fn(help='"`namespace` is the namespace of the service. Required"', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { clientConfig+: { service+: { namespace: namespace } } }, - '#withPath':: d.fn(help='"`path` is an optional URL path which will be sent in any request to this service."', args=[d.arg(name='path', type=d.T.string)]), - withPath(path): { clientConfig+: { service+: { path: path } } }, - '#withPort':: d.fn(help='"If specified, the port on the service that hosting webhook. Default to 443 for backward compatibility. `port` should be a valid port number (1-65535, inclusive)."', args=[d.arg(name='port', type=d.T.integer)]), - withPort(port): { clientConfig+: { service+: { port: port } } }, - }, - '#withCaBundle':: d.fn(help="\"`caBundle` is a PEM encoded CA bundle which will be used to validate the webhook's server certificate. If unspecified, system trust roots on the apiserver are used.\"", args=[d.arg(name='caBundle', type=d.T.string)]), - withCaBundle(caBundle): { clientConfig+: { caBundle: caBundle } }, - '#withUrl':: d.fn(help='"`url` gives the location of the webhook, in standard URL form (`scheme://host:port/path`). Exactly one of `url` or `service` must be specified.\\n\\nThe `host` should not refer to a service running in the cluster; use the `service` field instead. The host might be resolved via external DNS in some apiservers (e.g., `kube-apiserver` cannot resolve in-cluster DNS as that would be a layering violation). `host` may also be an IP address.\\n\\nPlease note that using `localhost` or `127.0.0.1` as a `host` is risky unless you take great care to run this webhook on all hosts which run an apiserver which might need to make calls to this webhook. Such installs are likely to be non-portable, i.e., not easy to turn up in a new cluster.\\n\\nThe scheme must be \\"https\\"; the URL must begin with \\"https://\\".\\n\\nA path is optional, and if present may be any string permissible in a URL. You may use the path to pass an arbitrary string to the webhook, for example, a cluster identifier.\\n\\nAttempting to use a user or basic auth e.g. 
\\"user:password@\\" is not allowed. Fragments (\\"#...\\") and query parameters (\\"?...\\") are not allowed, either."', args=[d.arg(name='url', type=d.T.string)]), - withUrl(url): { clientConfig+: { url: url } }, - }, - '#namespaceSelector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), - namespaceSelector: { - '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), - withMatchExpressions(matchExpressions): { namespaceSelector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } }, - '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), - withMatchExpressionsMixin(matchExpressions): { namespaceSelector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } }, - '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), - withMatchLabels(matchLabels): { namespaceSelector+: { matchLabels: matchLabels } }, - '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), - withMatchLabelsMixin(matchLabels): { namespaceSelector+: { matchLabels+: matchLabels } }, - }, - '#objectSelector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), - objectSelector: { - '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), - withMatchExpressions(matchExpressions): { objectSelector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } }, - '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), - withMatchExpressionsMixin(matchExpressions): { objectSelector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } }, - '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), - withMatchLabels(matchLabels): { objectSelector+: { matchLabels: matchLabels } }, - '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), - withMatchLabelsMixin(matchLabels): { objectSelector+: { matchLabels+: matchLabels } }, - }, - '#withAdmissionReviewVersions':: d.fn(help="\"AdmissionReviewVersions is an ordered list of preferred `AdmissionReview` versions the Webhook expects. API server will try to use first version in the list which it supports. If none of the versions specified in this list supported by API server, validation will fail for this object. If a persisted webhook configuration specifies allowed versions and does not include any versions known to the API Server, calls to the webhook will fail and be subject to the failure policy. Default to `['v1beta1']`.\"", args=[d.arg(name='admissionReviewVersions', type=d.T.array)]), - withAdmissionReviewVersions(admissionReviewVersions): { admissionReviewVersions: if std.isArray(v=admissionReviewVersions) then admissionReviewVersions else [admissionReviewVersions] }, - '#withAdmissionReviewVersionsMixin':: d.fn(help="\"AdmissionReviewVersions is an ordered list of preferred `AdmissionReview` versions the Webhook expects. API server will try to use first version in the list which it supports. If none of the versions specified in this list supported by API server, validation will fail for this object. If a persisted webhook configuration specifies allowed versions and does not include any versions known to the API Server, calls to the webhook will fail and be subject to the failure policy. Default to `['v1beta1']`.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='admissionReviewVersions', type=d.T.array)]), - withAdmissionReviewVersionsMixin(admissionReviewVersions): { admissionReviewVersions+: if std.isArray(v=admissionReviewVersions) then admissionReviewVersions else [admissionReviewVersions] }, - '#withFailurePolicy':: d.fn(help='"FailurePolicy defines how unrecognized errors from the admission endpoint are handled - allowed values are Ignore or Fail. Defaults to Ignore."', args=[d.arg(name='failurePolicy', type=d.T.string)]), - withFailurePolicy(failurePolicy): { failurePolicy: failurePolicy }, - '#withMatchPolicy':: d.fn(help='"matchPolicy defines how the \\"rules\\" list is used to match incoming requests. Allowed values are \\"Exact\\" or \\"Equivalent\\".\\n\\n- Exact: match a request only if it exactly matches a specified rule. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, but \\"rules\\" only included `apiGroups:[\\"apps\\"], apiVersions:[\\"v1\\"], resources: [\\"deployments\\"]`, a request to apps/v1beta1 or extensions/v1beta1 would not be sent to the webhook.\\n\\n- Equivalent: match a request if modifies a resource listed in rules, even via another API group or version. 
For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, and \\"rules\\" only included `apiGroups:[\\"apps\\"], apiVersions:[\\"v1\\"], resources: [\\"deployments\\"]`, a request to apps/v1beta1 or extensions/v1beta1 would be converted to apps/v1 and sent to the webhook.\\n\\nDefaults to \\"Exact\\', args=[d.arg(name='matchPolicy', type=d.T.string)]), - withMatchPolicy(matchPolicy): { matchPolicy: matchPolicy }, - '#withName':: d.fn(help='"The name of the admission webhook. Name should be fully qualified, e.g., imagepolicy.kubernetes.io, where \\"imagepolicy\\" is the name of the webhook, and kubernetes.io is the name of the organization. Required."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { name: name }, - '#withRules':: d.fn(help='"Rules describes what operations on what resources/subresources the webhook cares about. The webhook cares about an operation if it matches _any_ Rule. However, in order to prevent ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks from putting the cluster in a state which cannot be recovered from without completely disabling the plugin, ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks are never called on admission requests for ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects."', args=[d.arg(name='rules', type=d.T.array)]), - withRules(rules): { rules: if std.isArray(v=rules) then rules else [rules] }, - '#withRulesMixin':: d.fn(help='"Rules describes what operations on what resources/subresources the webhook cares about. The webhook cares about an operation if it matches _any_ Rule. However, in order to prevent ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks from putting the cluster in a state which cannot be recovered from without completely disabling the plugin, ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks are never called on admission requests for ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='rules', type=d.T.array)]), - withRulesMixin(rules): { rules+: if std.isArray(v=rules) then rules else [rules] }, - '#withSideEffects':: d.fn(help='"SideEffects states whether this webhook has side effects. Acceptable values are: Unknown, None, Some, NoneOnDryRun Webhooks with side effects MUST implement a reconciliation system, since a request may be rejected by a future step in the admission chain and the side effects therefore need to be undone. Requests with the dryRun attribute will be auto-rejected if they match a webhook with sideEffects == Unknown or Some. Defaults to Unknown."', args=[d.arg(name='sideEffects', type=d.T.string)]), - withSideEffects(sideEffects): { sideEffects: sideEffects }, - '#withTimeoutSeconds':: d.fn(help='"TimeoutSeconds specifies the timeout for this webhook. After the timeout passes, the webhook call will be ignored or the API call will fail based on the failure policy. The timeout value must be between 1 and 30 seconds. 
Default to 30 seconds."', args=[d.arg(name='timeoutSeconds', type=d.T.integer)]), - withTimeoutSeconds(timeoutSeconds): { timeoutSeconds: timeoutSeconds }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/admissionregistration/v1beta1/validatingWebhookConfiguration.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/admissionregistration/v1beta1/validatingWebhookConfiguration.libsonnet deleted file mode 100644 index 2325ced2e14..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/admissionregistration/v1beta1/validatingWebhookConfiguration.libsonnet +++ /dev/null @@ -1,60 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='validatingWebhookConfiguration', url='', help='"ValidatingWebhookConfiguration describes the configuration of and admission webhook that accept or reject and object without changing it. Deprecated in v1.16, planned for removal in v1.19. Use admissionregistration.k8s.io/v1 ValidatingWebhookConfiguration instead."'), - '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), - metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), - withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), - withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, - '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), - withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, - '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. 
Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), - withDeletionGracePeriodSeconds(deletionGracePeriodSeconds): { metadata+: { deletionGracePeriodSeconds: deletionGracePeriodSeconds } }, - '#withDeletionTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='deletionTimestamp', type=d.T.string)]), - withDeletionTimestamp(deletionTimestamp): { metadata+: { deletionTimestamp: deletionTimestamp } }, - '#withFinalizers':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. 
The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), - withGenerateName(generateName): { metadata+: { generateName: generateName } }, - '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), - withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), - withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), - withLabelsMixin(labels): { metadata+: { labels+: labels } }, - '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. 
More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { metadata+: { namespace: namespace } }, - '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, - '#withOwnerReferencesMixin':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, - '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), - withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), - withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. 
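Every metadata helper above nests its field under metadata+:, so individual calls can be summed in any order and merge into a single metadata block. A short illustrative sketch; the import path, label and annotation values are assumptions:

  local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';  // import path assumed
  local vwc = k.admissionregistration.v1.validatingWebhookConfiguration;

  // Each call writes under metadata+:, so the three fragments merge into one metadata object.
  vwc.metadata.withName('example-webhook')
  + vwc.metadata.withLabels({ team: 'platform' })
  + vwc.metadata.withAnnotationsMixin({ 'example.com/owner': 'infra' })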
More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), - withUid(uid): { metadata+: { uid: uid } }, - }, - '#new':: d.fn(help='new returns an instance of ValidatingWebhookConfiguration', args=[d.arg(name='name', type=d.T.string)]), - new(name): { - apiVersion: 'admissionregistration.k8s.io/v1beta1', - kind: 'ValidatingWebhookConfiguration', - } + self.metadata.withName(name=name), - '#withWebhooks':: d.fn(help='"Webhooks is a list of webhooks and the affected resources and operations."', args=[d.arg(name='webhooks', type=d.T.array)]), - withWebhooks(webhooks): { webhooks: if std.isArray(v=webhooks) then webhooks else [webhooks] }, - '#withWebhooksMixin':: d.fn(help='"Webhooks is a list of webhooks and the affected resources and operations."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='webhooks', type=d.T.array)]), - withWebhooksMixin(webhooks): { webhooks+: if std.isArray(v=webhooks) then webhooks else [webhooks] }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/admissionregistration/v1beta1/webhookClientConfig.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/admissionregistration/v1beta1/webhookClientConfig.libsonnet deleted file mode 100644 index f6c6a97136d..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/admissionregistration/v1beta1/webhookClientConfig.libsonnet +++ /dev/null @@ -1,21 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='webhookClientConfig', url='', help='"WebhookClientConfig contains the information to make a TLS connection with the webhook"'), - '#service':: d.obj(help='"ServiceReference holds a reference to Service.legacy.k8s.io"'), - service: { - '#withName':: d.fn(help='"`name` is the name of the service. Required"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { service+: { name: name } }, - '#withNamespace':: d.fn(help='"`namespace` is the namespace of the service. Required"', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { service+: { namespace: namespace } }, - '#withPath':: d.fn(help='"`path` is an optional URL path which will be sent in any request to this service."', args=[d.arg(name='path', type=d.T.string)]), - withPath(path): { service+: { path: path } }, - '#withPort':: d.fn(help='"If specified, the port on the service that hosting webhook. Default to 443 for backward compatibility. `port` should be a valid port number (1-65535, inclusive)."', args=[d.arg(name='port', type=d.T.integer)]), - withPort(port): { service+: { port: port } }, - }, - '#withCaBundle':: d.fn(help="\"`caBundle` is a PEM encoded CA bundle which will be used to validate the webhook's server certificate. If unspecified, system trust roots on the apiserver are used.\"", args=[d.arg(name='caBundle', type=d.T.string)]), - withCaBundle(caBundle): { caBundle: caBundle }, - '#withUrl':: d.fn(help='"`url` gives the location of the webhook, in standard URL form (`scheme://host:port/path`). Exactly one of `url` or `service` must be specified.\\n\\nThe `host` should not refer to a service running in the cluster; use the `service` field instead. The host might be resolved via external DNS in some apiservers (e.g., `kube-apiserver` cannot resolve in-cluster DNS as that would be a layering violation). 
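The deprecation notice in the deleted v1beta1 package points at admissionregistration.k8s.io/v1 as the replacement. A hedged sketch of building the equivalent object with the surviving v1 helpers, assuming they mirror the generated code shown above (new() sets apiVersion, kind and metadata.name; withWebhooks takes an array):

  local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';  // import path assumed
  local vwc = k.admissionregistration.v1.validatingWebhookConfiguration;
  local webhook = k.admissionregistration.v1.validatingWebhook;

  vwc.new('pod-policy')
  + vwc.withWebhooks([
    webhook.withName('pods.example.com')
    + webhook.withSideEffects('None')
    + webhook.withTimeoutSeconds(10),
  ])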
`host` may also be an IP address.\\n\\nPlease note that using `localhost` or `127.0.0.1` as a `host` is risky unless you take great care to run this webhook on all hosts which run an apiserver which might need to make calls to this webhook. Such installs are likely to be non-portable, i.e., not easy to turn up in a new cluster.\\n\\nThe scheme must be \\"https\\"; the URL must begin with \\"https://\\".\\n\\nA path is optional, and if present may be any string permissible in a URL. You may use the path to pass an arbitrary string to the webhook, for example, a cluster identifier.\\n\\nAttempting to use a user or basic auth e.g. \\"user:password@\\" is not allowed. Fragments (\\"#...\\") and query parameters (\\"?...\\") are not allowed, either."', args=[d.arg(name='url', type=d.T.string)]), - withUrl(url): { url: url }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apiregistration/v1/apiServiceCondition.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apiregistration/v1/apiServiceCondition.libsonnet deleted file mode 100644 index d79fb5e3d58..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apiregistration/v1/apiServiceCondition.libsonnet +++ /dev/null @@ -1,14 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='apiServiceCondition', url='', help='"APIServiceCondition describes the state of an APIService at a particular point"'), - '#withLastTransitionTime':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='lastTransitionTime', type=d.T.string)]), - withLastTransitionTime(lastTransitionTime): { lastTransitionTime: lastTransitionTime }, - '#withMessage':: d.fn(help='"Human-readable message indicating details about last transition."', args=[d.arg(name='message', type=d.T.string)]), - withMessage(message): { message: message }, - '#withReason':: d.fn(help="\"Unique, one-word, CamelCase reason for the condition's last transition.\"", args=[d.arg(name='reason', type=d.T.string)]), - withReason(reason): { reason: reason }, - '#withType':: d.fn(help='"Type is the type of the condition."', args=[d.arg(name='type', type=d.T.string)]), - withType(type): { type: type }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apiregistration/v1/apiServiceStatus.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apiregistration/v1/apiServiceStatus.libsonnet deleted file mode 100644 index 64186da468f..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apiregistration/v1/apiServiceStatus.libsonnet +++ /dev/null @@ -1,10 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='apiServiceStatus', url='', help='"APIServiceStatus contains derived information about an API server"'), - '#withConditions':: d.fn(help='"Current service state of apiService."', args=[d.arg(name='conditions', type=d.T.array)]), - withConditions(conditions): { conditions: if std.isArray(v=conditions) then conditions else [conditions] }, - '#withConditionsMixin':: d.fn(help='"Current service state of apiService."\n\n**Note:** This function appends 
passed data to existing values', args=[d.arg(name='conditions', type=d.T.array)]), - withConditionsMixin(conditions): { conditions+: if std.isArray(v=conditions) then conditions else [conditions] }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apiregistration/v1/serviceReference.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apiregistration/v1/serviceReference.libsonnet deleted file mode 100644 index 7e3e0042e1f..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apiregistration/v1/serviceReference.libsonnet +++ /dev/null @@ -1,12 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='serviceReference', url='', help='"ServiceReference holds a reference to Service.legacy.k8s.io"'), - '#withName':: d.fn(help='"Name is the name of the service"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { name: name }, - '#withNamespace':: d.fn(help='"Namespace is the namespace of the service"', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { namespace: namespace }, - '#withPort':: d.fn(help='"If specified, the port on the service that hosting webhook. Default to 443 for backward compatibility. `port` should be a valid port number (1-65535, inclusive)."', args=[d.arg(name='port', type=d.T.integer)]), - withPort(port): { port: port }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apiregistration/v1beta1/apiService.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apiregistration/v1beta1/apiService.libsonnet deleted file mode 100644 index f743ceb7dd2..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apiregistration/v1beta1/apiService.libsonnet +++ /dev/null @@ -1,80 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='apiService', url='', help='"APIService represents a server for a particular GroupVersion. Name must be \\"version.group\\"."'), - '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), - metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), - withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), - withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. 
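The webhookClientConfig helpers deleted above describe either an in-cluster service reference or a url, plus an optional caBundle. A minimal sketch of the service form, assuming an equivalent v1 package exists at the same location and using placeholder names and values:

  local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';  // import path assumed
  local clientConfig = k.admissionregistration.v1.webhookClientConfig;           // v1 counterpart assumed

  // Point the webhook at a service in the cluster; caBundle is a placeholder here.
  clientConfig.service.withNamespace('default')
  + clientConfig.service.withName('webhook-svc')
  + clientConfig.service.withPath('/validate')
  + clientConfig.service.withPort(8443)
  + clientConfig.withCaBundle('<base64-encoded PEM bundle>')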
This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, - '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), - withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, - '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), - withDeletionGracePeriodSeconds(deletionGracePeriodSeconds): { metadata+: { deletionGracePeriodSeconds: deletionGracePeriodSeconds } }, - '#withDeletionTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='deletionTimestamp', type=d.T.string)]), - withDeletionTimestamp(deletionTimestamp): { metadata+: { deletionTimestamp: deletionTimestamp } }, - '#withFinalizers':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. 
Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), - withGenerateName(generateName): { metadata+: { generateName: generateName } }, - '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), - withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), - withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), - withLabelsMixin(labels): { metadata+: { labels+: labels } }, - '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. 
A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { metadata+: { namespace: namespace } }, - '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, - '#withOwnerReferencesMixin':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, - '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), - withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), - withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), - withUid(uid): { metadata+: { uid: uid } }, - }, - '#new':: d.fn(help='new returns an instance of APIService', args=[d.arg(name='name', type=d.T.string)]), - new(name): { - apiVersion: 'apiregistration.k8s.io/v1beta1', - kind: 'APIService', - } + self.metadata.withName(name=name), - '#spec':: d.obj(help='"APIServiceSpec contains information for locating and communicating with a server. Only https is supported, though you are able to disable certificate verification."'), - spec: { - '#service':: d.obj(help='"ServiceReference holds a reference to Service.legacy.k8s.io"'), - service: { - '#withName':: d.fn(help='"Name is the name of the service"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { spec+: { service+: { name: name } } }, - '#withNamespace':: d.fn(help='"Namespace is the namespace of the service"', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { spec+: { service+: { namespace: namespace } } }, - '#withPort':: d.fn(help='"If specified, the port on the service that hosting webhook. Default to 443 for backward compatibility. `port` should be a valid port number (1-65535, inclusive)."', args=[d.arg(name='port', type=d.T.integer)]), - withPort(port): { spec+: { service+: { port: port } } }, - }, - '#withCaBundle':: d.fn(help="\"CABundle is a PEM encoded CA bundle which will be used to validate an API server's serving certificate. If unspecified, system trust roots on the apiserver are used.\"", args=[d.arg(name='caBundle', type=d.T.string)]), - withCaBundle(caBundle): { spec+: { caBundle: caBundle } }, - '#withGroup':: d.fn(help='"Group is the API group name this server hosts"', args=[d.arg(name='group', type=d.T.string)]), - withGroup(group): { spec+: { group: group } }, - '#withGroupPriorityMinimum':: d.fn(help="\"GroupPriorityMininum is the priority this group should have at least. Higher priority means that the group is preferred by clients over lower priority ones. Note that other versions of this group might specify even higher GroupPriorityMininum values such that the whole group gets a higher priority. The primary sort is based on GroupPriorityMinimum, ordered highest number to lowest (20 before 10). The secondary sort is based on the alphabetical comparison of the name of the object. 
(v1.bar before v1.foo) We'd recommend something like: *.k8s.io (except extensions) at 18000 and PaaSes (OpenShift, Deis) are recommended to be in the 2000s\"", args=[d.arg(name='groupPriorityMinimum', type=d.T.integer)]), - withGroupPriorityMinimum(groupPriorityMinimum): { spec+: { groupPriorityMinimum: groupPriorityMinimum } }, - '#withInsecureSkipTLSVerify':: d.fn(help='"InsecureSkipTLSVerify disables TLS certificate verification when communicating with this server. This is strongly discouraged. You should use the CABundle instead."', args=[d.arg(name='insecureSkipTLSVerify', type=d.T.boolean)]), - withInsecureSkipTLSVerify(insecureSkipTLSVerify): { spec+: { insecureSkipTLSVerify: insecureSkipTLSVerify } }, - '#withVersion':: d.fn(help='"Version is the API version this server hosts. For example, \\"v1\\', args=[d.arg(name='version', type=d.T.string)]), - withVersion(version): { spec+: { version: version } }, - '#withVersionPriority':: d.fn(help="\"VersionPriority controls the ordering of this API version inside of its group. Must be greater than zero. The primary sort is based on VersionPriority, ordered highest to lowest (20 before 10). Since it's inside of a group, the number can be small, probably in the 10s. In case of equal version priorities, the version string will be used to compute the order inside a group. If the version string is \\\"kube-like\\\", it will sort above non \\\"kube-like\\\" version strings, which are ordered lexicographically. \\\"Kube-like\\\" versions start with a \\\"v\\\", then are followed by a number (the major version), then optionally the string \\\"alpha\\\" or \\\"beta\\\" and another number (the minor version). These are sorted first by GA \u003e beta \u003e alpha (where GA is a version with no suffix such as beta or alpha), and then by comparing major version, then minor version. An example sorted list of versions: v10, v2, v1, v11beta2, v10beta3, v3beta1, v12alpha1, v11alpha2, foo1, foo10.\"", args=[d.arg(name='versionPriority', type=d.T.integer)]), - withVersionPriority(versionPriority): { spec+: { versionPriority: versionPriority } }, - }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apiregistration/v1beta1/apiServiceCondition.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apiregistration/v1beta1/apiServiceCondition.libsonnet deleted file mode 100644 index d79fb5e3d58..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apiregistration/v1beta1/apiServiceCondition.libsonnet +++ /dev/null @@ -1,14 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='apiServiceCondition', url='', help='"APIServiceCondition describes the state of an APIService at a particular point"'), - '#withLastTransitionTime':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. 
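The apiService builder above combines new(name), where the name must be "version.group", with spec helpers for group, version, the two priorities, and the backing service reference. A sketch against the v1 counterpart that remains after this change, with the import path assumed and the metrics-server registration used only as a familiar shape:

  local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';  // import path assumed
  local apiService = k.apiregistration.v1.apiService;

  apiService.new('v1beta1.metrics.k8s.io')           // name is "version.group"
  + apiService.spec.withGroup('metrics.k8s.io')
  + apiService.spec.withVersion('v1beta1')
  + apiService.spec.withGroupPriorityMinimum(100)
  + apiService.spec.withVersionPriority(100)
  + apiService.spec.service.withNamespace('kube-system')
  + apiService.spec.service.withName('metrics-server')
  + apiService.spec.service.withPort(443)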
Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='lastTransitionTime', type=d.T.string)]), - withLastTransitionTime(lastTransitionTime): { lastTransitionTime: lastTransitionTime }, - '#withMessage':: d.fn(help='"Human-readable message indicating details about last transition."', args=[d.arg(name='message', type=d.T.string)]), - withMessage(message): { message: message }, - '#withReason':: d.fn(help="\"Unique, one-word, CamelCase reason for the condition's last transition.\"", args=[d.arg(name='reason', type=d.T.string)]), - withReason(reason): { reason: reason }, - '#withType':: d.fn(help='"Type is the type of the condition."', args=[d.arg(name='type', type=d.T.string)]), - withType(type): { type: type }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apiregistration/v1beta1/apiServiceStatus.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apiregistration/v1beta1/apiServiceStatus.libsonnet deleted file mode 100644 index 64186da468f..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apiregistration/v1beta1/apiServiceStatus.libsonnet +++ /dev/null @@ -1,10 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='apiServiceStatus', url='', help='"APIServiceStatus contains derived information about an API server"'), - '#withConditions':: d.fn(help='"Current service state of apiService."', args=[d.arg(name='conditions', type=d.T.array)]), - withConditions(conditions): { conditions: if std.isArray(v=conditions) then conditions else [conditions] }, - '#withConditionsMixin':: d.fn(help='"Current service state of apiService."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='conditions', type=d.T.array)]), - withConditionsMixin(conditions): { conditions+: if std.isArray(v=conditions) then conditions else [conditions] }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apiregistration/v1beta1/main.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apiregistration/v1beta1/main.libsonnet deleted file mode 100644 index 2cdc33f58df..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apiregistration/v1beta1/main.libsonnet +++ /dev/null @@ -1,9 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='v1beta1', url='', help=''), - apiService: (import 'apiService.libsonnet'), - apiServiceCondition: (import 'apiServiceCondition.libsonnet'), - apiServiceSpec: (import 'apiServiceSpec.libsonnet'), - apiServiceStatus: (import 'apiServiceStatus.libsonnet'), - serviceReference: (import 'serviceReference.libsonnet'), -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apiregistration/v1beta1/serviceReference.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apiregistration/v1beta1/serviceReference.libsonnet deleted file mode 100644 index 7e3e0042e1f..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apiregistration/v1beta1/serviceReference.libsonnet +++ /dev/null @@ -1,12 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: 
d.pkg(name='serviceReference', url='', help='"ServiceReference holds a reference to Service.legacy.k8s.io"'), - '#withName':: d.fn(help='"Name is the name of the service"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { name: name }, - '#withNamespace':: d.fn(help='"Namespace is the namespace of the service"', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { namespace: namespace }, - '#withPort':: d.fn(help='"If specified, the port on the service that hosting webhook. Default to 443 for backward compatibility. `port` should be a valid port number (1-65535, inclusive)."', args=[d.arg(name='port', type=d.T.integer)]), - withPort(port): { port: port }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apps/v1/rollingUpdateStatefulSetStrategy.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apps/v1/rollingUpdateStatefulSetStrategy.libsonnet deleted file mode 100644 index 1e01fe67d32..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apps/v1/rollingUpdateStatefulSetStrategy.libsonnet +++ /dev/null @@ -1,8 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='rollingUpdateStatefulSetStrategy', url='', help='"RollingUpdateStatefulSetStrategy is used to communicate parameter for RollingUpdateStatefulSetStrategyType."'), - '#withPartition':: d.fn(help='"Partition indicates the ordinal at which the StatefulSet should be partitioned. Default value is 0."', args=[d.arg(name='partition', type=d.T.integer)]), - withPartition(partition): { partition: partition }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authentication/v1/tokenReviewSpec.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authentication/v1/tokenReviewSpec.libsonnet deleted file mode 100644 index 8744aa890a6..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authentication/v1/tokenReviewSpec.libsonnet +++ /dev/null @@ -1,12 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='tokenReviewSpec', url='', help='"TokenReviewSpec is a description of the token authentication request."'), - '#withAudiences':: d.fn(help='"Audiences is a list of the identifiers that the resource server presented with the token identifies as. Audience-aware token authenticators will verify that the token was intended for at least one of the audiences in this list. If no audiences are provided, the audience will default to the audience of the Kubernetes apiserver."', args=[d.arg(name='audiences', type=d.T.array)]), - withAudiences(audiences): { audiences: if std.isArray(v=audiences) then audiences else [audiences] }, - '#withAudiencesMixin':: d.fn(help='"Audiences is a list of the identifiers that the resource server presented with the token identifies as. Audience-aware token authenticators will verify that the token was intended for at least one of the audiences in this list. 
If no audiences are provided, the audience will default to the audience of the Kubernetes apiserver."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='audiences', type=d.T.array)]), - withAudiencesMixin(audiences): { audiences+: if std.isArray(v=audiences) then audiences else [audiences] }, - '#withToken':: d.fn(help='"Token is the opaque bearer token."', args=[d.arg(name='token', type=d.T.string)]), - withToken(token): { token: token }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authentication/v1/tokenReviewStatus.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authentication/v1/tokenReviewStatus.libsonnet deleted file mode 100644 index 04242af8437..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authentication/v1/tokenReviewStatus.libsonnet +++ /dev/null @@ -1,29 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='tokenReviewStatus', url='', help='"TokenReviewStatus is the result of the token authentication request."'), - '#user':: d.obj(help='"UserInfo holds the information about the user needed to implement the user.Info interface."'), - user: { - '#withExtra':: d.fn(help='"Any additional information provided by the authenticator."', args=[d.arg(name='extra', type=d.T.object)]), - withExtra(extra): { user+: { extra: extra } }, - '#withExtraMixin':: d.fn(help='"Any additional information provided by the authenticator."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='extra', type=d.T.object)]), - withExtraMixin(extra): { user+: { extra+: extra } }, - '#withGroups':: d.fn(help='"The names of groups this user is a part of."', args=[d.arg(name='groups', type=d.T.array)]), - withGroups(groups): { user+: { groups: if std.isArray(v=groups) then groups else [groups] } }, - '#withGroupsMixin':: d.fn(help='"The names of groups this user is a part of."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='groups', type=d.T.array)]), - withGroupsMixin(groups): { user+: { groups+: if std.isArray(v=groups) then groups else [groups] } }, - '#withUid':: d.fn(help='"A unique value that identifies this user across time. If this user is deleted and another user by the same name is added, they will have different UIDs."', args=[d.arg(name='uid', type=d.T.string)]), - withUid(uid): { user+: { uid: uid } }, - '#withUsername':: d.fn(help='"The name that uniquely identifies this user among all active users."', args=[d.arg(name='username', type=d.T.string)]), - withUsername(username): { user+: { username: username } }, - }, - '#withAudiences':: d.fn(help="\"Audiences are audience identifiers chosen by the authenticator that are compatible with both the TokenReview and token. An identifier is any identifier in the intersection of the TokenReviewSpec audiences and the token's audiences. A client of the TokenReview API that sets the spec.audiences field should validate that a compatible audience identifier is returned in the status.audiences field to ensure that the TokenReview server is audience aware. 
If a TokenReview returns an empty status.audience field where status.authenticated is \\\"true\\\", the token is valid against the audience of the Kubernetes API server.\"", args=[d.arg(name='audiences', type=d.T.array)]), - withAudiences(audiences): { audiences: if std.isArray(v=audiences) then audiences else [audiences] }, - '#withAudiencesMixin':: d.fn(help="\"Audiences are audience identifiers chosen by the authenticator that are compatible with both the TokenReview and token. An identifier is any identifier in the intersection of the TokenReviewSpec audiences and the token's audiences. A client of the TokenReview API that sets the spec.audiences field should validate that a compatible audience identifier is returned in the status.audiences field to ensure that the TokenReview server is audience aware. If a TokenReview returns an empty status.audience field where status.authenticated is \\\"true\\\", the token is valid against the audience of the Kubernetes API server.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='audiences', type=d.T.array)]), - withAudiencesMixin(audiences): { audiences+: if std.isArray(v=audiences) then audiences else [audiences] }, - '#withAuthenticated':: d.fn(help='"Authenticated indicates that the token was associated with a known user."', args=[d.arg(name='authenticated', type=d.T.boolean)]), - withAuthenticated(authenticated): { authenticated: authenticated }, - '#withError':: d.fn(help="\"Error indicates that the token couldn't be checked\"", args=[d.arg(name='err', type=d.T.string)]), - withError(err): { 'error': err }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authentication/v1/userInfo.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authentication/v1/userInfo.libsonnet deleted file mode 100644 index 7e4d2adb128..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authentication/v1/userInfo.libsonnet +++ /dev/null @@ -1,18 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='userInfo', url='', help='"UserInfo holds the information about the user needed to implement the user.Info interface."'), - '#withExtra':: d.fn(help='"Any additional information provided by the authenticator."', args=[d.arg(name='extra', type=d.T.object)]), - withExtra(extra): { extra: extra }, - '#withExtraMixin':: d.fn(help='"Any additional information provided by the authenticator."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='extra', type=d.T.object)]), - withExtraMixin(extra): { extra+: extra }, - '#withGroups':: d.fn(help='"The names of groups this user is a part of."', args=[d.arg(name='groups', type=d.T.array)]), - withGroups(groups): { groups: if std.isArray(v=groups) then groups else [groups] }, - '#withGroupsMixin':: d.fn(help='"The names of groups this user is a part of."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='groups', type=d.T.array)]), - withGroupsMixin(groups): { groups+: if std.isArray(v=groups) then groups else [groups] }, - '#withUid':: d.fn(help='"A unique value that identifies this user across time. 
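The tokenReviewSpec helpers above carry only the opaque bearer token and an optional audience list. A minimal sketch, assuming the same generated helpers exist unchanged in the 1.29 tree and using placeholder values; passing a single string to withAudiences would also work, since the helper wraps scalars into a one-element array:

  local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';  // import path assumed
  local spec = k.authentication.v1.tokenReviewSpec;

  spec.withToken('<opaque bearer token>')
  + spec.withAudiences(['https://kubernetes.default.svc'])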
If this user is deleted and another user by the same name is added, they will have different UIDs."', args=[d.arg(name='uid', type=d.T.string)]), - withUid(uid): { uid: uid }, - '#withUsername':: d.fn(help='"The name that uniquely identifies this user among all active users."', args=[d.arg(name='username', type=d.T.string)]), - withUsername(username): { username: username }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authentication/v1beta1/main.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authentication/v1beta1/main.libsonnet deleted file mode 100644 index 5c9a07f9a3c..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authentication/v1beta1/main.libsonnet +++ /dev/null @@ -1,8 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='v1beta1', url='', help=''), - tokenReview: (import 'tokenReview.libsonnet'), - tokenReviewSpec: (import 'tokenReviewSpec.libsonnet'), - tokenReviewStatus: (import 'tokenReviewStatus.libsonnet'), - userInfo: (import 'userInfo.libsonnet'), -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authentication/v1beta1/tokenReview.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authentication/v1beta1/tokenReview.libsonnet deleted file mode 100644 index 5052a3363e8..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authentication/v1beta1/tokenReview.libsonnet +++ /dev/null @@ -1,65 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='tokenReview', url='', help='"TokenReview attempts to authenticate a token to a known user. Note: TokenReview requests may be cached by the webhook token authenticator plugin in the kube-apiserver."'), - '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), - metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), - withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), - withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. 
This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, - '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), - withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, - '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), - withDeletionGracePeriodSeconds(deletionGracePeriodSeconds): { metadata+: { deletionGracePeriodSeconds: deletionGracePeriodSeconds } }, - '#withDeletionTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='deletionTimestamp', type=d.T.string)]), - withDeletionTimestamp(deletionTimestamp): { metadata+: { deletionTimestamp: deletionTimestamp } }, - '#withFinalizers':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. 
Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), - withGenerateName(generateName): { metadata+: { generateName: generateName } }, - '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), - withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), - withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), - withLabelsMixin(labels): { metadata+: { labels+: labels } }, - '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. 
A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { metadata+: { namespace: namespace } }, - '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, - '#withOwnerReferencesMixin':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, - '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), - withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), - withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), - withUid(uid): { metadata+: { uid: uid } }, - }, - '#new':: d.fn(help='new returns an instance of TokenReview', args=[d.arg(name='name', type=d.T.string)]), - new(name): { - apiVersion: 'authentication.k8s.io/v1beta1', - kind: 'TokenReview', - } + self.metadata.withName(name=name), - '#spec':: d.obj(help='"TokenReviewSpec is a description of the token authentication request."'), - spec: { - '#withAudiences':: d.fn(help='"Audiences is a list of the identifiers that the resource server presented with the token identifies as. Audience-aware token authenticators will verify that the token was intended for at least one of the audiences in this list. If no audiences are provided, the audience will default to the audience of the Kubernetes apiserver."', args=[d.arg(name='audiences', type=d.T.array)]), - withAudiences(audiences): { spec+: { audiences: if std.isArray(v=audiences) then audiences else [audiences] } }, - '#withAudiencesMixin':: d.fn(help='"Audiences is a list of the identifiers that the resource server presented with the token identifies as. Audience-aware token authenticators will verify that the token was intended for at least one of the audiences in this list. 
If no audiences are provided, the audience will default to the audience of the Kubernetes apiserver."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='audiences', type=d.T.array)]), - withAudiencesMixin(audiences): { spec+: { audiences+: if std.isArray(v=audiences) then audiences else [audiences] } }, - '#withToken':: d.fn(help='"Token is the opaque bearer token."', args=[d.arg(name='token', type=d.T.string)]), - withToken(token): { spec+: { token: token } }, - }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authentication/v1beta1/tokenReviewSpec.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authentication/v1beta1/tokenReviewSpec.libsonnet deleted file mode 100644 index 8744aa890a6..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authentication/v1beta1/tokenReviewSpec.libsonnet +++ /dev/null @@ -1,12 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='tokenReviewSpec', url='', help='"TokenReviewSpec is a description of the token authentication request."'), - '#withAudiences':: d.fn(help='"Audiences is a list of the identifiers that the resource server presented with the token identifies as. Audience-aware token authenticators will verify that the token was intended for at least one of the audiences in this list. If no audiences are provided, the audience will default to the audience of the Kubernetes apiserver."', args=[d.arg(name='audiences', type=d.T.array)]), - withAudiences(audiences): { audiences: if std.isArray(v=audiences) then audiences else [audiences] }, - '#withAudiencesMixin':: d.fn(help='"Audiences is a list of the identifiers that the resource server presented with the token identifies as. Audience-aware token authenticators will verify that the token was intended for at least one of the audiences in this list. 
If no audiences are provided, the audience will default to the audience of the Kubernetes apiserver."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='audiences', type=d.T.array)]), - withAudiencesMixin(audiences): { audiences+: if std.isArray(v=audiences) then audiences else [audiences] }, - '#withToken':: d.fn(help='"Token is the opaque bearer token."', args=[d.arg(name='token', type=d.T.string)]), - withToken(token): { token: token }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authentication/v1beta1/tokenReviewStatus.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authentication/v1beta1/tokenReviewStatus.libsonnet deleted file mode 100644 index 04242af8437..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authentication/v1beta1/tokenReviewStatus.libsonnet +++ /dev/null @@ -1,29 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='tokenReviewStatus', url='', help='"TokenReviewStatus is the result of the token authentication request."'), - '#user':: d.obj(help='"UserInfo holds the information about the user needed to implement the user.Info interface."'), - user: { - '#withExtra':: d.fn(help='"Any additional information provided by the authenticator."', args=[d.arg(name='extra', type=d.T.object)]), - withExtra(extra): { user+: { extra: extra } }, - '#withExtraMixin':: d.fn(help='"Any additional information provided by the authenticator."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='extra', type=d.T.object)]), - withExtraMixin(extra): { user+: { extra+: extra } }, - '#withGroups':: d.fn(help='"The names of groups this user is a part of."', args=[d.arg(name='groups', type=d.T.array)]), - withGroups(groups): { user+: { groups: if std.isArray(v=groups) then groups else [groups] } }, - '#withGroupsMixin':: d.fn(help='"The names of groups this user is a part of."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='groups', type=d.T.array)]), - withGroupsMixin(groups): { user+: { groups+: if std.isArray(v=groups) then groups else [groups] } }, - '#withUid':: d.fn(help='"A unique value that identifies this user across time. If this user is deleted and another user by the same name is added, they will have different UIDs."', args=[d.arg(name='uid', type=d.T.string)]), - withUid(uid): { user+: { uid: uid } }, - '#withUsername':: d.fn(help='"The name that uniquely identifies this user among all active users."', args=[d.arg(name='username', type=d.T.string)]), - withUsername(username): { user+: { username: username } }, - }, - '#withAudiences':: d.fn(help="\"Audiences are audience identifiers chosen by the authenticator that are compatible with both the TokenReview and token. An identifier is any identifier in the intersection of the TokenReviewSpec audiences and the token's audiences. A client of the TokenReview API that sets the spec.audiences field should validate that a compatible audience identifier is returned in the status.audiences field to ensure that the TokenReview server is audience aware. 
If a TokenReview returns an empty status.audience field where status.authenticated is \\\"true\\\", the token is valid against the audience of the Kubernetes API server.\"", args=[d.arg(name='audiences', type=d.T.array)]), - withAudiences(audiences): { audiences: if std.isArray(v=audiences) then audiences else [audiences] }, - '#withAudiencesMixin':: d.fn(help="\"Audiences are audience identifiers chosen by the authenticator that are compatible with both the TokenReview and token. An identifier is any identifier in the intersection of the TokenReviewSpec audiences and the token's audiences. A client of the TokenReview API that sets the spec.audiences field should validate that a compatible audience identifier is returned in the status.audiences field to ensure that the TokenReview server is audience aware. If a TokenReview returns an empty status.audience field where status.authenticated is \\\"true\\\", the token is valid against the audience of the Kubernetes API server.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='audiences', type=d.T.array)]), - withAudiencesMixin(audiences): { audiences+: if std.isArray(v=audiences) then audiences else [audiences] }, - '#withAuthenticated':: d.fn(help='"Authenticated indicates that the token was associated with a known user."', args=[d.arg(name='authenticated', type=d.T.boolean)]), - withAuthenticated(authenticated): { authenticated: authenticated }, - '#withError':: d.fn(help="\"Error indicates that the token couldn't be checked\"", args=[d.arg(name='err', type=d.T.string)]), - withError(err): { 'error': err }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authentication/v1beta1/userInfo.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authentication/v1beta1/userInfo.libsonnet deleted file mode 100644 index 7e4d2adb128..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authentication/v1beta1/userInfo.libsonnet +++ /dev/null @@ -1,18 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='userInfo', url='', help='"UserInfo holds the information about the user needed to implement the user.Info interface."'), - '#withExtra':: d.fn(help='"Any additional information provided by the authenticator."', args=[d.arg(name='extra', type=d.T.object)]), - withExtra(extra): { extra: extra }, - '#withExtraMixin':: d.fn(help='"Any additional information provided by the authenticator."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='extra', type=d.T.object)]), - withExtraMixin(extra): { extra+: extra }, - '#withGroups':: d.fn(help='"The names of groups this user is a part of."', args=[d.arg(name='groups', type=d.T.array)]), - withGroups(groups): { groups: if std.isArray(v=groups) then groups else [groups] }, - '#withGroupsMixin':: d.fn(help='"The names of groups this user is a part of."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='groups', type=d.T.array)]), - withGroupsMixin(groups): { groups+: if std.isArray(v=groups) then groups else [groups] }, - '#withUid':: d.fn(help='"A unique value that identifies this user across time. 
If this user is deleted and another user by the same name is added, they will have different UIDs."', args=[d.arg(name='uid', type=d.T.string)]), - withUid(uid): { uid: uid }, - '#withUsername':: d.fn(help='"The name that uniquely identifies this user among all active users."', args=[d.arg(name='username', type=d.T.string)]), - withUsername(username): { username: username }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1/nonResourceAttributes.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1/nonResourceAttributes.libsonnet deleted file mode 100644 index e01d98c7720..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1/nonResourceAttributes.libsonnet +++ /dev/null @@ -1,10 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='nonResourceAttributes', url='', help='"NonResourceAttributes includes the authorization attributes available for non-resource requests to the Authorizer interface"'), - '#withPath':: d.fn(help='"Path is the URL path of the request"', args=[d.arg(name='path', type=d.T.string)]), - withPath(path): { path: path }, - '#withVerb':: d.fn(help='"Verb is the standard HTTP verb"', args=[d.arg(name='verb', type=d.T.string)]), - withVerb(verb): { verb: verb }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1/nonResourceRule.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1/nonResourceRule.libsonnet deleted file mode 100644 index 1508d63a79d..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1/nonResourceRule.libsonnet +++ /dev/null @@ -1,14 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='nonResourceRule', url='', help='"NonResourceRule holds information that describes a rule for the non-resource"'), - '#withNonResourceURLs':: d.fn(help='"NonResourceURLs is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path. \\"*\\" means all."', args=[d.arg(name='nonResourceURLs', type=d.T.array)]), - withNonResourceURLs(nonResourceURLs): { nonResourceURLs: if std.isArray(v=nonResourceURLs) then nonResourceURLs else [nonResourceURLs] }, - '#withNonResourceURLsMixin':: d.fn(help='"NonResourceURLs is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path. \\"*\\" means all."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='nonResourceURLs', type=d.T.array)]), - withNonResourceURLsMixin(nonResourceURLs): { nonResourceURLs+: if std.isArray(v=nonResourceURLs) then nonResourceURLs else [nonResourceURLs] }, - '#withVerbs':: d.fn(help='"Verb is a list of kubernetes non-resource API verbs, like: get, post, put, delete, patch, head, options. \\"*\\" means all."', args=[d.arg(name='verbs', type=d.T.array)]), - withVerbs(verbs): { verbs: if std.isArray(v=verbs) then verbs else [verbs] }, - '#withVerbsMixin':: d.fn(help='"Verb is a list of kubernetes non-resource API verbs, like: get, post, put, delete, patch, head, options. 
\\"*\\" means all."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='verbs', type=d.T.array)]), - withVerbsMixin(verbs): { verbs+: if std.isArray(v=verbs) then verbs else [verbs] }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1/resourceAttributes.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1/resourceAttributes.libsonnet deleted file mode 100644 index b6e78dcf607..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1/resourceAttributes.libsonnet +++ /dev/null @@ -1,20 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='resourceAttributes', url='', help='"ResourceAttributes includes the authorization attributes available for resource requests to the Authorizer interface"'), - '#withGroup':: d.fn(help='"Group is the API Group of the Resource. \\"*\\" means all."', args=[d.arg(name='group', type=d.T.string)]), - withGroup(group): { group: group }, - '#withName':: d.fn(help='"Name is the name of the resource being requested for a \\"get\\" or deleted for a \\"delete\\". \\"\\" (empty) means all."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { name: name }, - '#withNamespace':: d.fn(help='"Namespace is the namespace of the action being requested. Currently, there is no distinction between no namespace and all namespaces \\"\\" (empty) is defaulted for LocalSubjectAccessReviews \\"\\" (empty) is empty for cluster-scoped resources \\"\\" (empty) means \\"all\\" for namespace scoped resources from a SubjectAccessReview or SelfSubjectAccessReview"', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { namespace: namespace }, - '#withResource':: d.fn(help='"Resource is one of the existing resource types. \\"*\\" means all."', args=[d.arg(name='resource', type=d.T.string)]), - withResource(resource): { resource: resource }, - '#withSubresource':: d.fn(help='"Subresource is one of the existing resource types. \\"\\" means none."', args=[d.arg(name='subresource', type=d.T.string)]), - withSubresource(subresource): { subresource: subresource }, - '#withVerb':: d.fn(help='"Verb is a kubernetes resource API verb, like: get, list, watch, create, update, delete, proxy. \\"*\\" means all."', args=[d.arg(name='verb', type=d.T.string)]), - withVerb(verb): { verb: verb }, - '#withVersion':: d.fn(help='"Version is the API Version of the Resource. \\"*\\" means all."', args=[d.arg(name='version', type=d.T.string)]), - withVersion(version): { version: version }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1/resourceRule.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1/resourceRule.libsonnet deleted file mode 100644 index e7387e729a7..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1/resourceRule.libsonnet +++ /dev/null @@ -1,22 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='resourceRule', url='', help="\"ResourceRule is the list of actions the subject is allowed to perform on resources. 
The list ordering isn't significant, may contain duplicates, and possibly be incomplete.\""), - '#withApiGroups':: d.fn(help='"APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of the enumerated resources in any API group will be allowed. \\"*\\" means all."', args=[d.arg(name='apiGroups', type=d.T.array)]), - withApiGroups(apiGroups): { apiGroups: if std.isArray(v=apiGroups) then apiGroups else [apiGroups] }, - '#withApiGroupsMixin':: d.fn(help='"APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of the enumerated resources in any API group will be allowed. \\"*\\" means all."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='apiGroups', type=d.T.array)]), - withApiGroupsMixin(apiGroups): { apiGroups+: if std.isArray(v=apiGroups) then apiGroups else [apiGroups] }, - '#withResourceNames':: d.fn(help='"ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed. \\"*\\" means all."', args=[d.arg(name='resourceNames', type=d.T.array)]), - withResourceNames(resourceNames): { resourceNames: if std.isArray(v=resourceNames) then resourceNames else [resourceNames] }, - '#withResourceNamesMixin':: d.fn(help='"ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed. \\"*\\" means all."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='resourceNames', type=d.T.array)]), - withResourceNamesMixin(resourceNames): { resourceNames+: if std.isArray(v=resourceNames) then resourceNames else [resourceNames] }, - '#withResources':: d.fn(help="\"Resources is a list of resources this rule applies to. \\\"*\\\" means all in the specified apiGroups.\\n \\\"*/foo\\\" represents the subresource 'foo' for all resources in the specified apiGroups.\"", args=[d.arg(name='resources', type=d.T.array)]), - withResources(resources): { resources: if std.isArray(v=resources) then resources else [resources] }, - '#withResourcesMixin':: d.fn(help="\"Resources is a list of resources this rule applies to. \\\"*\\\" means all in the specified apiGroups.\\n \\\"*/foo\\\" represents the subresource 'foo' for all resources in the specified apiGroups.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='resources', type=d.T.array)]), - withResourcesMixin(resources): { resources+: if std.isArray(v=resources) then resources else [resources] }, - '#withVerbs':: d.fn(help='"Verb is a list of kubernetes resource API verbs, like: get, list, watch, create, update, delete, proxy. \\"*\\" means all."', args=[d.arg(name='verbs', type=d.T.array)]), - withVerbs(verbs): { verbs: if std.isArray(v=verbs) then verbs else [verbs] }, - '#withVerbsMixin':: d.fn(help='"Verb is a list of kubernetes resource API verbs, like: get, list, watch, create, update, delete, proxy. 
\\"*\\" means all."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='verbs', type=d.T.array)]), - withVerbsMixin(verbs): { verbs+: if std.isArray(v=verbs) then verbs else [verbs] }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1/selfSubjectAccessReviewSpec.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1/selfSubjectAccessReviewSpec.libsonnet deleted file mode 100644 index c166be0e06b..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1/selfSubjectAccessReviewSpec.libsonnet +++ /dev/null @@ -1,30 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='selfSubjectAccessReviewSpec', url='', help='"SelfSubjectAccessReviewSpec is a description of the access request. Exactly one of ResourceAuthorizationAttributes and NonResourceAuthorizationAttributes must be set"'), - '#nonResourceAttributes':: d.obj(help='"NonResourceAttributes includes the authorization attributes available for non-resource requests to the Authorizer interface"'), - nonResourceAttributes: { - '#withPath':: d.fn(help='"Path is the URL path of the request"', args=[d.arg(name='path', type=d.T.string)]), - withPath(path): { nonResourceAttributes+: { path: path } }, - '#withVerb':: d.fn(help='"Verb is the standard HTTP verb"', args=[d.arg(name='verb', type=d.T.string)]), - withVerb(verb): { nonResourceAttributes+: { verb: verb } }, - }, - '#resourceAttributes':: d.obj(help='"ResourceAttributes includes the authorization attributes available for resource requests to the Authorizer interface"'), - resourceAttributes: { - '#withGroup':: d.fn(help='"Group is the API Group of the Resource. \\"*\\" means all."', args=[d.arg(name='group', type=d.T.string)]), - withGroup(group): { resourceAttributes+: { group: group } }, - '#withName':: d.fn(help='"Name is the name of the resource being requested for a \\"get\\" or deleted for a \\"delete\\". \\"\\" (empty) means all."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { resourceAttributes+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace is the namespace of the action being requested. Currently, there is no distinction between no namespace and all namespaces \\"\\" (empty) is defaulted for LocalSubjectAccessReviews \\"\\" (empty) is empty for cluster-scoped resources \\"\\" (empty) means \\"all\\" for namespace scoped resources from a SubjectAccessReview or SelfSubjectAccessReview"', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { resourceAttributes+: { namespace: namespace } }, - '#withResource':: d.fn(help='"Resource is one of the existing resource types. \\"*\\" means all."', args=[d.arg(name='resource', type=d.T.string)]), - withResource(resource): { resourceAttributes+: { resource: resource } }, - '#withSubresource':: d.fn(help='"Subresource is one of the existing resource types. \\"\\" means none."', args=[d.arg(name='subresource', type=d.T.string)]), - withSubresource(subresource): { resourceAttributes+: { subresource: subresource } }, - '#withVerb':: d.fn(help='"Verb is a kubernetes resource API verb, like: get, list, watch, create, update, delete, proxy. 
\\"*\\" means all."', args=[d.arg(name='verb', type=d.T.string)]), - withVerb(verb): { resourceAttributes+: { verb: verb } }, - '#withVersion':: d.fn(help='"Version is the API Version of the Resource. \\"*\\" means all."', args=[d.arg(name='version', type=d.T.string)]), - withVersion(version): { resourceAttributes+: { version: version } }, - }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1/selfSubjectRulesReviewSpec.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1/selfSubjectRulesReviewSpec.libsonnet deleted file mode 100644 index 901f17b4af6..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1/selfSubjectRulesReviewSpec.libsonnet +++ /dev/null @@ -1,8 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='selfSubjectRulesReviewSpec', url='', help=''), - '#withNamespace':: d.fn(help='"Namespace to evaluate rules for. Required."', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { namespace: namespace }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1/subjectAccessReviewStatus.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1/subjectAccessReviewStatus.libsonnet deleted file mode 100644 index 77871364b61..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1/subjectAccessReviewStatus.libsonnet +++ /dev/null @@ -1,14 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='subjectAccessReviewStatus', url='', help='"SubjectAccessReviewStatus"'), - '#withAllowed':: d.fn(help='"Allowed is required. True if the action would be allowed, false otherwise."', args=[d.arg(name='allowed', type=d.T.boolean)]), - withAllowed(allowed): { allowed: allowed }, - '#withDenied':: d.fn(help='"Denied is optional. True if the action would be denied, otherwise false. If both allowed is false and denied is false, then the authorizer has no opinion on whether to authorize the action. Denied may not be true if Allowed is true."', args=[d.arg(name='denied', type=d.T.boolean)]), - withDenied(denied): { denied: denied }, - '#withEvaluationError':: d.fn(help='"EvaluationError is an indication that some error occurred during the authorization check. It is entirely possible to get an error and be able to continue determine authorization status in spite of it. For instance, RBAC can be missing a role, but enough roles are still present and bound to reason about the request."', args=[d.arg(name='evaluationError', type=d.T.string)]), - withEvaluationError(evaluationError): { evaluationError: evaluationError }, - '#withReason':: d.fn(help='"Reason is optional. 
It indicates why a request was allowed or denied."', args=[d.arg(name='reason', type=d.T.string)]), - withReason(reason): { reason: reason }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1/subjectRulesReviewStatus.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1/subjectRulesReviewStatus.libsonnet deleted file mode 100644 index 43340113936..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1/subjectRulesReviewStatus.libsonnet +++ /dev/null @@ -1,18 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='subjectRulesReviewStatus', url='', help="\"SubjectRulesReviewStatus contains the result of a rules check. This check can be incomplete depending on the set of authorizers the server is configured with and any errors experienced during evaluation. Because authorization rules are additive, if a rule appears in a list it's safe to assume the subject has that permission, even if that list is incomplete.\""), - '#withEvaluationError':: d.fn(help="\"EvaluationError can appear in combination with Rules. It indicates an error occurred during rule evaluation, such as an authorizer that doesn't support rule evaluation, and that ResourceRules and/or NonResourceRules may be incomplete.\"", args=[d.arg(name='evaluationError', type=d.T.string)]), - withEvaluationError(evaluationError): { evaluationError: evaluationError }, - '#withIncomplete':: d.fn(help="\"Incomplete is true when the rules returned by this call are incomplete. This is most commonly encountered when an authorizer, such as an external authorizer, doesn't support rules evaluation.\"", args=[d.arg(name='incomplete', type=d.T.boolean)]), - withIncomplete(incomplete): { incomplete: incomplete }, - '#withNonResourceRules':: d.fn(help="\"NonResourceRules is the list of actions the subject is allowed to perform on non-resources. The list ordering isn't significant, may contain duplicates, and possibly be incomplete.\"", args=[d.arg(name='nonResourceRules', type=d.T.array)]), - withNonResourceRules(nonResourceRules): { nonResourceRules: if std.isArray(v=nonResourceRules) then nonResourceRules else [nonResourceRules] }, - '#withNonResourceRulesMixin':: d.fn(help="\"NonResourceRules is the list of actions the subject is allowed to perform on non-resources. The list ordering isn't significant, may contain duplicates, and possibly be incomplete.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='nonResourceRules', type=d.T.array)]), - withNonResourceRulesMixin(nonResourceRules): { nonResourceRules+: if std.isArray(v=nonResourceRules) then nonResourceRules else [nonResourceRules] }, - '#withResourceRules':: d.fn(help="\"ResourceRules is the list of actions the subject is allowed to perform on resources. The list ordering isn't significant, may contain duplicates, and possibly be incomplete.\"", args=[d.arg(name='resourceRules', type=d.T.array)]), - withResourceRules(resourceRules): { resourceRules: if std.isArray(v=resourceRules) then resourceRules else [resourceRules] }, - '#withResourceRulesMixin':: d.fn(help="\"ResourceRules is the list of actions the subject is allowed to perform on resources. 
The list ordering isn't significant, may contain duplicates, and possibly be incomplete.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='resourceRules', type=d.T.array)]), - withResourceRulesMixin(resourceRules): { resourceRules+: if std.isArray(v=resourceRules) then resourceRules else [resourceRules] }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1beta1/localSubjectAccessReview.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1beta1/localSubjectAccessReview.libsonnet deleted file mode 100644 index 222cdd4496d..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1beta1/localSubjectAccessReview.libsonnet +++ /dev/null @@ -1,95 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='localSubjectAccessReview', url='', help='"LocalSubjectAccessReview checks whether or not a user or group can perform an action in a given namespace. Having a namespace scoped resource makes it much easier to grant namespace scoped policy that includes permissions checking."'), - '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), - metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), - withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), - withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, - '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), - withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, - '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. 
Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), - withDeletionGracePeriodSeconds(deletionGracePeriodSeconds): { metadata+: { deletionGracePeriodSeconds: deletionGracePeriodSeconds } }, - '#withDeletionTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='deletionTimestamp', type=d.T.string)]), - withDeletionTimestamp(deletionTimestamp): { metadata+: { deletionTimestamp: deletionTimestamp } }, - '#withFinalizers':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. 
The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), - withGenerateName(generateName): { metadata+: { generateName: generateName } }, - '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), - withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), - withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), - withLabelsMixin(labels): { metadata+: { labels+: labels } }, - '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. 
More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { metadata+: { namespace: namespace } }, - '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, - '#withOwnerReferencesMixin':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, - '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), - withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), - withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. 
More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), - withUid(uid): { metadata+: { uid: uid } }, - }, - '#new':: d.fn(help='new returns an instance of LocalSubjectAccessReview', args=[d.arg(name='name', type=d.T.string)]), - new(name): { - apiVersion: 'authorization.k8s.io/v1beta1', - kind: 'LocalSubjectAccessReview', - } + self.metadata.withName(name=name), - '#spec':: d.obj(help='"SubjectAccessReviewSpec is a description of the access request. Exactly one of ResourceAuthorizationAttributes and NonResourceAuthorizationAttributes must be set"'), - spec: { - '#nonResourceAttributes':: d.obj(help='"NonResourceAttributes includes the authorization attributes available for non-resource requests to the Authorizer interface"'), - nonResourceAttributes: { - '#withPath':: d.fn(help='"Path is the URL path of the request"', args=[d.arg(name='path', type=d.T.string)]), - withPath(path): { spec+: { nonResourceAttributes+: { path: path } } }, - '#withVerb':: d.fn(help='"Verb is the standard HTTP verb"', args=[d.arg(name='verb', type=d.T.string)]), - withVerb(verb): { spec+: { nonResourceAttributes+: { verb: verb } } }, - }, - '#resourceAttributes':: d.obj(help='"ResourceAttributes includes the authorization attributes available for resource requests to the Authorizer interface"'), - resourceAttributes: { - '#withGroup':: d.fn(help='"Group is the API Group of the Resource. \\"*\\" means all."', args=[d.arg(name='group', type=d.T.string)]), - withGroup(group): { spec+: { resourceAttributes+: { group: group } } }, - '#withName':: d.fn(help='"Name is the name of the resource being requested for a \\"get\\" or deleted for a \\"delete\\". \\"\\" (empty) means all."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { spec+: { resourceAttributes+: { name: name } } }, - '#withNamespace':: d.fn(help='"Namespace is the namespace of the action being requested. Currently, there is no distinction between no namespace and all namespaces \\"\\" (empty) is defaulted for LocalSubjectAccessReviews \\"\\" (empty) is empty for cluster-scoped resources \\"\\" (empty) means \\"all\\" for namespace scoped resources from a SubjectAccessReview or SelfSubjectAccessReview"', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { spec+: { resourceAttributes+: { namespace: namespace } } }, - '#withResource':: d.fn(help='"Resource is one of the existing resource types. \\"*\\" means all."', args=[d.arg(name='resource', type=d.T.string)]), - withResource(resource): { spec+: { resourceAttributes+: { resource: resource } } }, - '#withSubresource':: d.fn(help='"Subresource is one of the existing resource types. \\"\\" means none."', args=[d.arg(name='subresource', type=d.T.string)]), - withSubresource(subresource): { spec+: { resourceAttributes+: { subresource: subresource } } }, - '#withVerb':: d.fn(help='"Verb is a kubernetes resource API verb, like: get, list, watch, create, update, delete, proxy. \\"*\\" means all."', args=[d.arg(name='verb', type=d.T.string)]), - withVerb(verb): { spec+: { resourceAttributes+: { verb: verb } } }, - '#withVersion':: d.fn(help='"Version is the API Version of the Resource. \\"*\\" means all."', args=[d.arg(name='version', type=d.T.string)]), - withVersion(version): { spec+: { resourceAttributes+: { version: version } } }, - }, - '#withExtra':: d.fn(help='"Extra corresponds to the user.Info.GetExtra() method from the authenticator. 
Since that is input to the authorizer it needs a reflection here."', args=[d.arg(name='extra', type=d.T.object)]), - withExtra(extra): { spec+: { extra: extra } }, - '#withExtraMixin':: d.fn(help='"Extra corresponds to the user.Info.GetExtra() method from the authenticator. Since that is input to the authorizer it needs a reflection here."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='extra', type=d.T.object)]), - withExtraMixin(extra): { spec+: { extra+: extra } }, - '#withGroup':: d.fn(help="\"Groups is the groups you're testing for.\"", args=[d.arg(name='group', type=d.T.array)]), - withGroup(group): { spec+: { group: if std.isArray(v=group) then group else [group] } }, - '#withGroupMixin':: d.fn(help="\"Groups is the groups you're testing for.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='group', type=d.T.array)]), - withGroupMixin(group): { spec+: { group+: if std.isArray(v=group) then group else [group] } }, - '#withUid':: d.fn(help='"UID information about the requesting user."', args=[d.arg(name='uid', type=d.T.string)]), - withUid(uid): { spec+: { uid: uid } }, - '#withUser':: d.fn(help="\"User is the user you're testing for. If you specify \\\"User\\\" but not \\\"Group\\\", then is it interpreted as \\\"What if User were not a member of any groups\"", args=[d.arg(name='user', type=d.T.string)]), - withUser(user): { spec+: { user: user } }, - }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1beta1/main.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1beta1/main.libsonnet deleted file mode 100644 index 58919359023..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1beta1/main.libsonnet +++ /dev/null @@ -1,17 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='v1beta1', url='', help=''), - localSubjectAccessReview: (import 'localSubjectAccessReview.libsonnet'), - nonResourceAttributes: (import 'nonResourceAttributes.libsonnet'), - nonResourceRule: (import 'nonResourceRule.libsonnet'), - resourceAttributes: (import 'resourceAttributes.libsonnet'), - resourceRule: (import 'resourceRule.libsonnet'), - selfSubjectAccessReview: (import 'selfSubjectAccessReview.libsonnet'), - selfSubjectAccessReviewSpec: (import 'selfSubjectAccessReviewSpec.libsonnet'), - selfSubjectRulesReview: (import 'selfSubjectRulesReview.libsonnet'), - selfSubjectRulesReviewSpec: (import 'selfSubjectRulesReviewSpec.libsonnet'), - subjectAccessReview: (import 'subjectAccessReview.libsonnet'), - subjectAccessReviewSpec: (import 'subjectAccessReviewSpec.libsonnet'), - subjectAccessReviewStatus: (import 'subjectAccessReviewStatus.libsonnet'), - subjectRulesReviewStatus: (import 'subjectRulesReviewStatus.libsonnet'), -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1beta1/nonResourceAttributes.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1beta1/nonResourceAttributes.libsonnet deleted file mode 100644 index e01d98c7720..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1beta1/nonResourceAttributes.libsonnet +++ /dev/null @@ -1,10 +0,0 @@ -{ - local d = (import 
'doc-util/main.libsonnet'), - '#':: d.pkg(name='nonResourceAttributes', url='', help='"NonResourceAttributes includes the authorization attributes available for non-resource requests to the Authorizer interface"'), - '#withPath':: d.fn(help='"Path is the URL path of the request"', args=[d.arg(name='path', type=d.T.string)]), - withPath(path): { path: path }, - '#withVerb':: d.fn(help='"Verb is the standard HTTP verb"', args=[d.arg(name='verb', type=d.T.string)]), - withVerb(verb): { verb: verb }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1beta1/nonResourceRule.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1beta1/nonResourceRule.libsonnet deleted file mode 100644 index 1508d63a79d..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1beta1/nonResourceRule.libsonnet +++ /dev/null @@ -1,14 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='nonResourceRule', url='', help='"NonResourceRule holds information that describes a rule for the non-resource"'), - '#withNonResourceURLs':: d.fn(help='"NonResourceURLs is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path. \\"*\\" means all."', args=[d.arg(name='nonResourceURLs', type=d.T.array)]), - withNonResourceURLs(nonResourceURLs): { nonResourceURLs: if std.isArray(v=nonResourceURLs) then nonResourceURLs else [nonResourceURLs] }, - '#withNonResourceURLsMixin':: d.fn(help='"NonResourceURLs is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path. \\"*\\" means all."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='nonResourceURLs', type=d.T.array)]), - withNonResourceURLsMixin(nonResourceURLs): { nonResourceURLs+: if std.isArray(v=nonResourceURLs) then nonResourceURLs else [nonResourceURLs] }, - '#withVerbs':: d.fn(help='"Verb is a list of kubernetes non-resource API verbs, like: get, post, put, delete, patch, head, options. \\"*\\" means all."', args=[d.arg(name='verbs', type=d.T.array)]), - withVerbs(verbs): { verbs: if std.isArray(v=verbs) then verbs else [verbs] }, - '#withVerbsMixin':: d.fn(help='"Verb is a list of kubernetes non-resource API verbs, like: get, post, put, delete, patch, head, options. 
\\"*\\" means all."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='verbs', type=d.T.array)]), - withVerbsMixin(verbs): { verbs+: if std.isArray(v=verbs) then verbs else [verbs] }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1beta1/resourceAttributes.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1beta1/resourceAttributes.libsonnet deleted file mode 100644 index b6e78dcf607..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1beta1/resourceAttributes.libsonnet +++ /dev/null @@ -1,20 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='resourceAttributes', url='', help='"ResourceAttributes includes the authorization attributes available for resource requests to the Authorizer interface"'), - '#withGroup':: d.fn(help='"Group is the API Group of the Resource. \\"*\\" means all."', args=[d.arg(name='group', type=d.T.string)]), - withGroup(group): { group: group }, - '#withName':: d.fn(help='"Name is the name of the resource being requested for a \\"get\\" or deleted for a \\"delete\\". \\"\\" (empty) means all."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { name: name }, - '#withNamespace':: d.fn(help='"Namespace is the namespace of the action being requested. Currently, there is no distinction between no namespace and all namespaces \\"\\" (empty) is defaulted for LocalSubjectAccessReviews \\"\\" (empty) is empty for cluster-scoped resources \\"\\" (empty) means \\"all\\" for namespace scoped resources from a SubjectAccessReview or SelfSubjectAccessReview"', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { namespace: namespace }, - '#withResource':: d.fn(help='"Resource is one of the existing resource types. \\"*\\" means all."', args=[d.arg(name='resource', type=d.T.string)]), - withResource(resource): { resource: resource }, - '#withSubresource':: d.fn(help='"Subresource is one of the existing resource types. \\"\\" means none."', args=[d.arg(name='subresource', type=d.T.string)]), - withSubresource(subresource): { subresource: subresource }, - '#withVerb':: d.fn(help='"Verb is a kubernetes resource API verb, like: get, list, watch, create, update, delete, proxy. \\"*\\" means all."', args=[d.arg(name='verb', type=d.T.string)]), - withVerb(verb): { verb: verb }, - '#withVersion':: d.fn(help='"Version is the API Version of the Resource. \\"*\\" means all."', args=[d.arg(name='version', type=d.T.string)]), - withVersion(version): { version: version }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1beta1/resourceRule.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1beta1/resourceRule.libsonnet deleted file mode 100644 index e7387e729a7..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1beta1/resourceRule.libsonnet +++ /dev/null @@ -1,22 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='resourceRule', url='', help="\"ResourceRule is the list of actions the subject is allowed to perform on resources. 
The list ordering isn't significant, may contain duplicates, and possibly be incomplete.\""), - '#withApiGroups':: d.fn(help='"APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of the enumerated resources in any API group will be allowed. \\"*\\" means all."', args=[d.arg(name='apiGroups', type=d.T.array)]), - withApiGroups(apiGroups): { apiGroups: if std.isArray(v=apiGroups) then apiGroups else [apiGroups] }, - '#withApiGroupsMixin':: d.fn(help='"APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of the enumerated resources in any API group will be allowed. \\"*\\" means all."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='apiGroups', type=d.T.array)]), - withApiGroupsMixin(apiGroups): { apiGroups+: if std.isArray(v=apiGroups) then apiGroups else [apiGroups] }, - '#withResourceNames':: d.fn(help='"ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed. \\"*\\" means all."', args=[d.arg(name='resourceNames', type=d.T.array)]), - withResourceNames(resourceNames): { resourceNames: if std.isArray(v=resourceNames) then resourceNames else [resourceNames] }, - '#withResourceNamesMixin':: d.fn(help='"ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed. \\"*\\" means all."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='resourceNames', type=d.T.array)]), - withResourceNamesMixin(resourceNames): { resourceNames+: if std.isArray(v=resourceNames) then resourceNames else [resourceNames] }, - '#withResources':: d.fn(help="\"Resources is a list of resources this rule applies to. \\\"*\\\" means all in the specified apiGroups.\\n \\\"*/foo\\\" represents the subresource 'foo' for all resources in the specified apiGroups.\"", args=[d.arg(name='resources', type=d.T.array)]), - withResources(resources): { resources: if std.isArray(v=resources) then resources else [resources] }, - '#withResourcesMixin':: d.fn(help="\"Resources is a list of resources this rule applies to. \\\"*\\\" means all in the specified apiGroups.\\n \\\"*/foo\\\" represents the subresource 'foo' for all resources in the specified apiGroups.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='resources', type=d.T.array)]), - withResourcesMixin(resources): { resources+: if std.isArray(v=resources) then resources else [resources] }, - '#withVerbs':: d.fn(help='"Verb is a list of kubernetes resource API verbs, like: get, list, watch, create, update, delete, proxy. \\"*\\" means all."', args=[d.arg(name='verbs', type=d.T.array)]), - withVerbs(verbs): { verbs: if std.isArray(v=verbs) then verbs else [verbs] }, - '#withVerbsMixin':: d.fn(help='"Verb is a list of kubernetes resource API verbs, like: get, list, watch, create, update, delete, proxy. 
\\"*\\" means all."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='verbs', type=d.T.array)]), - withVerbsMixin(verbs): { verbs+: if std.isArray(v=verbs) then verbs else [verbs] }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1beta1/selfSubjectAccessReview.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1beta1/selfSubjectAccessReview.libsonnet deleted file mode 100644 index ec9d4d8468e..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1beta1/selfSubjectAccessReview.libsonnet +++ /dev/null @@ -1,83 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='selfSubjectAccessReview', url='', help='"SelfSubjectAccessReview checks whether or the current user can perform an action. Not filling in a spec.namespace means \\"in all namespaces\\". Self is a special case, because users should always be able to check whether they can perform an action"'), - '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), - metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), - withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), - withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, - '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), - withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, - '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. 
Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), - withDeletionGracePeriodSeconds(deletionGracePeriodSeconds): { metadata+: { deletionGracePeriodSeconds: deletionGracePeriodSeconds } }, - '#withDeletionTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='deletionTimestamp', type=d.T.string)]), - withDeletionTimestamp(deletionTimestamp): { metadata+: { deletionTimestamp: deletionTimestamp } }, - '#withFinalizers':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. 
The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), - withGenerateName(generateName): { metadata+: { generateName: generateName } }, - '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), - withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), - withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), - withLabelsMixin(labels): { metadata+: { labels+: labels } }, - '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. 
More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { metadata+: { namespace: namespace } }, - '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, - '#withOwnerReferencesMixin':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, - '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), - withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), - withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. 
More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), - withUid(uid): { metadata+: { uid: uid } }, - }, - '#new':: d.fn(help='new returns an instance of SelfSubjectAccessReview', args=[d.arg(name='name', type=d.T.string)]), - new(name): { - apiVersion: 'authorization.k8s.io/v1beta1', - kind: 'SelfSubjectAccessReview', - } + self.metadata.withName(name=name), - '#spec':: d.obj(help='"SelfSubjectAccessReviewSpec is a description of the access request. Exactly one of ResourceAuthorizationAttributes and NonResourceAuthorizationAttributes must be set"'), - spec: { - '#nonResourceAttributes':: d.obj(help='"NonResourceAttributes includes the authorization attributes available for non-resource requests to the Authorizer interface"'), - nonResourceAttributes: { - '#withPath':: d.fn(help='"Path is the URL path of the request"', args=[d.arg(name='path', type=d.T.string)]), - withPath(path): { spec+: { nonResourceAttributes+: { path: path } } }, - '#withVerb':: d.fn(help='"Verb is the standard HTTP verb"', args=[d.arg(name='verb', type=d.T.string)]), - withVerb(verb): { spec+: { nonResourceAttributes+: { verb: verb } } }, - }, - '#resourceAttributes':: d.obj(help='"ResourceAttributes includes the authorization attributes available for resource requests to the Authorizer interface"'), - resourceAttributes: { - '#withGroup':: d.fn(help='"Group is the API Group of the Resource. \\"*\\" means all."', args=[d.arg(name='group', type=d.T.string)]), - withGroup(group): { spec+: { resourceAttributes+: { group: group } } }, - '#withName':: d.fn(help='"Name is the name of the resource being requested for a \\"get\\" or deleted for a \\"delete\\". \\"\\" (empty) means all."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { spec+: { resourceAttributes+: { name: name } } }, - '#withNamespace':: d.fn(help='"Namespace is the namespace of the action being requested. Currently, there is no distinction between no namespace and all namespaces \\"\\" (empty) is defaulted for LocalSubjectAccessReviews \\"\\" (empty) is empty for cluster-scoped resources \\"\\" (empty) means \\"all\\" for namespace scoped resources from a SubjectAccessReview or SelfSubjectAccessReview"', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { spec+: { resourceAttributes+: { namespace: namespace } } }, - '#withResource':: d.fn(help='"Resource is one of the existing resource types. \\"*\\" means all."', args=[d.arg(name='resource', type=d.T.string)]), - withResource(resource): { spec+: { resourceAttributes+: { resource: resource } } }, - '#withSubresource':: d.fn(help='"Subresource is one of the existing resource types. \\"\\" means none."', args=[d.arg(name='subresource', type=d.T.string)]), - withSubresource(subresource): { spec+: { resourceAttributes+: { subresource: subresource } } }, - '#withVerb':: d.fn(help='"Verb is a kubernetes resource API verb, like: get, list, watch, create, update, delete, proxy. \\"*\\" means all."', args=[d.arg(name='verb', type=d.T.string)]), - withVerb(verb): { spec+: { resourceAttributes+: { verb: verb } } }, - '#withVersion':: d.fn(help='"Version is the API Version of the Resource. 
\\"*\\" means all."', args=[d.arg(name='version', type=d.T.string)]), - withVersion(version): { spec+: { resourceAttributes+: { version: version } } }, - }, - }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1beta1/selfSubjectAccessReviewSpec.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1beta1/selfSubjectAccessReviewSpec.libsonnet deleted file mode 100644 index c166be0e06b..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1beta1/selfSubjectAccessReviewSpec.libsonnet +++ /dev/null @@ -1,30 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='selfSubjectAccessReviewSpec', url='', help='"SelfSubjectAccessReviewSpec is a description of the access request. Exactly one of ResourceAuthorizationAttributes and NonResourceAuthorizationAttributes must be set"'), - '#nonResourceAttributes':: d.obj(help='"NonResourceAttributes includes the authorization attributes available for non-resource requests to the Authorizer interface"'), - nonResourceAttributes: { - '#withPath':: d.fn(help='"Path is the URL path of the request"', args=[d.arg(name='path', type=d.T.string)]), - withPath(path): { nonResourceAttributes+: { path: path } }, - '#withVerb':: d.fn(help='"Verb is the standard HTTP verb"', args=[d.arg(name='verb', type=d.T.string)]), - withVerb(verb): { nonResourceAttributes+: { verb: verb } }, - }, - '#resourceAttributes':: d.obj(help='"ResourceAttributes includes the authorization attributes available for resource requests to the Authorizer interface"'), - resourceAttributes: { - '#withGroup':: d.fn(help='"Group is the API Group of the Resource. \\"*\\" means all."', args=[d.arg(name='group', type=d.T.string)]), - withGroup(group): { resourceAttributes+: { group: group } }, - '#withName':: d.fn(help='"Name is the name of the resource being requested for a \\"get\\" or deleted for a \\"delete\\". \\"\\" (empty) means all."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { resourceAttributes+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace is the namespace of the action being requested. Currently, there is no distinction between no namespace and all namespaces \\"\\" (empty) is defaulted for LocalSubjectAccessReviews \\"\\" (empty) is empty for cluster-scoped resources \\"\\" (empty) means \\"all\\" for namespace scoped resources from a SubjectAccessReview or SelfSubjectAccessReview"', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { resourceAttributes+: { namespace: namespace } }, - '#withResource':: d.fn(help='"Resource is one of the existing resource types. \\"*\\" means all."', args=[d.arg(name='resource', type=d.T.string)]), - withResource(resource): { resourceAttributes+: { resource: resource } }, - '#withSubresource':: d.fn(help='"Subresource is one of the existing resource types. \\"\\" means none."', args=[d.arg(name='subresource', type=d.T.string)]), - withSubresource(subresource): { resourceAttributes+: { subresource: subresource } }, - '#withVerb':: d.fn(help='"Verb is a kubernetes resource API verb, like: get, list, watch, create, update, delete, proxy. \\"*\\" means all."', args=[d.arg(name='verb', type=d.T.string)]), - withVerb(verb): { resourceAttributes+: { verb: verb } }, - '#withVersion':: d.fn(help='"Version is the API Version of the Resource. 
\\"*\\" means all."', args=[d.arg(name='version', type=d.T.string)]), - withVersion(version): { resourceAttributes+: { version: version } }, - }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1beta1/selfSubjectRulesReview.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1beta1/selfSubjectRulesReview.libsonnet deleted file mode 100644 index 721fa55497c..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1beta1/selfSubjectRulesReview.libsonnet +++ /dev/null @@ -1,61 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='selfSubjectRulesReview', url='', help="\"SelfSubjectRulesReview enumerates the set of actions the current user can perform within a namespace. The returned list of actions may be incomplete depending on the server's authorization mode, and any errors experienced during the evaluation. SelfSubjectRulesReview should be used by UIs to show/hide actions, or to quickly let an end user reason about their permissions. It should NOT Be used by external systems to drive authorization decisions as this raises confused deputy, cache lifetime/revocation, and correctness concerns. SubjectAccessReview, and LocalAccessReview are the correct way to defer authorization decisions to the API server.\""), - '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), - metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), - withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), - withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, - '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. 
Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), - withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, - '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), - withDeletionGracePeriodSeconds(deletionGracePeriodSeconds): { metadata+: { deletionGracePeriodSeconds: deletionGracePeriodSeconds } }, - '#withDeletionTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='deletionTimestamp', type=d.T.string)]), - withDeletionTimestamp(deletionTimestamp): { metadata+: { deletionTimestamp: deletionTimestamp } }, - '#withFinalizers':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. 
If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), - withGenerateName(generateName): { metadata+: { generateName: generateName } }, - '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), - withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), - withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), - withLabelsMixin(labels): { metadata+: { labels+: labels } }, - '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. 
Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { metadata+: { namespace: namespace } }, - '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, - '#withOwnerReferencesMixin':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, - '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), - withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), - withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. 
It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), - withUid(uid): { metadata+: { uid: uid } }, - }, - '#new':: d.fn(help='new returns an instance of SelfSubjectRulesReview', args=[d.arg(name='name', type=d.T.string)]), - new(name): { - apiVersion: 'authorization.k8s.io/v1beta1', - kind: 'SelfSubjectRulesReview', - } + self.metadata.withName(name=name), - '#spec':: d.obj(help=''), - spec: { - '#withNamespace':: d.fn(help='"Namespace to evaluate rules for. Required."', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { spec+: { namespace: namespace } }, - }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1beta1/selfSubjectRulesReviewSpec.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1beta1/selfSubjectRulesReviewSpec.libsonnet deleted file mode 100644 index 901f17b4af6..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1beta1/selfSubjectRulesReviewSpec.libsonnet +++ /dev/null @@ -1,8 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='selfSubjectRulesReviewSpec', url='', help=''), - '#withNamespace':: d.fn(help='"Namespace to evaluate rules for. Required."', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { namespace: namespace }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1beta1/subjectAccessReview.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1beta1/subjectAccessReview.libsonnet deleted file mode 100644 index accfb6f6c54..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1beta1/subjectAccessReview.libsonnet +++ /dev/null @@ -1,95 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='subjectAccessReview', url='', help='"SubjectAccessReview checks whether or not a user or group can perform an action."'), - '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), - metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), - withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. 
More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), - withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, - '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), - withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, - '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), - withDeletionGracePeriodSeconds(deletionGracePeriodSeconds): { metadata+: { deletionGracePeriodSeconds: deletionGracePeriodSeconds } }, - '#withDeletionTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='deletionTimestamp', type=d.T.string)]), - withDeletionTimestamp(deletionTimestamp): { metadata+: { deletionTimestamp: deletionTimestamp } }, - '#withFinalizers':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. 
If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), - withGenerateName(generateName): { metadata+: { generateName: generateName } }, - '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), - withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), - withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), - withLabelsMixin(labels): { metadata+: { labels+: labels } }, - '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". 
The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { metadata+: { namespace: namespace } }, - '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, - '#withOwnerReferencesMixin':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, - '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. 
May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), - withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), - withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), - withUid(uid): { metadata+: { uid: uid } }, - }, - '#new':: d.fn(help='new returns an instance of SubjectAccessReview', args=[d.arg(name='name', type=d.T.string)]), - new(name): { - apiVersion: 'authorization.k8s.io/v1beta1', - kind: 'SubjectAccessReview', - } + self.metadata.withName(name=name), - '#spec':: d.obj(help='"SubjectAccessReviewSpec is a description of the access request. Exactly one of ResourceAuthorizationAttributes and NonResourceAuthorizationAttributes must be set"'), - spec: { - '#nonResourceAttributes':: d.obj(help='"NonResourceAttributes includes the authorization attributes available for non-resource requests to the Authorizer interface"'), - nonResourceAttributes: { - '#withPath':: d.fn(help='"Path is the URL path of the request"', args=[d.arg(name='path', type=d.T.string)]), - withPath(path): { spec+: { nonResourceAttributes+: { path: path } } }, - '#withVerb':: d.fn(help='"Verb is the standard HTTP verb"', args=[d.arg(name='verb', type=d.T.string)]), - withVerb(verb): { spec+: { nonResourceAttributes+: { verb: verb } } }, - }, - '#resourceAttributes':: d.obj(help='"ResourceAttributes includes the authorization attributes available for resource requests to the Authorizer interface"'), - resourceAttributes: { - '#withGroup':: d.fn(help='"Group is the API Group of the Resource. \\"*\\" means all."', args=[d.arg(name='group', type=d.T.string)]), - withGroup(group): { spec+: { resourceAttributes+: { group: group } } }, - '#withName':: d.fn(help='"Name is the name of the resource being requested for a \\"get\\" or deleted for a \\"delete\\". \\"\\" (empty) means all."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { spec+: { resourceAttributes+: { name: name } } }, - '#withNamespace':: d.fn(help='"Namespace is the namespace of the action being requested. 
Currently, there is no distinction between no namespace and all namespaces \\"\\" (empty) is defaulted for LocalSubjectAccessReviews \\"\\" (empty) is empty for cluster-scoped resources \\"\\" (empty) means \\"all\\" for namespace scoped resources from a SubjectAccessReview or SelfSubjectAccessReview"', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { spec+: { resourceAttributes+: { namespace: namespace } } }, - '#withResource':: d.fn(help='"Resource is one of the existing resource types. \\"*\\" means all."', args=[d.arg(name='resource', type=d.T.string)]), - withResource(resource): { spec+: { resourceAttributes+: { resource: resource } } }, - '#withSubresource':: d.fn(help='"Subresource is one of the existing resource types. \\"\\" means none."', args=[d.arg(name='subresource', type=d.T.string)]), - withSubresource(subresource): { spec+: { resourceAttributes+: { subresource: subresource } } }, - '#withVerb':: d.fn(help='"Verb is a kubernetes resource API verb, like: get, list, watch, create, update, delete, proxy. \\"*\\" means all."', args=[d.arg(name='verb', type=d.T.string)]), - withVerb(verb): { spec+: { resourceAttributes+: { verb: verb } } }, - '#withVersion':: d.fn(help='"Version is the API Version of the Resource. \\"*\\" means all."', args=[d.arg(name='version', type=d.T.string)]), - withVersion(version): { spec+: { resourceAttributes+: { version: version } } }, - }, - '#withExtra':: d.fn(help='"Extra corresponds to the user.Info.GetExtra() method from the authenticator. Since that is input to the authorizer it needs a reflection here."', args=[d.arg(name='extra', type=d.T.object)]), - withExtra(extra): { spec+: { extra: extra } }, - '#withExtraMixin':: d.fn(help='"Extra corresponds to the user.Info.GetExtra() method from the authenticator. Since that is input to the authorizer it needs a reflection here."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='extra', type=d.T.object)]), - withExtraMixin(extra): { spec+: { extra+: extra } }, - '#withGroup':: d.fn(help="\"Groups is the groups you're testing for.\"", args=[d.arg(name='group', type=d.T.array)]), - withGroup(group): { spec+: { group: if std.isArray(v=group) then group else [group] } }, - '#withGroupMixin':: d.fn(help="\"Groups is the groups you're testing for.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='group', type=d.T.array)]), - withGroupMixin(group): { spec+: { group+: if std.isArray(v=group) then group else [group] } }, - '#withUid':: d.fn(help='"UID information about the requesting user."', args=[d.arg(name='uid', type=d.T.string)]), - withUid(uid): { spec+: { uid: uid } }, - '#withUser':: d.fn(help="\"User is the user you're testing for. 
If you specify \\\"User\\\" but not \\\"Group\\\", then is it interpreted as \\\"What if User were not a member of any groups\"", args=[d.arg(name='user', type=d.T.string)]), - withUser(user): { spec+: { user: user } }, - }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1beta1/subjectAccessReviewSpec.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1beta1/subjectAccessReviewSpec.libsonnet deleted file mode 100644 index 3a787fb74ab..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1beta1/subjectAccessReviewSpec.libsonnet +++ /dev/null @@ -1,42 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='subjectAccessReviewSpec', url='', help='"SubjectAccessReviewSpec is a description of the access request. Exactly one of ResourceAuthorizationAttributes and NonResourceAuthorizationAttributes must be set"'), - '#nonResourceAttributes':: d.obj(help='"NonResourceAttributes includes the authorization attributes available for non-resource requests to the Authorizer interface"'), - nonResourceAttributes: { - '#withPath':: d.fn(help='"Path is the URL path of the request"', args=[d.arg(name='path', type=d.T.string)]), - withPath(path): { nonResourceAttributes+: { path: path } }, - '#withVerb':: d.fn(help='"Verb is the standard HTTP verb"', args=[d.arg(name='verb', type=d.T.string)]), - withVerb(verb): { nonResourceAttributes+: { verb: verb } }, - }, - '#resourceAttributes':: d.obj(help='"ResourceAttributes includes the authorization attributes available for resource requests to the Authorizer interface"'), - resourceAttributes: { - '#withGroup':: d.fn(help='"Group is the API Group of the Resource. \\"*\\" means all."', args=[d.arg(name='group', type=d.T.string)]), - withGroup(group): { resourceAttributes+: { group: group } }, - '#withName':: d.fn(help='"Name is the name of the resource being requested for a \\"get\\" or deleted for a \\"delete\\". \\"\\" (empty) means all."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { resourceAttributes+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace is the namespace of the action being requested. Currently, there is no distinction between no namespace and all namespaces \\"\\" (empty) is defaulted for LocalSubjectAccessReviews \\"\\" (empty) is empty for cluster-scoped resources \\"\\" (empty) means \\"all\\" for namespace scoped resources from a SubjectAccessReview or SelfSubjectAccessReview"', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { resourceAttributes+: { namespace: namespace } }, - '#withResource':: d.fn(help='"Resource is one of the existing resource types. \\"*\\" means all."', args=[d.arg(name='resource', type=d.T.string)]), - withResource(resource): { resourceAttributes+: { resource: resource } }, - '#withSubresource':: d.fn(help='"Subresource is one of the existing resource types. \\"\\" means none."', args=[d.arg(name='subresource', type=d.T.string)]), - withSubresource(subresource): { resourceAttributes+: { subresource: subresource } }, - '#withVerb':: d.fn(help='"Verb is a kubernetes resource API verb, like: get, list, watch, create, update, delete, proxy. 
\\"*\\" means all."', args=[d.arg(name='verb', type=d.T.string)]), - withVerb(verb): { resourceAttributes+: { verb: verb } }, - '#withVersion':: d.fn(help='"Version is the API Version of the Resource. \\"*\\" means all."', args=[d.arg(name='version', type=d.T.string)]), - withVersion(version): { resourceAttributes+: { version: version } }, - }, - '#withExtra':: d.fn(help='"Extra corresponds to the user.Info.GetExtra() method from the authenticator. Since that is input to the authorizer it needs a reflection here."', args=[d.arg(name='extra', type=d.T.object)]), - withExtra(extra): { extra: extra }, - '#withExtraMixin':: d.fn(help='"Extra corresponds to the user.Info.GetExtra() method from the authenticator. Since that is input to the authorizer it needs a reflection here."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='extra', type=d.T.object)]), - withExtraMixin(extra): { extra+: extra }, - '#withGroup':: d.fn(help="\"Groups is the groups you're testing for.\"", args=[d.arg(name='group', type=d.T.array)]), - withGroup(group): { group: if std.isArray(v=group) then group else [group] }, - '#withGroupMixin':: d.fn(help="\"Groups is the groups you're testing for.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='group', type=d.T.array)]), - withGroupMixin(group): { group+: if std.isArray(v=group) then group else [group] }, - '#withUid':: d.fn(help='"UID information about the requesting user."', args=[d.arg(name='uid', type=d.T.string)]), - withUid(uid): { uid: uid }, - '#withUser':: d.fn(help="\"User is the user you're testing for. If you specify \\\"User\\\" but not \\\"Group\\\", then is it interpreted as \\\"What if User were not a member of any groups\"", args=[d.arg(name='user', type=d.T.string)]), - withUser(user): { user: user }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1beta1/subjectAccessReviewStatus.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1beta1/subjectAccessReviewStatus.libsonnet deleted file mode 100644 index 77871364b61..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1beta1/subjectAccessReviewStatus.libsonnet +++ /dev/null @@ -1,14 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='subjectAccessReviewStatus', url='', help='"SubjectAccessReviewStatus"'), - '#withAllowed':: d.fn(help='"Allowed is required. True if the action would be allowed, false otherwise."', args=[d.arg(name='allowed', type=d.T.boolean)]), - withAllowed(allowed): { allowed: allowed }, - '#withDenied':: d.fn(help='"Denied is optional. True if the action would be denied, otherwise false. If both allowed is false and denied is false, then the authorizer has no opinion on whether to authorize the action. Denied may not be true if Allowed is true."', args=[d.arg(name='denied', type=d.T.boolean)]), - withDenied(denied): { denied: denied }, - '#withEvaluationError':: d.fn(help='"EvaluationError is an indication that some error occurred during the authorization check. It is entirely possible to get an error and be able to continue determine authorization status in spite of it. 
For instance, RBAC can be missing a role, but enough roles are still present and bound to reason about the request."', args=[d.arg(name='evaluationError', type=d.T.string)]), - withEvaluationError(evaluationError): { evaluationError: evaluationError }, - '#withReason':: d.fn(help='"Reason is optional. It indicates why a request was allowed or denied."', args=[d.arg(name='reason', type=d.T.string)]), - withReason(reason): { reason: reason }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1beta1/subjectRulesReviewStatus.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1beta1/subjectRulesReviewStatus.libsonnet deleted file mode 100644 index 43340113936..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1beta1/subjectRulesReviewStatus.libsonnet +++ /dev/null @@ -1,18 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='subjectRulesReviewStatus', url='', help="\"SubjectRulesReviewStatus contains the result of a rules check. This check can be incomplete depending on the set of authorizers the server is configured with and any errors experienced during evaluation. Because authorization rules are additive, if a rule appears in a list it's safe to assume the subject has that permission, even if that list is incomplete.\""), - '#withEvaluationError':: d.fn(help="\"EvaluationError can appear in combination with Rules. It indicates an error occurred during rule evaluation, such as an authorizer that doesn't support rule evaluation, and that ResourceRules and/or NonResourceRules may be incomplete.\"", args=[d.arg(name='evaluationError', type=d.T.string)]), - withEvaluationError(evaluationError): { evaluationError: evaluationError }, - '#withIncomplete':: d.fn(help="\"Incomplete is true when the rules returned by this call are incomplete. This is most commonly encountered when an authorizer, such as an external authorizer, doesn't support rules evaluation.\"", args=[d.arg(name='incomplete', type=d.T.boolean)]), - withIncomplete(incomplete): { incomplete: incomplete }, - '#withNonResourceRules':: d.fn(help="\"NonResourceRules is the list of actions the subject is allowed to perform on non-resources. The list ordering isn't significant, may contain duplicates, and possibly be incomplete.\"", args=[d.arg(name='nonResourceRules', type=d.T.array)]), - withNonResourceRules(nonResourceRules): { nonResourceRules: if std.isArray(v=nonResourceRules) then nonResourceRules else [nonResourceRules] }, - '#withNonResourceRulesMixin':: d.fn(help="\"NonResourceRules is the list of actions the subject is allowed to perform on non-resources. The list ordering isn't significant, may contain duplicates, and possibly be incomplete.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='nonResourceRules', type=d.T.array)]), - withNonResourceRulesMixin(nonResourceRules): { nonResourceRules+: if std.isArray(v=nonResourceRules) then nonResourceRules else [nonResourceRules] }, - '#withResourceRules':: d.fn(help="\"ResourceRules is the list of actions the subject is allowed to perform on resources. 
The list ordering isn't significant, may contain duplicates, and possibly be incomplete.\"", args=[d.arg(name='resourceRules', type=d.T.array)]), - withResourceRules(resourceRules): { resourceRules: if std.isArray(v=resourceRules) then resourceRules else [resourceRules] }, - '#withResourceRulesMixin':: d.fn(help="\"ResourceRules is the list of actions the subject is allowed to perform on resources. The list ordering isn't significant, may contain duplicates, and possibly be incomplete.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='resourceRules', type=d.T.array)]), - withResourceRulesMixin(resourceRules): { resourceRules+: if std.isArray(v=resourceRules) then resourceRules else [resourceRules] }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v1/crossVersionObjectReference.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v1/crossVersionObjectReference.libsonnet deleted file mode 100644 index 643713bbcdb..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v1/crossVersionObjectReference.libsonnet +++ /dev/null @@ -1,10 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='crossVersionObjectReference', url='', help='"CrossVersionObjectReference contains enough information to let you identify the referred resource."'), - '#withKind':: d.fn(help='"Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds\\', args=[d.arg(name='kind', type=d.T.string)]), - withKind(kind): { kind: kind }, - '#withName':: d.fn(help='"Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { name: name }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v1/scaleStatus.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v1/scaleStatus.libsonnet deleted file mode 100644 index 55f20dbb892..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v1/scaleStatus.libsonnet +++ /dev/null @@ -1,10 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='scaleStatus', url='', help='"ScaleStatus represents the current status of a scale subresource."'), - '#withReplicas':: d.fn(help='"actual number of observed instances of the scaled object."', args=[d.arg(name='replicas', type=d.T.integer)]), - withReplicas(replicas): { replicas: replicas }, - '#withSelector':: d.fn(help='"label query over pods that should match the replicas count. This is same as the label selector but in the string format to avoid introspection by clients. The string will be in the same format as the query-param syntax. 
More info about label selectors: http://kubernetes.io/docs/user-guide/labels#label-selectors"', args=[d.arg(name='selector', type=d.T.string)]), - withSelector(selector): { selector: selector }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta1/containerResourceMetricSource.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta1/containerResourceMetricSource.libsonnet deleted file mode 100644 index 93e908d8f3f..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta1/containerResourceMetricSource.libsonnet +++ /dev/null @@ -1,14 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='containerResourceMetricSource', url='', help='"ContainerResourceMetricSource indicates how to scale on a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). The values will be averaged together before being compared to the target. Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \\"pods\\" source. Only one \\"target\\" type should be set."'), - '#withContainer':: d.fn(help='"container is the name of the container in the pods of the scaling target"', args=[d.arg(name='container', type=d.T.string)]), - withContainer(container): { container: container }, - '#withName':: d.fn(help='"name is the name of the resource in question."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { name: name }, - '#withTargetAverageUtilization':: d.fn(help='"targetAverageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods."', args=[d.arg(name='targetAverageUtilization', type=d.T.integer)]), - withTargetAverageUtilization(targetAverageUtilization): { targetAverageUtilization: targetAverageUtilization }, - '#withTargetAverageValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. 
| .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='targetAverageValue', type=d.T.string)]), - withTargetAverageValue(targetAverageValue): { targetAverageValue: targetAverageValue }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta1/containerResourceMetricStatus.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta1/containerResourceMetricStatus.libsonnet deleted file mode 100644 index 79cdfc24bf3..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta1/containerResourceMetricStatus.libsonnet +++ /dev/null @@ -1,14 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='containerResourceMetricStatus', url='', help='"ContainerResourceMetricStatus indicates the current value of a resource metric known to Kubernetes, as specified in requests and limits, describing a single container in each pod in the current scale target (e.g. CPU or memory). 
Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \\"pods\\" source."'), - '#withContainer':: d.fn(help='"container is the name of the container in the pods of the scaling target"', args=[d.arg(name='container', type=d.T.string)]), - withContainer(container): { container: container }, - '#withCurrentAverageUtilization':: d.fn(help='"currentAverageUtilization is the current value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. It will only be present if `targetAverageValue` was set in the corresponding metric specification."', args=[d.arg(name='currentAverageUtilization', type=d.T.integer)]), - withCurrentAverageUtilization(currentAverageUtilization): { currentAverageUtilization: currentAverageUtilization }, - '#withCurrentAverageValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. 
(So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='currentAverageValue', type=d.T.string)]), - withCurrentAverageValue(currentAverageValue): { currentAverageValue: currentAverageValue }, - '#withName':: d.fn(help='"name is the name of the resource in question."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { name: name }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta1/crossVersionObjectReference.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta1/crossVersionObjectReference.libsonnet deleted file mode 100644 index 643713bbcdb..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta1/crossVersionObjectReference.libsonnet +++ /dev/null @@ -1,10 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='crossVersionObjectReference', url='', help='"CrossVersionObjectReference contains enough information to let you identify the referred resource."'), - '#withKind':: d.fn(help='"Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds\\', args=[d.arg(name='kind', type=d.T.string)]), - withKind(kind): { kind: kind }, - '#withName':: d.fn(help='"Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { name: name }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta1/externalMetricSource.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta1/externalMetricSource.libsonnet deleted file mode 100644 index 9c2f69c2052..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta1/externalMetricSource.libsonnet +++ /dev/null @@ -1,23 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='externalMetricSource', url='', help='"ExternalMetricSource indicates how to scale on a metric not associated with any Kubernetes object (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster). Exactly one \\"target\\" type should be set."'), - '#metricSelector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), - metricSelector: { - '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), - withMatchExpressions(matchExpressions): { metricSelector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } }, - '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. 
The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), - withMatchExpressionsMixin(matchExpressions): { metricSelector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } }, - '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), - withMatchLabels(matchLabels): { metricSelector+: { matchLabels: matchLabels } }, - '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), - withMatchLabelsMixin(matchLabels): { metricSelector+: { matchLabels+: matchLabels } }, - }, - '#withMetricName':: d.fn(help='"metricName is the name of the metric in question."', args=[d.arg(name='metricName', type=d.T.string)]), - withMetricName(metricName): { metricName: metricName }, - '#withTargetAverageValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. 
The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='targetAverageValue', type=d.T.string)]), - withTargetAverageValue(targetAverageValue): { targetAverageValue: targetAverageValue }, - '#withTargetValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. 
(So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='targetValue', type=d.T.string)]), - withTargetValue(targetValue): { targetValue: targetValue }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta1/externalMetricStatus.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta1/externalMetricStatus.libsonnet deleted file mode 100644 index 27075d307e8..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta1/externalMetricStatus.libsonnet +++ /dev/null @@ -1,23 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='externalMetricStatus', url='', help='"ExternalMetricStatus indicates the current value of a global metric not associated with any Kubernetes object."'), - '#metricSelector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), - metricSelector: { - '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), - withMatchExpressions(matchExpressions): { metricSelector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } }, - '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), - withMatchExpressionsMixin(matchExpressions): { metricSelector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } }, - '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), - withMatchLabels(matchLabels): { metricSelector+: { matchLabels: matchLabels } }, - '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), - withMatchLabelsMixin(matchLabels): { metricSelector+: { matchLabels+: matchLabels } }, - }, - '#withCurrentAverageValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. 
It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='currentAverageValue', type=d.T.string)]), - withCurrentAverageValue(currentAverageValue): { currentAverageValue: currentAverageValue }, - '#withCurrentValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. 
| .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. 
(So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='currentValue', type=d.T.string)]), - withCurrentValue(currentValue): { currentValue: currentValue }, - '#withMetricName':: d.fn(help='"metricName is the name of a metric used for autoscaling in metric system."', args=[d.arg(name='metricName', type=d.T.string)]), - withMetricName(metricName): { metricName: metricName }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta1/horizontalPodAutoscaler.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta1/horizontalPodAutoscaler.libsonnet deleted file mode 100644 index e24587f6fac..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta1/horizontalPodAutoscaler.libsonnet +++ /dev/null @@ -1,76 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='horizontalPodAutoscaler', url='', help='"HorizontalPodAutoscaler is the configuration for a horizontal pod autoscaler, which automatically manages the replica count of any resource implementing the scale subresource based on the metrics specified."'), - '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), - metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), - withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), - withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, - '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. 
Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), - withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, - '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), - withDeletionGracePeriodSeconds(deletionGracePeriodSeconds): { metadata+: { deletionGracePeriodSeconds: deletionGracePeriodSeconds } }, - '#withDeletionTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='deletionTimestamp', type=d.T.string)]), - withDeletionTimestamp(deletionTimestamp): { metadata+: { deletionTimestamp: deletionTimestamp } }, - '#withFinalizers':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. 
If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), - withGenerateName(generateName): { metadata+: { generateName: generateName } }, - '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), - withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), - withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), - withLabelsMixin(labels): { metadata+: { labels+: labels } }, - '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. 
Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { metadata+: { namespace: namespace } }, - '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, - '#withOwnerReferencesMixin':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, - '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), - withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), - withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. 
It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), - withUid(uid): { metadata+: { uid: uid } }, - }, - '#new':: d.fn(help='new returns an instance of HorizontalPodAutoscaler', args=[d.arg(name='name', type=d.T.string)]), - new(name): { - apiVersion: 'autoscaling/v2beta1', - kind: 'HorizontalPodAutoscaler', - } + self.metadata.withName(name=name), - '#spec':: d.obj(help='"HorizontalPodAutoscalerSpec describes the desired functionality of the HorizontalPodAutoscaler."'), - spec: { - '#scaleTargetRef':: d.obj(help='"CrossVersionObjectReference contains enough information to let you identify the referred resource."'), - scaleTargetRef: { - '#withApiVersion':: d.fn(help='"API version of the referent"', args=[d.arg(name='apiVersion', type=d.T.string)]), - withApiVersion(apiVersion): { spec+: { scaleTargetRef+: { apiVersion: apiVersion } } }, - '#withKind':: d.fn(help='"Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds\\', args=[d.arg(name='kind', type=d.T.string)]), - withKind(kind): { spec+: { scaleTargetRef+: { kind: kind } } }, - '#withName':: d.fn(help='"Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { spec+: { scaleTargetRef+: { name: name } } }, - }, - '#withMaxReplicas':: d.fn(help='"maxReplicas is the upper limit for the number of replicas to which the autoscaler can scale up. It cannot be less that minReplicas."', args=[d.arg(name='maxReplicas', type=d.T.integer)]), - withMaxReplicas(maxReplicas): { spec+: { maxReplicas: maxReplicas } }, - '#withMetrics':: d.fn(help='"metrics contains the specifications for which to use to calculate the desired replica count (the maximum replica count across all metrics will be used). The desired replica count is calculated multiplying the ratio between the target value and the current value by the current number of pods. Ergo, metrics used must decrease as the pod count is increased, and vice-versa. See the individual metric source types for more information about how each type of metric must respond."', args=[d.arg(name='metrics', type=d.T.array)]), - withMetrics(metrics): { spec+: { metrics: if std.isArray(v=metrics) then metrics else [metrics] } }, - '#withMetricsMixin':: d.fn(help='"metrics contains the specifications for which to use to calculate the desired replica count (the maximum replica count across all metrics will be used). The desired replica count is calculated multiplying the ratio between the target value and the current value by the current number of pods. Ergo, metrics used must decrease as the pod count is increased, and vice-versa. See the individual metric source types for more information about how each type of metric must respond."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='metrics', type=d.T.array)]), - withMetricsMixin(metrics): { spec+: { metrics+: if std.isArray(v=metrics) then metrics else [metrics] } }, - '#withMinReplicas':: d.fn(help='"minReplicas is the lower limit for the number of replicas to which the autoscaler can scale down. It defaults to 1 pod. 
minReplicas is allowed to be 0 if the alpha feature gate HPAScaleToZero is enabled and at least one Object or External metric is configured. Scaling is active as long as at least one metric value is available."', args=[d.arg(name='minReplicas', type=d.T.integer)]), - withMinReplicas(minReplicas): { spec+: { minReplicas: minReplicas } }, - }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta1/horizontalPodAutoscalerSpec.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta1/horizontalPodAutoscalerSpec.libsonnet deleted file mode 100644 index b786b2fb152..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta1/horizontalPodAutoscalerSpec.libsonnet +++ /dev/null @@ -1,23 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='horizontalPodAutoscalerSpec', url='', help='"HorizontalPodAutoscalerSpec describes the desired functionality of the HorizontalPodAutoscaler."'), - '#scaleTargetRef':: d.obj(help='"CrossVersionObjectReference contains enough information to let you identify the referred resource."'), - scaleTargetRef: { - '#withApiVersion':: d.fn(help='"API version of the referent"', args=[d.arg(name='apiVersion', type=d.T.string)]), - withApiVersion(apiVersion): { scaleTargetRef+: { apiVersion: apiVersion } }, - '#withKind':: d.fn(help='"Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds\\', args=[d.arg(name='kind', type=d.T.string)]), - withKind(kind): { scaleTargetRef+: { kind: kind } }, - '#withName':: d.fn(help='"Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { scaleTargetRef+: { name: name } }, - }, - '#withMaxReplicas':: d.fn(help='"maxReplicas is the upper limit for the number of replicas to which the autoscaler can scale up. It cannot be less that minReplicas."', args=[d.arg(name='maxReplicas', type=d.T.integer)]), - withMaxReplicas(maxReplicas): { maxReplicas: maxReplicas }, - '#withMetrics':: d.fn(help='"metrics contains the specifications for which to use to calculate the desired replica count (the maximum replica count across all metrics will be used). The desired replica count is calculated multiplying the ratio between the target value and the current value by the current number of pods. Ergo, metrics used must decrease as the pod count is increased, and vice-versa. See the individual metric source types for more information about how each type of metric must respond."', args=[d.arg(name='metrics', type=d.T.array)]), - withMetrics(metrics): { metrics: if std.isArray(v=metrics) then metrics else [metrics] }, - '#withMetricsMixin':: d.fn(help='"metrics contains the specifications for which to use to calculate the desired replica count (the maximum replica count across all metrics will be used). The desired replica count is calculated multiplying the ratio between the target value and the current value by the current number of pods. Ergo, metrics used must decrease as the pod count is increased, and vice-versa. 
See the individual metric source types for more information about how each type of metric must respond."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='metrics', type=d.T.array)]), - withMetricsMixin(metrics): { metrics+: if std.isArray(v=metrics) then metrics else [metrics] }, - '#withMinReplicas':: d.fn(help='"minReplicas is the lower limit for the number of replicas to which the autoscaler can scale down. It defaults to 1 pod. minReplicas is allowed to be 0 if the alpha feature gate HPAScaleToZero is enabled and at least one Object or External metric is configured. Scaling is active as long as at least one metric value is available."', args=[d.arg(name='minReplicas', type=d.T.integer)]), - withMinReplicas(minReplicas): { minReplicas: minReplicas }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta1/horizontalPodAutoscalerStatus.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta1/horizontalPodAutoscalerStatus.libsonnet deleted file mode 100644 index e4753e76278..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta1/horizontalPodAutoscalerStatus.libsonnet +++ /dev/null @@ -1,22 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='horizontalPodAutoscalerStatus', url='', help='"HorizontalPodAutoscalerStatus describes the current status of a horizontal pod autoscaler."'), - '#withConditions':: d.fn(help='"conditions is the set of conditions required for this autoscaler to scale its target, and indicates whether or not those conditions are met."', args=[d.arg(name='conditions', type=d.T.array)]), - withConditions(conditions): { conditions: if std.isArray(v=conditions) then conditions else [conditions] }, - '#withConditionsMixin':: d.fn(help='"conditions is the set of conditions required for this autoscaler to scale its target, and indicates whether or not those conditions are met."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='conditions', type=d.T.array)]), - withConditionsMixin(conditions): { conditions+: if std.isArray(v=conditions) then conditions else [conditions] }, - '#withCurrentMetrics':: d.fn(help='"currentMetrics is the last read state of the metrics used by this autoscaler."', args=[d.arg(name='currentMetrics', type=d.T.array)]), - withCurrentMetrics(currentMetrics): { currentMetrics: if std.isArray(v=currentMetrics) then currentMetrics else [currentMetrics] }, - '#withCurrentMetricsMixin':: d.fn(help='"currentMetrics is the last read state of the metrics used by this autoscaler."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='currentMetrics', type=d.T.array)]), - withCurrentMetricsMixin(currentMetrics): { currentMetrics+: if std.isArray(v=currentMetrics) then currentMetrics else [currentMetrics] }, - '#withCurrentReplicas':: d.fn(help='"currentReplicas is current number of replicas of pods managed by this autoscaler, as last seen by the autoscaler."', args=[d.arg(name='currentReplicas', type=d.T.integer)]), - withCurrentReplicas(currentReplicas): { currentReplicas: currentReplicas }, - '#withDesiredReplicas':: d.fn(help='"desiredReplicas is the desired number of replicas of pods managed by this autoscaler, as last calculated by the autoscaler."', args=[d.arg(name='desiredReplicas', 
type=d.T.integer)]), - withDesiredReplicas(desiredReplicas): { desiredReplicas: desiredReplicas }, - '#withLastScaleTime':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='lastScaleTime', type=d.T.string)]), - withLastScaleTime(lastScaleTime): { lastScaleTime: lastScaleTime }, - '#withObservedGeneration':: d.fn(help='"observedGeneration is the most recent generation observed by this autoscaler."', args=[d.arg(name='observedGeneration', type=d.T.integer)]), - withObservedGeneration(observedGeneration): { observedGeneration: observedGeneration }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta1/main.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta1/main.libsonnet deleted file mode 100644 index b178832c07c..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta1/main.libsonnet +++ /dev/null @@ -1,21 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='v2beta1', url='', help=''), - containerResourceMetricSource: (import 'containerResourceMetricSource.libsonnet'), - containerResourceMetricStatus: (import 'containerResourceMetricStatus.libsonnet'), - crossVersionObjectReference: (import 'crossVersionObjectReference.libsonnet'), - externalMetricSource: (import 'externalMetricSource.libsonnet'), - externalMetricStatus: (import 'externalMetricStatus.libsonnet'), - horizontalPodAutoscaler: (import 'horizontalPodAutoscaler.libsonnet'), - horizontalPodAutoscalerCondition: (import 'horizontalPodAutoscalerCondition.libsonnet'), - horizontalPodAutoscalerSpec: (import 'horizontalPodAutoscalerSpec.libsonnet'), - horizontalPodAutoscalerStatus: (import 'horizontalPodAutoscalerStatus.libsonnet'), - metricSpec: (import 'metricSpec.libsonnet'), - metricStatus: (import 'metricStatus.libsonnet'), - objectMetricSource: (import 'objectMetricSource.libsonnet'), - objectMetricStatus: (import 'objectMetricStatus.libsonnet'), - podsMetricSource: (import 'podsMetricSource.libsonnet'), - podsMetricStatus: (import 'podsMetricStatus.libsonnet'), - resourceMetricSource: (import 'resourceMetricSource.libsonnet'), - resourceMetricStatus: (import 'resourceMetricStatus.libsonnet'), -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta1/metricSpec.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta1/metricSpec.libsonnet deleted file mode 100644 index 31c07f89e10..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta1/metricSpec.libsonnet +++ /dev/null @@ -1,95 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='metricSpec', url='', help='"MetricSpec specifies how to scale based on a single metric (only `type` and one other matching field should be set at once)."'), - '#containerResource':: d.obj(help='"ContainerResourceMetricSource indicates how to scale on a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). The values will be averaged together before being compared to the target. 
Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \\"pods\\" source. Only one \\"target\\" type should be set."'), - containerResource: { - '#withContainer':: d.fn(help='"container is the name of the container in the pods of the scaling target"', args=[d.arg(name='container', type=d.T.string)]), - withContainer(container): { containerResource+: { container: container } }, - '#withName':: d.fn(help='"name is the name of the resource in question."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { containerResource+: { name: name } }, - '#withTargetAverageUtilization':: d.fn(help='"targetAverageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods."', args=[d.arg(name='targetAverageUtilization', type=d.T.integer)]), - withTargetAverageUtilization(targetAverageUtilization): { containerResource+: { targetAverageUtilization: targetAverageUtilization } }, - '#withTargetAverageValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. 
That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='targetAverageValue', type=d.T.string)]), - withTargetAverageValue(targetAverageValue): { containerResource+: { targetAverageValue: targetAverageValue } }, - }, - '#external':: d.obj(help='"ExternalMetricSource indicates how to scale on a metric not associated with any Kubernetes object (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster). Exactly one \\"target\\" type should be set."'), - external: { - '#metricSelector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), - metricSelector: { - '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), - withMatchExpressions(matchExpressions): { external+: { metricSelector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } }, - '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), - withMatchExpressionsMixin(matchExpressions): { external+: { metricSelector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } }, - '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), - withMatchLabels(matchLabels): { external+: { metricSelector+: { matchLabels: matchLabels } } }, - '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), - withMatchLabelsMixin(matchLabels): { external+: { metricSelector+: { matchLabels+: matchLabels } } }, - }, - '#withMetricName':: d.fn(help='"metricName is the name of the metric in question."', args=[d.arg(name='metricName', type=d.T.string)]), - withMetricName(metricName): { external+: { metricName: metricName } }, - '#withTargetAverageValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. 
It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='targetAverageValue', type=d.T.string)]), - withTargetAverageValue(targetAverageValue): { external+: { targetAverageValue: targetAverageValue } }, - '#withTargetValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. 
| .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='targetValue', type=d.T.string)]), - withTargetValue(targetValue): { external+: { targetValue: targetValue } }, - }, - '#object':: d.obj(help='"ObjectMetricSource indicates how to scale on a metric describing a kubernetes object (for example, hits-per-second on an Ingress object)."'), - object: { - '#selector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), - selector: { - '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), - withMatchExpressions(matchExpressions): { object+: { selector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } }, - '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. 
The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), - withMatchExpressionsMixin(matchExpressions): { object+: { selector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } }, - '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), - withMatchLabels(matchLabels): { object+: { selector+: { matchLabels: matchLabels } } }, - '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), - withMatchLabelsMixin(matchLabels): { object+: { selector+: { matchLabels+: matchLabels } } }, - }, - '#target':: d.obj(help='"CrossVersionObjectReference contains enough information to let you identify the referred resource."'), - target: { - '#withApiVersion':: d.fn(help='"API version of the referent"', args=[d.arg(name='apiVersion', type=d.T.string)]), - withApiVersion(apiVersion): { object+: { target+: { apiVersion: apiVersion } } }, - '#withKind':: d.fn(help='"Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds\\', args=[d.arg(name='kind', type=d.T.string)]), - withKind(kind): { object+: { target+: { kind: kind } } }, - '#withName':: d.fn(help='"Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { object+: { target+: { name: name } } }, - }, - '#withAverageValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. 
| .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='averageValue', type=d.T.string)]), - withAverageValue(averageValue): { object+: { averageValue: averageValue } }, - '#withMetricName':: d.fn(help='"metricName is the name of the metric in question."', args=[d.arg(name='metricName', type=d.T.string)]), - withMetricName(metricName): { object+: { metricName: metricName } }, - '#withTargetValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. 
| .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='targetValue', type=d.T.string)]), - withTargetValue(targetValue): { object+: { targetValue: targetValue } }, - }, - '#pods':: d.obj(help='"PodsMetricSource indicates how to scale on a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value."'), - pods: { - '#selector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), - selector: { - '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), - withMatchExpressions(matchExpressions): { pods+: { selector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } }, - '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. 
The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), - withMatchExpressionsMixin(matchExpressions): { pods+: { selector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } }, - '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), - withMatchLabels(matchLabels): { pods+: { selector+: { matchLabels: matchLabels } } }, - '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), - withMatchLabelsMixin(matchLabels): { pods+: { selector+: { matchLabels+: matchLabels } } }, - }, - '#withMetricName':: d.fn(help='"metricName is the name of the metric in question"', args=[d.arg(name='metricName', type=d.T.string)]), - withMetricName(metricName): { pods+: { metricName: metricName } }, - '#withTargetAverageValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. 
The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='targetAverageValue', type=d.T.string)]), - withTargetAverageValue(targetAverageValue): { pods+: { targetAverageValue: targetAverageValue } }, - }, - '#resource':: d.obj(help='"ResourceMetricSource indicates how to scale on a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). The values will be averaged together before being compared to the target. Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \\"pods\\" source. Only one \\"target\\" type should be set."'), - resource: { - '#withName':: d.fn(help='"name is the name of the resource in question."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { resource+: { name: name } }, - '#withTargetAverageUtilization':: d.fn(help='"targetAverageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods."', args=[d.arg(name='targetAverageUtilization', type=d.T.integer)]), - withTargetAverageUtilization(targetAverageUtilization): { resource+: { targetAverageUtilization: targetAverageUtilization } }, - '#withTargetAverageValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. 
(E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='targetAverageValue', type=d.T.string)]), - withTargetAverageValue(targetAverageValue): { resource+: { targetAverageValue: targetAverageValue } }, - }, - '#withType':: d.fn(help='"type is the type of metric source. It should be one of \\"ContainerResource\\", \\"External\\", \\"Object\\", \\"Pods\\" or \\"Resource\\", each mapping to a matching field in the object. Note: \\"ContainerResource\\" type is available on when the feature-gate HPAContainerMetrics is enabled"', args=[d.arg(name='type', type=d.T.string)]), - withType(type): { type: type }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta1/metricStatus.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta1/metricStatus.libsonnet deleted file mode 100644 index 2ecd28f3e3d..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta1/metricStatus.libsonnet +++ /dev/null @@ -1,95 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='metricStatus', url='', help='"MetricStatus describes the last-read state of a single metric."'), - '#containerResource':: d.obj(help='"ContainerResourceMetricStatus indicates the current value of a resource metric known to Kubernetes, as specified in requests and limits, describing a single container in each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \\"pods\\" source."'), - containerResource: { - '#withContainer':: d.fn(help='"container is the name of the container in the pods of the scaling target"', args=[d.arg(name='container', type=d.T.string)]), - withContainer(container): { containerResource+: { container: container } }, - '#withCurrentAverageUtilization':: d.fn(help='"currentAverageUtilization is the current value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. 
It will only be present if `targetAverageValue` was set in the corresponding metric specification."', args=[d.arg(name='currentAverageUtilization', type=d.T.integer)]), - withCurrentAverageUtilization(currentAverageUtilization): { containerResource+: { currentAverageUtilization: currentAverageUtilization } }, - '#withCurrentAverageValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. 
(So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='currentAverageValue', type=d.T.string)]), - withCurrentAverageValue(currentAverageValue): { containerResource+: { currentAverageValue: currentAverageValue } }, - '#withName':: d.fn(help='"name is the name of the resource in question."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { containerResource+: { name: name } }, - }, - '#external':: d.obj(help='"ExternalMetricStatus indicates the current value of a global metric not associated with any Kubernetes object."'), - external: { - '#metricSelector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), - metricSelector: { - '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), - withMatchExpressions(matchExpressions): { external+: { metricSelector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } }, - '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), - withMatchExpressionsMixin(matchExpressions): { external+: { metricSelector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } }, - '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), - withMatchLabels(matchLabels): { external+: { metricSelector+: { matchLabels: matchLabels } } }, - '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), - withMatchLabelsMixin(matchLabels): { external+: { metricSelector+: { matchLabels+: matchLabels } } }, - }, - '#withCurrentAverageValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. 
| .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='currentAverageValue', type=d.T.string)]), - withCurrentAverageValue(currentAverageValue): { external+: { currentAverageValue: currentAverageValue } }, - '#withCurrentValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. 
| .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='currentValue', type=d.T.string)]), - withCurrentValue(currentValue): { external+: { currentValue: currentValue } }, - '#withMetricName':: d.fn(help='"metricName is the name of a metric used for autoscaling in metric system."', args=[d.arg(name='metricName', type=d.T.string)]), - withMetricName(metricName): { external+: { metricName: metricName } }, - }, - '#object':: d.obj(help='"ObjectMetricStatus indicates the current value of a metric describing a kubernetes object (for example, hits-per-second on an Ingress object)."'), - object: { - '#selector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), - selector: { - '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), - withMatchExpressions(matchExpressions): { object+: { selector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } }, - '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. 
The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), - withMatchExpressionsMixin(matchExpressions): { object+: { selector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } }, - '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), - withMatchLabels(matchLabels): { object+: { selector+: { matchLabels: matchLabels } } }, - '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), - withMatchLabelsMixin(matchLabels): { object+: { selector+: { matchLabels+: matchLabels } } }, - }, - '#target':: d.obj(help='"CrossVersionObjectReference contains enough information to let you identify the referred resource."'), - target: { - '#withApiVersion':: d.fn(help='"API version of the referent"', args=[d.arg(name='apiVersion', type=d.T.string)]), - withApiVersion(apiVersion): { object+: { target+: { apiVersion: apiVersion } } }, - '#withKind':: d.fn(help='"Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds\\', args=[d.arg(name='kind', type=d.T.string)]), - withKind(kind): { object+: { target+: { kind: kind } } }, - '#withName':: d.fn(help='"Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { object+: { target+: { name: name } } }, - }, - '#withAverageValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. 
| .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='averageValue', type=d.T.string)]), - withAverageValue(averageValue): { object+: { averageValue: averageValue } }, - '#withCurrentValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. 
| .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='currentValue', type=d.T.string)]), - withCurrentValue(currentValue): { object+: { currentValue: currentValue } }, - '#withMetricName':: d.fn(help='"metricName is the name of the metric in question."', args=[d.arg(name='metricName', type=d.T.string)]), - withMetricName(metricName): { object+: { metricName: metricName } }, - }, - '#pods':: d.obj(help='"PodsMetricStatus indicates the current value of a metric describing each pod in the current scale target (for example, transactions-processed-per-second)."'), - pods: { - '#selector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), - selector: { - '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), - withMatchExpressions(matchExpressions): { pods+: { selector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } }, - '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. 
The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), - withMatchExpressionsMixin(matchExpressions): { pods+: { selector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } }, - '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), - withMatchLabels(matchLabels): { pods+: { selector+: { matchLabels: matchLabels } } }, - '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), - withMatchLabelsMixin(matchLabels): { pods+: { selector+: { matchLabels+: matchLabels } } }, - }, - '#withCurrentAverageValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. 
The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='currentAverageValue', type=d.T.string)]), - withCurrentAverageValue(currentAverageValue): { pods+: { currentAverageValue: currentAverageValue } }, - '#withMetricName':: d.fn(help='"metricName is the name of the metric in question"', args=[d.arg(name='metricName', type=d.T.string)]), - withMetricName(metricName): { pods+: { metricName: metricName } }, - }, - '#resource':: d.obj(help='"ResourceMetricStatus indicates the current value of a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \\"pods\\" source."'), - resource: { - '#withCurrentAverageUtilization':: d.fn(help='"currentAverageUtilization is the current value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. It will only be present if `targetAverageValue` was set in the corresponding metric specification."', args=[d.arg(name='currentAverageUtilization', type=d.T.integer)]), - withCurrentAverageUtilization(currentAverageUtilization): { resource+: { currentAverageUtilization: currentAverageUtilization } }, - '#withCurrentAverageValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. 
Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='currentAverageValue', type=d.T.string)]), - withCurrentAverageValue(currentAverageValue): { resource+: { currentAverageValue: currentAverageValue } }, - '#withName':: d.fn(help='"name is the name of the resource in question."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { resource+: { name: name } }, - }, - '#withType':: d.fn(help='"type is the type of metric source. It will be one of \\"ContainerResource\\", \\"External\\", \\"Object\\", \\"Pods\\" or \\"Resource\\", each corresponds to a matching field in the object. Note: \\"ContainerResource\\" type is available on when the feature-gate HPAContainerMetrics is enabled"', args=[d.arg(name='type', type=d.T.string)]), - withType(type): { type: type }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta1/objectMetricSource.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta1/objectMetricSource.libsonnet deleted file mode 100644 index a08c3167687..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta1/objectMetricSource.libsonnet +++ /dev/null @@ -1,32 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='objectMetricSource', url='', help='"ObjectMetricSource indicates how to scale on a metric describing a kubernetes object (for example, hits-per-second on an Ingress object)."'), - '#selector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), - selector: { - '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. 
The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), - withMatchExpressions(matchExpressions): { selector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } }, - '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), - withMatchExpressionsMixin(matchExpressions): { selector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } }, - '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), - withMatchLabels(matchLabels): { selector+: { matchLabels: matchLabels } }, - '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), - withMatchLabelsMixin(matchLabels): { selector+: { matchLabels+: matchLabels } }, - }, - '#target':: d.obj(help='"CrossVersionObjectReference contains enough information to let you identify the referred resource."'), - target: { - '#withApiVersion':: d.fn(help='"API version of the referent"', args=[d.arg(name='apiVersion', type=d.T.string)]), - withApiVersion(apiVersion): { target+: { apiVersion: apiVersion } }, - '#withKind':: d.fn(help='"Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds\\', args=[d.arg(name='kind', type=d.T.string)]), - withKind(kind): { target+: { kind: kind } }, - '#withName':: d.fn(help='"Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { target+: { name: name } }, - }, - '#withAverageValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. 
| .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='averageValue', type=d.T.string)]), - withAverageValue(averageValue): { averageValue: averageValue }, - '#withMetricName':: d.fn(help='"metricName is the name of the metric in question."', args=[d.arg(name='metricName', type=d.T.string)]), - withMetricName(metricName): { metricName: metricName }, - '#withTargetValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. 
| .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='targetValue', type=d.T.string)]), - withTargetValue(targetValue): { targetValue: targetValue }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta1/objectMetricStatus.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta1/objectMetricStatus.libsonnet deleted file mode 100644 index 4e8b05d6ab6..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta1/objectMetricStatus.libsonnet +++ /dev/null @@ -1,32 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='objectMetricStatus', url='', help='"ObjectMetricStatus indicates the current value of a metric describing a kubernetes object (for example, hits-per-second on an Ingress object)."'), - '#selector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), - selector: { - '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. 
The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), - withMatchExpressions(matchExpressions): { selector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } }, - '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), - withMatchExpressionsMixin(matchExpressions): { selector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } }, - '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), - withMatchLabels(matchLabels): { selector+: { matchLabels: matchLabels } }, - '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), - withMatchLabelsMixin(matchLabels): { selector+: { matchLabels+: matchLabels } }, - }, - '#target':: d.obj(help='"CrossVersionObjectReference contains enough information to let you identify the referred resource."'), - target: { - '#withApiVersion':: d.fn(help='"API version of the referent"', args=[d.arg(name='apiVersion', type=d.T.string)]), - withApiVersion(apiVersion): { target+: { apiVersion: apiVersion } }, - '#withKind':: d.fn(help='"Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds\\', args=[d.arg(name='kind', type=d.T.string)]), - withKind(kind): { target+: { kind: kind } }, - '#withName':: d.fn(help='"Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { target+: { name: name } }, - }, - '#withAverageValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. 
| .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='averageValue', type=d.T.string)]), - withAverageValue(averageValue): { averageValue: averageValue }, - '#withCurrentValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. 
| .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='currentValue', type=d.T.string)]), - withCurrentValue(currentValue): { currentValue: currentValue }, - '#withMetricName':: d.fn(help='"metricName is the name of the metric in question."', args=[d.arg(name='metricName', type=d.T.string)]), - withMetricName(metricName): { metricName: metricName }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta1/podsMetricSource.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta1/podsMetricSource.libsonnet deleted file mode 100644 index 5174952a2bb..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta1/podsMetricSource.libsonnet +++ /dev/null @@ -1,21 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='podsMetricSource', url='', help='"PodsMetricSource indicates how to scale on a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value."'), - '#selector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. 
A null label selector matches no objects."'), - selector: { - '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), - withMatchExpressions(matchExpressions): { selector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } }, - '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), - withMatchExpressionsMixin(matchExpressions): { selector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } }, - '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), - withMatchLabels(matchLabels): { selector+: { matchLabels: matchLabels } }, - '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), - withMatchLabelsMixin(matchLabels): { selector+: { matchLabels+: matchLabels } }, - }, - '#withMetricName':: d.fn(help='"metricName is the name of the metric in question"', args=[d.arg(name='metricName', type=d.T.string)]), - withMetricName(metricName): { metricName: metricName }, - '#withTargetAverageValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) 
This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='targetAverageValue', type=d.T.string)]), - withTargetAverageValue(targetAverageValue): { targetAverageValue: targetAverageValue }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta1/podsMetricStatus.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta1/podsMetricStatus.libsonnet deleted file mode 100644 index 635cc310780..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta1/podsMetricStatus.libsonnet +++ /dev/null @@ -1,21 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='podsMetricStatus', url='', help='"PodsMetricStatus indicates the current value of a metric describing each pod in the current scale target (for example, transactions-processed-per-second)."'), - '#selector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), - selector: { - '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), - withMatchExpressions(matchExpressions): { selector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } }, - '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), - withMatchExpressionsMixin(matchExpressions): { selector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } }, - '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". 
The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), - withMatchLabels(matchLabels): { selector+: { matchLabels: matchLabels } }, - '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), - withMatchLabelsMixin(matchLabels): { selector+: { matchLabels+: matchLabels } }, - }, - '#withCurrentAverageValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. 
(So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='currentAverageValue', type=d.T.string)]), - withCurrentAverageValue(currentAverageValue): { currentAverageValue: currentAverageValue }, - '#withMetricName':: d.fn(help='"metricName is the name of the metric in question"', args=[d.arg(name='metricName', type=d.T.string)]), - withMetricName(metricName): { metricName: metricName }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta1/resourceMetricSource.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta1/resourceMetricSource.libsonnet deleted file mode 100644 index 17626b68b52..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta1/resourceMetricSource.libsonnet +++ /dev/null @@ -1,12 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='resourceMetricSource', url='', help='"ResourceMetricSource indicates how to scale on a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). The values will be averaged together before being compared to the target. Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \\"pods\\" source. Only one \\"target\\" type should be set."'), - '#withName':: d.fn(help='"name is the name of the resource in question."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { name: name }, - '#withTargetAverageUtilization':: d.fn(help='"targetAverageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods."', args=[d.arg(name='targetAverageUtilization', type=d.T.integer)]), - withTargetAverageUtilization(targetAverageUtilization): { targetAverageUtilization: targetAverageUtilization }, - '#withTargetAverageValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. 
| .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='targetAverageValue', type=d.T.string)]), - withTargetAverageValue(targetAverageValue): { targetAverageValue: targetAverageValue }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta1/resourceMetricStatus.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta1/resourceMetricStatus.libsonnet deleted file mode 100644 index 8a870c1eb84..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta1/resourceMetricStatus.libsonnet +++ /dev/null @@ -1,12 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='resourceMetricStatus', url='', help='"ResourceMetricStatus indicates the current value of a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). 
Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \\"pods\\" source."'), - '#withCurrentAverageUtilization':: d.fn(help='"currentAverageUtilization is the current value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. It will only be present if `targetAverageValue` was set in the corresponding metric specification."', args=[d.arg(name='currentAverageUtilization', type=d.T.integer)]), - withCurrentAverageUtilization(currentAverageUtilization): { currentAverageUtilization: currentAverageUtilization }, - '#withCurrentAverageValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. 
(So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='currentAverageValue', type=d.T.string)]), - withCurrentAverageValue(currentAverageValue): { currentAverageValue: currentAverageValue }, - '#withName':: d.fn(help='"name is the name of the resource in question."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { name: name }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/containerResourceMetricSource.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/containerResourceMetricSource.libsonnet deleted file mode 100644 index 531a0b6c6a7..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/containerResourceMetricSource.libsonnet +++ /dev/null @@ -1,21 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='containerResourceMetricSource', url='', help='"ContainerResourceMetricSource indicates how to scale on a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). The values will be averaged together before being compared to the target. Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \\"pods\\" source. Only one \\"target\\" type should be set."'), - '#target':: d.obj(help='"MetricTarget defines the target value, average value, or average utilization of a specific metric"'), - target: { - '#withAverageUtilization':: d.fn(help='"averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for Resource metric source type"', args=[d.arg(name='averageUtilization', type=d.T.integer)]), - withAverageUtilization(averageUtilization): { target+: { averageUtilization: averageUtilization } }, - '#withAverageValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. 
| .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='averageValue', type=d.T.string)]), - withAverageValue(averageValue): { target+: { averageValue: averageValue } }, - '#withType':: d.fn(help='"type represents whether the metric type is Utilization, Value, or AverageValue"', args=[d.arg(name='type', type=d.T.string)]), - withType(type): { target+: { type: type } }, - '#withValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. 
| .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='value', type=d.T.string)]), - withValue(value): { target+: { value: value } }, - }, - '#withContainer':: d.fn(help='"container is the name of the container in the pods of the scaling target"', args=[d.arg(name='container', type=d.T.string)]), - withContainer(container): { container: container }, - '#withName':: d.fn(help='"name is the name of the resource in question."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { name: name }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/containerResourceMetricStatus.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/containerResourceMetricStatus.libsonnet deleted file mode 100644 index b0c77e47e67..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/containerResourceMetricStatus.libsonnet +++ /dev/null @@ -1,19 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='containerResourceMetricStatus', url='', help='"ContainerResourceMetricStatus indicates the current value of a resource metric known to Kubernetes, as specified in requests and limits, describing a single container in each pod in the current scale target (e.g. CPU or memory). 
Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \\"pods\\" source."'), - '#current':: d.obj(help='"MetricValueStatus holds the current value for a metric"'), - current: { - '#withAverageUtilization':: d.fn(help='"currentAverageUtilization is the current value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods."', args=[d.arg(name='averageUtilization', type=d.T.integer)]), - withAverageUtilization(averageUtilization): { current+: { averageUtilization: averageUtilization } }, - '#withAverageValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. 
(So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='averageValue', type=d.T.string)]), - withAverageValue(averageValue): { current+: { averageValue: averageValue } }, - '#withValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. 
(So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='value', type=d.T.string)]), - withValue(value): { current+: { value: value } }, - }, - '#withContainer':: d.fn(help='"Container is the name of the container in the pods of the scaling target"', args=[d.arg(name='container', type=d.T.string)]), - withContainer(container): { container: container }, - '#withName':: d.fn(help='"Name is the name of the resource in question."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { name: name }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/crossVersionObjectReference.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/crossVersionObjectReference.libsonnet deleted file mode 100644 index 643713bbcdb..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/crossVersionObjectReference.libsonnet +++ /dev/null @@ -1,10 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='crossVersionObjectReference', url='', help='"CrossVersionObjectReference contains enough information to let you identify the referred resource."'), - '#withKind':: d.fn(help='"Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds\\', args=[d.arg(name='kind', type=d.T.string)]), - withKind(kind): { kind: kind }, - '#withName':: d.fn(help='"Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { name: name }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/externalMetricSource.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/externalMetricSource.libsonnet deleted file mode 100644 index ce91a7acb47..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/externalMetricSource.libsonnet +++ /dev/null @@ -1,33 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='externalMetricSource', url='', help='"ExternalMetricSource indicates how to scale on a metric not associated with any Kubernetes object (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster)."'), - '#metric':: d.obj(help='"MetricIdentifier defines the name and optionally selector for a metric"'), - metric: { - '#selector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), - selector: { - '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. 
The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), - withMatchExpressions(matchExpressions): { metric+: { selector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } }, - '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), - withMatchExpressionsMixin(matchExpressions): { metric+: { selector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } }, - '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), - withMatchLabels(matchLabels): { metric+: { selector+: { matchLabels: matchLabels } } }, - '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), - withMatchLabelsMixin(matchLabels): { metric+: { selector+: { matchLabels+: matchLabels } } }, - }, - '#withName':: d.fn(help='"name is the name of the given metric"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { metric+: { name: name } }, - }, - '#target':: d.obj(help='"MetricTarget defines the target value, average value, or average utilization of a specific metric"'), - target: { - '#withAverageUtilization':: d.fn(help='"averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for Resource metric source type"', args=[d.arg(name='averageUtilization', type=d.T.integer)]), - withAverageUtilization(averageUtilization): { target+: { averageUtilization: averageUtilization } }, - '#withAverageValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. 
| .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='averageValue', type=d.T.string)]), - withAverageValue(averageValue): { target+: { averageValue: averageValue } }, - '#withType':: d.fn(help='"type represents whether the metric type is Utilization, Value, or AverageValue"', args=[d.arg(name='type', type=d.T.string)]), - withType(type): { target+: { type: type } }, - '#withValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. 
| .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. 
(So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='value', type=d.T.string)]), - withValue(value): { target+: { value: value } }, - }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/externalMetricStatus.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/externalMetricStatus.libsonnet deleted file mode 100644 index d0c9b2884a1..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/externalMetricStatus.libsonnet +++ /dev/null @@ -1,31 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='externalMetricStatus', url='', help='"ExternalMetricStatus indicates the current value of a global metric not associated with any Kubernetes object."'), - '#current':: d.obj(help='"MetricValueStatus holds the current value for a metric"'), - current: { - '#withAverageUtilization':: d.fn(help='"currentAverageUtilization is the current value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods."', args=[d.arg(name='averageUtilization', type=d.T.integer)]), - withAverageUtilization(averageUtilization): { current+: { averageUtilization: averageUtilization } }, - '#withAverageValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". 
This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='averageValue', type=d.T.string)]), - withAverageValue(averageValue): { current+: { averageValue: averageValue } }, - '#withValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. 
That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='value', type=d.T.string)]), - withValue(value): { current+: { value: value } }, - }, - '#metric':: d.obj(help='"MetricIdentifier defines the name and optionally selector for a metric"'), - metric: { - '#selector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), - selector: { - '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), - withMatchExpressions(matchExpressions): { metric+: { selector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } }, - '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), - withMatchExpressionsMixin(matchExpressions): { metric+: { selector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } }, - '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), - withMatchLabels(matchLabels): { metric+: { selector+: { matchLabels: matchLabels } } }, - '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". 
The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), - withMatchLabelsMixin(matchLabels): { metric+: { selector+: { matchLabels+: matchLabels } } }, - }, - '#withName':: d.fn(help='"name is the name of the given metric"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { metric+: { name: name } }, - }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/horizontalPodAutoscalerStatus.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/horizontalPodAutoscalerStatus.libsonnet deleted file mode 100644 index e4753e76278..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/horizontalPodAutoscalerStatus.libsonnet +++ /dev/null @@ -1,22 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='horizontalPodAutoscalerStatus', url='', help='"HorizontalPodAutoscalerStatus describes the current status of a horizontal pod autoscaler."'), - '#withConditions':: d.fn(help='"conditions is the set of conditions required for this autoscaler to scale its target, and indicates whether or not those conditions are met."', args=[d.arg(name='conditions', type=d.T.array)]), - withConditions(conditions): { conditions: if std.isArray(v=conditions) then conditions else [conditions] }, - '#withConditionsMixin':: d.fn(help='"conditions is the set of conditions required for this autoscaler to scale its target, and indicates whether or not those conditions are met."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='conditions', type=d.T.array)]), - withConditionsMixin(conditions): { conditions+: if std.isArray(v=conditions) then conditions else [conditions] }, - '#withCurrentMetrics':: d.fn(help='"currentMetrics is the last read state of the metrics used by this autoscaler."', args=[d.arg(name='currentMetrics', type=d.T.array)]), - withCurrentMetrics(currentMetrics): { currentMetrics: if std.isArray(v=currentMetrics) then currentMetrics else [currentMetrics] }, - '#withCurrentMetricsMixin':: d.fn(help='"currentMetrics is the last read state of the metrics used by this autoscaler."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='currentMetrics', type=d.T.array)]), - withCurrentMetricsMixin(currentMetrics): { currentMetrics+: if std.isArray(v=currentMetrics) then currentMetrics else [currentMetrics] }, - '#withCurrentReplicas':: d.fn(help='"currentReplicas is current number of replicas of pods managed by this autoscaler, as last seen by the autoscaler."', args=[d.arg(name='currentReplicas', type=d.T.integer)]), - withCurrentReplicas(currentReplicas): { currentReplicas: currentReplicas }, - '#withDesiredReplicas':: d.fn(help='"desiredReplicas is the desired number of replicas of pods managed by this autoscaler, as last calculated by the autoscaler."', args=[d.arg(name='desiredReplicas', type=d.T.integer)]), - withDesiredReplicas(desiredReplicas): { desiredReplicas: desiredReplicas }, - '#withLastScaleTime':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. 
Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='lastScaleTime', type=d.T.string)]), - withLastScaleTime(lastScaleTime): { lastScaleTime: lastScaleTime }, - '#withObservedGeneration':: d.fn(help='"observedGeneration is the most recent generation observed by this autoscaler."', args=[d.arg(name='observedGeneration', type=d.T.integer)]), - withObservedGeneration(observedGeneration): { observedGeneration: observedGeneration }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/metricSpec.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/metricSpec.libsonnet deleted file mode 100644 index cc0b990cd2a..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/metricSpec.libsonnet +++ /dev/null @@ -1,141 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='metricSpec', url='', help='"MetricSpec specifies how to scale based on a single metric (only `type` and one other matching field should be set at once)."'), - '#containerResource':: d.obj(help='"ContainerResourceMetricSource indicates how to scale on a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). The values will be averaged together before being compared to the target. Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \\"pods\\" source. Only one \\"target\\" type should be set."'), - containerResource: { - '#target':: d.obj(help='"MetricTarget defines the target value, average value, or average utilization of a specific metric"'), - target: { - '#withAverageUtilization':: d.fn(help='"averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for Resource metric source type"', args=[d.arg(name='averageUtilization', type=d.T.integer)]), - withAverageUtilization(averageUtilization): { containerResource+: { target+: { averageUtilization: averageUtilization } } }, - '#withAverageValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. 
| .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='averageValue', type=d.T.string)]), - withAverageValue(averageValue): { containerResource+: { target+: { averageValue: averageValue } } }, - '#withType':: d.fn(help='"type represents whether the metric type is Utilization, Value, or AverageValue"', args=[d.arg(name='type', type=d.T.string)]), - withType(type): { containerResource+: { target+: { type: type } } }, - '#withValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. 
| .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='value', type=d.T.string)]), - withValue(value): { containerResource+: { target+: { value: value } } }, - }, - '#withContainer':: d.fn(help='"container is the name of the container in the pods of the scaling target"', args=[d.arg(name='container', type=d.T.string)]), - withContainer(container): { containerResource+: { container: container } }, - '#withName':: d.fn(help='"name is the name of the resource in question."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { containerResource+: { name: name } }, - }, - '#external':: d.obj(help='"ExternalMetricSource indicates how to scale on a metric not associated with any Kubernetes object (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster)."'), - external: { - '#metric':: d.obj(help='"MetricIdentifier defines the name and optionally selector for a metric"'), - metric: { - '#selector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), - selector: { - '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. 
The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), - withMatchExpressions(matchExpressions): { external+: { metric+: { selector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } } }, - '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), - withMatchExpressionsMixin(matchExpressions): { external+: { metric+: { selector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } } }, - '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), - withMatchLabels(matchLabels): { external+: { metric+: { selector+: { matchLabels: matchLabels } } } }, - '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), - withMatchLabelsMixin(matchLabels): { external+: { metric+: { selector+: { matchLabels+: matchLabels } } } }, - }, - '#withName':: d.fn(help='"name is the name of the given metric"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { external+: { metric+: { name: name } } }, - }, - '#target':: d.obj(help='"MetricTarget defines the target value, average value, or average utilization of a specific metric"'), - target: { - '#withAverageUtilization':: d.fn(help='"averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for Resource metric source type"', args=[d.arg(name='averageUtilization', type=d.T.integer)]), - withAverageUtilization(averageUtilization): { external+: { target+: { averageUtilization: averageUtilization } } }, - '#withAverageValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. 
| .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='averageValue', type=d.T.string)]), - withAverageValue(averageValue): { external+: { target+: { averageValue: averageValue } } }, - '#withType':: d.fn(help='"type represents whether the metric type is Utilization, Value, or AverageValue"', args=[d.arg(name='type', type=d.T.string)]), - withType(type): { external+: { target+: { type: type } } }, - '#withValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. 
| .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='value', type=d.T.string)]), - withValue(value): { external+: { target+: { value: value } } }, - }, - }, - '#object':: d.obj(help='"ObjectMetricSource indicates how to scale on a metric describing a kubernetes object (for example, hits-per-second on an Ingress object)."'), - object: { - '#describedObject':: d.obj(help='"CrossVersionObjectReference contains enough information to let you identify the referred resource."'), - describedObject: { - '#withApiVersion':: d.fn(help='"API version of the referent"', args=[d.arg(name='apiVersion', type=d.T.string)]), - withApiVersion(apiVersion): { object+: { describedObject+: { apiVersion: apiVersion } } }, - '#withKind':: d.fn(help='"Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds\\', args=[d.arg(name='kind', type=d.T.string)]), - withKind(kind): { object+: { describedObject+: { kind: kind } } }, - '#withName':: d.fn(help='"Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { object+: { describedObject+: { name: name } } }, - }, - '#metric':: d.obj(help='"MetricIdentifier defines the name and optionally selector for a metric"'), - metric: { - '#selector':: d.obj(help='"A label selector is a label query over a set of resources. 
The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), - selector: { - '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), - withMatchExpressions(matchExpressions): { object+: { metric+: { selector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } } }, - '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), - withMatchExpressionsMixin(matchExpressions): { object+: { metric+: { selector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } } }, - '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), - withMatchLabels(matchLabels): { object+: { metric+: { selector+: { matchLabels: matchLabels } } } }, - '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), - withMatchLabelsMixin(matchLabels): { object+: { metric+: { selector+: { matchLabels+: matchLabels } } } }, - }, - '#withName':: d.fn(help='"name is the name of the given metric"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { object+: { metric+: { name: name } } }, - }, - '#target':: d.obj(help='"MetricTarget defines the target value, average value, or average utilization of a specific metric"'), - target: { - '#withAverageUtilization':: d.fn(help='"averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for Resource metric source type"', args=[d.arg(name='averageUtilization', type=d.T.integer)]), - withAverageUtilization(averageUtilization): { object+: { target+: { averageUtilization: averageUtilization } } }, - '#withAverageValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. 
| .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='averageValue', type=d.T.string)]), - withAverageValue(averageValue): { object+: { target+: { averageValue: averageValue } } }, - '#withType':: d.fn(help='"type represents whether the metric type is Utilization, Value, or AverageValue"', args=[d.arg(name='type', type=d.T.string)]), - withType(type): { object+: { target+: { type: type } } }, - '#withValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. 
| .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='value', type=d.T.string)]), - withValue(value): { object+: { target+: { value: value } } }, - }, - }, - '#pods':: d.obj(help='"PodsMetricSource indicates how to scale on a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value."'), - pods: { - '#metric':: d.obj(help='"MetricIdentifier defines the name and optionally selector for a metric"'), - metric: { - '#selector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), - selector: { - '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), - withMatchExpressions(matchExpressions): { pods+: { metric+: { selector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } } }, - '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. 
The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), - withMatchExpressionsMixin(matchExpressions): { pods+: { metric+: { selector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } } }, - '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), - withMatchLabels(matchLabels): { pods+: { metric+: { selector+: { matchLabels: matchLabels } } } }, - '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), - withMatchLabelsMixin(matchLabels): { pods+: { metric+: { selector+: { matchLabels+: matchLabels } } } }, - }, - '#withName':: d.fn(help='"name is the name of the given metric"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { pods+: { metric+: { name: name } } }, - }, - '#target':: d.obj(help='"MetricTarget defines the target value, average value, or average utilization of a specific metric"'), - target: { - '#withAverageUtilization':: d.fn(help='"averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for Resource metric source type"', args=[d.arg(name='averageUtilization', type=d.T.integer)]), - withAverageUtilization(averageUtilization): { pods+: { target+: { averageUtilization: averageUtilization } } }, - '#withAverageValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. 
| .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='averageValue', type=d.T.string)]), - withAverageValue(averageValue): { pods+: { target+: { averageValue: averageValue } } }, - '#withType':: d.fn(help='"type represents whether the metric type is Utilization, Value, or AverageValue"', args=[d.arg(name='type', type=d.T.string)]), - withType(type): { pods+: { target+: { type: type } } }, - '#withValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. 
| .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='value', type=d.T.string)]), - withValue(value): { pods+: { target+: { value: value } } }, - }, - }, - '#resource':: d.obj(help='"ResourceMetricSource indicates how to scale on a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). The values will be averaged together before being compared to the target. Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \\"pods\\" source. Only one \\"target\\" type should be set."'), - resource: { - '#target':: d.obj(help='"MetricTarget defines the target value, average value, or average utilization of a specific metric"'), - target: { - '#withAverageUtilization':: d.fn(help='"averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for Resource metric source type"', args=[d.arg(name='averageUtilization', type=d.T.integer)]), - withAverageUtilization(averageUtilization): { resource+: { target+: { averageUtilization: averageUtilization } } }, - '#withAverageValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. 
It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='averageValue', type=d.T.string)]), - withAverageValue(averageValue): { resource+: { target+: { averageValue: averageValue } } }, - '#withType':: d.fn(help='"type represents whether the metric type is Utilization, Value, or AverageValue"', args=[d.arg(name='type', type=d.T.string)]), - withType(type): { resource+: { target+: { type: type } } }, - '#withValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... 
| 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='value', type=d.T.string)]), - withValue(value): { resource+: { target+: { value: value } } }, - }, - '#withName':: d.fn(help='"name is the name of the resource in question."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { resource+: { name: name } }, - }, - '#withType':: d.fn(help='"type is the type of metric source. It should be one of \\"ContainerResource\\", \\"External\\", \\"Object\\", \\"Pods\\" or \\"Resource\\", each mapping to a matching field in the object. 
Note: \\"ContainerResource\\" type is available on when the feature-gate HPAContainerMetrics is enabled"', args=[d.arg(name='type', type=d.T.string)]), - withType(type): { type: type }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/metricStatus.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/metricStatus.libsonnet deleted file mode 100644 index 0b823f71881..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/metricStatus.libsonnet +++ /dev/null @@ -1,131 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='metricStatus', url='', help='"MetricStatus describes the last-read state of a single metric."'), - '#containerResource':: d.obj(help='"ContainerResourceMetricStatus indicates the current value of a resource metric known to Kubernetes, as specified in requests and limits, describing a single container in each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \\"pods\\" source."'), - containerResource: { - '#current':: d.obj(help='"MetricValueStatus holds the current value for a metric"'), - current: { - '#withAverageUtilization':: d.fn(help='"currentAverageUtilization is the current value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods."', args=[d.arg(name='averageUtilization', type=d.T.integer)]), - withAverageUtilization(averageUtilization): { containerResource+: { current+: { averageUtilization: averageUtilization } } }, - '#withAverageValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) 
This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='averageValue', type=d.T.string)]), - withAverageValue(averageValue): { containerResource+: { current+: { averageValue: averageValue } } }, - '#withValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. 
The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='value', type=d.T.string)]), - withValue(value): { containerResource+: { current+: { value: value } } }, - }, - '#withContainer':: d.fn(help='"Container is the name of the container in the pods of the scaling target"', args=[d.arg(name='container', type=d.T.string)]), - withContainer(container): { containerResource+: { container: container } }, - '#withName':: d.fn(help='"Name is the name of the resource in question."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { containerResource+: { name: name } }, - }, - '#external':: d.obj(help='"ExternalMetricStatus indicates the current value of a global metric not associated with any Kubernetes object."'), - external: { - '#current':: d.obj(help='"MetricValueStatus holds the current value for a metric"'), - current: { - '#withAverageUtilization':: d.fn(help='"currentAverageUtilization is the current value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods."', args=[d.arg(name='averageUtilization', type=d.T.integer)]), - withAverageUtilization(averageUtilization): { external+: { current+: { averageUtilization: averageUtilization } } }, - '#withAverageValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) 
This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='averageValue', type=d.T.string)]), - withAverageValue(averageValue): { external+: { current+: { averageValue: averageValue } } }, - '#withValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. 
The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='value', type=d.T.string)]), - withValue(value): { external+: { current+: { value: value } } }, - }, - '#metric':: d.obj(help='"MetricIdentifier defines the name and optionally selector for a metric"'), - metric: { - '#selector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), - selector: { - '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), - withMatchExpressions(matchExpressions): { external+: { metric+: { selector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } } }, - '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), - withMatchExpressionsMixin(matchExpressions): { external+: { metric+: { selector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } } }, - '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), - withMatchLabels(matchLabels): { external+: { metric+: { selector+: { matchLabels: matchLabels } } } }, - '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". 
The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), - withMatchLabelsMixin(matchLabels): { external+: { metric+: { selector+: { matchLabels+: matchLabels } } } }, - }, - '#withName':: d.fn(help='"name is the name of the given metric"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { external+: { metric+: { name: name } } }, - }, - }, - '#object':: d.obj(help='"ObjectMetricStatus indicates the current value of a metric describing a kubernetes object (for example, hits-per-second on an Ingress object)."'), - object: { - '#current':: d.obj(help='"MetricValueStatus holds the current value for a metric"'), - current: { - '#withAverageUtilization':: d.fn(help='"currentAverageUtilization is the current value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods."', args=[d.arg(name='averageUtilization', type=d.T.integer)]), - withAverageUtilization(averageUtilization): { object+: { current+: { averageUtilization: averageUtilization } } }, - '#withAverageValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. 
That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='averageValue', type=d.T.string)]), - withAverageValue(averageValue): { object+: { current+: { averageValue: averageValue } } }, - '#withValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. 
(So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='value', type=d.T.string)]), - withValue(value): { object+: { current+: { value: value } } }, - }, - '#describedObject':: d.obj(help='"CrossVersionObjectReference contains enough information to let you identify the referred resource."'), - describedObject: { - '#withApiVersion':: d.fn(help='"API version of the referent"', args=[d.arg(name='apiVersion', type=d.T.string)]), - withApiVersion(apiVersion): { object+: { describedObject+: { apiVersion: apiVersion } } }, - '#withKind':: d.fn(help='"Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds\\', args=[d.arg(name='kind', type=d.T.string)]), - withKind(kind): { object+: { describedObject+: { kind: kind } } }, - '#withName':: d.fn(help='"Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { object+: { describedObject+: { name: name } } }, - }, - '#metric':: d.obj(help='"MetricIdentifier defines the name and optionally selector for a metric"'), - metric: { - '#selector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), - selector: { - '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), - withMatchExpressions(matchExpressions): { object+: { metric+: { selector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } } }, - '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), - withMatchExpressionsMixin(matchExpressions): { object+: { metric+: { selector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } } }, - '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), - withMatchLabels(matchLabels): { object+: { metric+: { selector+: { matchLabels: matchLabels } } } }, - '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". 
The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), - withMatchLabelsMixin(matchLabels): { object+: { metric+: { selector+: { matchLabels+: matchLabels } } } }, - }, - '#withName':: d.fn(help='"name is the name of the given metric"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { object+: { metric+: { name: name } } }, - }, - }, - '#pods':: d.obj(help='"PodsMetricStatus indicates the current value of a metric describing each pod in the current scale target (for example, transactions-processed-per-second)."'), - pods: { - '#current':: d.obj(help='"MetricValueStatus holds the current value for a metric"'), - current: { - '#withAverageUtilization':: d.fn(help='"currentAverageUtilization is the current value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods."', args=[d.arg(name='averageUtilization', type=d.T.integer)]), - withAverageUtilization(averageUtilization): { pods+: { current+: { averageUtilization: averageUtilization } } }, - '#withAverageValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. 
That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='averageValue', type=d.T.string)]), - withAverageValue(averageValue): { pods+: { current+: { averageValue: averageValue } } }, - '#withValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. 
(So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='value', type=d.T.string)]), - withValue(value): { pods+: { current+: { value: value } } }, - }, - '#metric':: d.obj(help='"MetricIdentifier defines the name and optionally selector for a metric"'), - metric: { - '#selector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), - selector: { - '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), - withMatchExpressions(matchExpressions): { pods+: { metric+: { selector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } } }, - '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), - withMatchExpressionsMixin(matchExpressions): { pods+: { metric+: { selector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } } }, - '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), - withMatchLabels(matchLabels): { pods+: { metric+: { selector+: { matchLabels: matchLabels } } } }, - '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), - withMatchLabelsMixin(matchLabels): { pods+: { metric+: { selector+: { matchLabels+: matchLabels } } } }, - }, - '#withName':: d.fn(help='"name is the name of the given metric"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { pods+: { metric+: { name: name } } }, - }, - }, - '#resource':: d.obj(help='"ResourceMetricStatus indicates the current value of a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). 
Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \\"pods\\" source."'), - resource: { - '#current':: d.obj(help='"MetricValueStatus holds the current value for a metric"'), - current: { - '#withAverageUtilization':: d.fn(help='"currentAverageUtilization is the current value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods."', args=[d.arg(name='averageUtilization', type=d.T.integer)]), - withAverageUtilization(averageUtilization): { resource+: { current+: { averageUtilization: averageUtilization } } }, - '#withAverageValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. 
(So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='averageValue', type=d.T.string)]), - withAverageValue(averageValue): { resource+: { current+: { averageValue: averageValue } } }, - '#withValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='value', type=d.T.string)]), - withValue(value): { resource+: { current+: { value: value } } }, - }, - '#withName':: d.fn(help='"Name is the name of the resource in question."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { resource+: { name: name } }, - }, - '#withType':: d.fn(help='"type is the type of metric source. 
It will be one of \\"ContainerResource\\", \\"External\\", \\"Object\\", \\"Pods\\" or \\"Resource\\", each corresponds to a matching field in the object. Note: \\"ContainerResource\\" type is available on when the feature-gate HPAContainerMetrics is enabled"', args=[d.arg(name='type', type=d.T.string)]), - withType(type): { type: type }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/metricTarget.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/metricTarget.libsonnet deleted file mode 100644 index 63672a4066b..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/metricTarget.libsonnet +++ /dev/null @@ -1,14 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='metricTarget', url='', help='"MetricTarget defines the target value, average value, or average utilization of a specific metric"'), - '#withAverageUtilization':: d.fn(help='"averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for Resource metric source type"', args=[d.arg(name='averageUtilization', type=d.T.integer)]), - withAverageUtilization(averageUtilization): { averageUtilization: averageUtilization }, - '#withAverageValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. 
The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='averageValue', type=d.T.string)]), - withAverageValue(averageValue): { averageValue: averageValue }, - '#withType':: d.fn(help='"type represents whether the metric type is Utilization, Value, or AverageValue"', args=[d.arg(name='type', type=d.T.string)]), - withType(type): { type: type }, - '#withValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. 
(So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='value', type=d.T.string)]), - withValue(value): { value: value }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/metricValueStatus.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/metricValueStatus.libsonnet deleted file mode 100644 index 5f220873496..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/metricValueStatus.libsonnet +++ /dev/null @@ -1,12 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='metricValueStatus', url='', help='"MetricValueStatus holds the current value for a metric"'), - '#withAverageUtilization':: d.fn(help='"currentAverageUtilization is the current value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods."', args=[d.arg(name='averageUtilization', type=d.T.integer)]), - withAverageUtilization(averageUtilization): { averageUtilization: averageUtilization }, - '#withAverageValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. 
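// --- editor's annotation (not part of the vendored diff): the metricTarget helpers removed
// above are thin setters over autoscaling/v2beta2. A minimal usage sketch, assuming the
// usual jsonnet-bundler vendor layout for this import path:
local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.21/main.libsonnet';
local target = k.autoscaling.v2beta2.metricTarget;

// Renders { type: 'Utilization', averageUtilization: 75 }.
target.withType('Utilization') + target.withAverageUtilization(75)
// After this upgrade the equivalent helpers should live under k.autoscaling.v2.metricTarget,
// since autoscaling/v2beta2 was dropped from Kubernetes itself in 1.26.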
The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='averageValue', type=d.T.string)]), - withAverageValue(averageValue): { averageValue: averageValue }, - '#withValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. 
(So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='value', type=d.T.string)]), - withValue(value): { value: value }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/objectMetricSource.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/objectMetricSource.libsonnet deleted file mode 100644 index 1639bfcb0f3..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/objectMetricSource.libsonnet +++ /dev/null @@ -1,42 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='objectMetricSource', url='', help='"ObjectMetricSource indicates how to scale on a metric describing a kubernetes object (for example, hits-per-second on an Ingress object)."'), - '#describedObject':: d.obj(help='"CrossVersionObjectReference contains enough information to let you identify the referred resource."'), - describedObject: { - '#withApiVersion':: d.fn(help='"API version of the referent"', args=[d.arg(name='apiVersion', type=d.T.string)]), - withApiVersion(apiVersion): { describedObject+: { apiVersion: apiVersion } }, - '#withKind':: d.fn(help='"Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds\\', args=[d.arg(name='kind', type=d.T.string)]), - withKind(kind): { describedObject+: { kind: kind } }, - '#withName':: d.fn(help='"Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { describedObject+: { name: name } }, - }, - '#metric':: d.obj(help='"MetricIdentifier defines the name and optionally selector for a metric"'), - metric: { - '#selector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), - selector: { - '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), - withMatchExpressions(matchExpressions): { metric+: { selector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } }, - '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), - withMatchExpressionsMixin(matchExpressions): { metric+: { selector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } }, - '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". 
The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), - withMatchLabels(matchLabels): { metric+: { selector+: { matchLabels: matchLabels } } }, - '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), - withMatchLabelsMixin(matchLabels): { metric+: { selector+: { matchLabels+: matchLabels } } }, - }, - '#withName':: d.fn(help='"name is the name of the given metric"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { metric+: { name: name } }, - }, - '#target':: d.obj(help='"MetricTarget defines the target value, average value, or average utilization of a specific metric"'), - target: { - '#withAverageUtilization':: d.fn(help='"averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for Resource metric source type"', args=[d.arg(name='averageUtilization', type=d.T.integer)]), - withAverageUtilization(averageUtilization): { target+: { averageUtilization: averageUtilization } }, - '#withAverageValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. 
The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='averageValue', type=d.T.string)]), - withAverageValue(averageValue): { target+: { averageValue: averageValue } }, - '#withType':: d.fn(help='"type represents whether the metric type is Utilization, Value, or AverageValue"', args=[d.arg(name='type', type=d.T.string)]), - withType(type): { target+: { type: type } }, - '#withValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. 
That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='value', type=d.T.string)]), - withValue(value): { target+: { value: value } }, - }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/objectMetricStatus.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/objectMetricStatus.libsonnet deleted file mode 100644 index bf16df5f94c..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/objectMetricStatus.libsonnet +++ /dev/null @@ -1,40 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='objectMetricStatus', url='', help='"ObjectMetricStatus indicates the current value of a metric describing a kubernetes object (for example, hits-per-second on an Ingress object)."'), - '#current':: d.obj(help='"MetricValueStatus holds the current value for a metric"'), - current: { - '#withAverageUtilization':: d.fn(help='"currentAverageUtilization is the current value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods."', args=[d.arg(name='averageUtilization', type=d.T.integer)]), - withAverageUtilization(averageUtilization): { current+: { averageUtilization: averageUtilization } }, - '#withAverageValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) 
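// --- editor's annotation (not part of the vendored diff): a hedged sketch of how the
// objectMetricSource helpers deleted above compose into an HPA "Object" metric; the
// object and metric names below are illustrative only.
local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.21/main.libsonnet';
local obj = k.autoscaling.v2beta2.objectMetricSource;

// Renders { describedObject: {...}, metric: { name: 'requests_per_second' },
//           target: { type: 'Value', value: '10k' } }.
obj.describedObject.withApiVersion('networking.k8s.io/v1')
+ obj.describedObject.withKind('Ingress')
+ obj.describedObject.withName('main-ingress')
+ obj.metric.withName('requests_per_second')
+ obj.target.withType('Value')
+ obj.target.withValue('10k')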
This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='averageValue', type=d.T.string)]), - withAverageValue(averageValue): { current+: { averageValue: averageValue } }, - '#withValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. 
The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='value', type=d.T.string)]), - withValue(value): { current+: { value: value } }, - }, - '#describedObject':: d.obj(help='"CrossVersionObjectReference contains enough information to let you identify the referred resource."'), - describedObject: { - '#withApiVersion':: d.fn(help='"API version of the referent"', args=[d.arg(name='apiVersion', type=d.T.string)]), - withApiVersion(apiVersion): { describedObject+: { apiVersion: apiVersion } }, - '#withKind':: d.fn(help='"Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds\\', args=[d.arg(name='kind', type=d.T.string)]), - withKind(kind): { describedObject+: { kind: kind } }, - '#withName':: d.fn(help='"Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { describedObject+: { name: name } }, - }, - '#metric':: d.obj(help='"MetricIdentifier defines the name and optionally selector for a metric"'), - metric: { - '#selector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), - selector: { - '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), - withMatchExpressions(matchExpressions): { metric+: { selector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } }, - '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), - withMatchExpressionsMixin(matchExpressions): { metric+: { selector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } }, - '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), - withMatchLabels(matchLabels): { metric+: { selector+: { matchLabels: matchLabels } } }, - '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), - withMatchLabelsMixin(matchLabels): { metric+: { selector+: { matchLabels+: matchLabels } } }, - }, - '#withName':: d.fn(help='"name is the name of the given metric"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { metric+: { name: name } }, - }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/podsMetricSource.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/podsMetricSource.libsonnet deleted file mode 100644 index e21f772169d..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/podsMetricSource.libsonnet +++ /dev/null @@ -1,33 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='podsMetricSource', url='', help='"PodsMetricSource indicates how to scale on a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value."'), - '#metric':: d.obj(help='"MetricIdentifier defines the name and optionally selector for a metric"'), - metric: { - '#selector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), - selector: { - '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), - withMatchExpressions(matchExpressions): { metric+: { selector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } }, - '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), - withMatchExpressionsMixin(matchExpressions): { metric+: { selector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } }, - '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), - withMatchLabels(matchLabels): { metric+: { selector+: { matchLabels: matchLabels } } }, - '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". 
The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), - withMatchLabelsMixin(matchLabels): { metric+: { selector+: { matchLabels+: matchLabels } } }, - }, - '#withName':: d.fn(help='"name is the name of the given metric"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { metric+: { name: name } }, - }, - '#target':: d.obj(help='"MetricTarget defines the target value, average value, or average utilization of a specific metric"'), - target: { - '#withAverageUtilization':: d.fn(help='"averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for Resource metric source type"', args=[d.arg(name='averageUtilization', type=d.T.integer)]), - withAverageUtilization(averageUtilization): { target+: { averageUtilization: averageUtilization } }, - '#withAverageValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. 
(So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='averageValue', type=d.T.string)]), - withAverageValue(averageValue): { target+: { averageValue: averageValue } }, - '#withType':: d.fn(help='"type represents whether the metric type is Utilization, Value, or AverageValue"', args=[d.arg(name='type', type=d.T.string)]), - withType(type): { target+: { type: type } }, - '#withValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. 
(So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='value', type=d.T.string)]), - withValue(value): { target+: { value: value } }, - }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/podsMetricStatus.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/podsMetricStatus.libsonnet deleted file mode 100644 index 28c40525e2c..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/podsMetricStatus.libsonnet +++ /dev/null @@ -1,31 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='podsMetricStatus', url='', help='"PodsMetricStatus indicates the current value of a metric describing each pod in the current scale target (for example, transactions-processed-per-second)."'), - '#current':: d.obj(help='"MetricValueStatus holds the current value for a metric"'), - current: { - '#withAverageUtilization':: d.fn(help='"currentAverageUtilization is the current value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods."', args=[d.arg(name='averageUtilization', type=d.T.integer)]), - withAverageUtilization(averageUtilization): { current+: { averageUtilization: averageUtilization } }, - '#withAverageValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". 
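// --- editor's annotation (not part of the vendored diff): the podsMetricSource helpers
// deleted above describe a per-pod metric that the HPA averages across pods before
// comparing it to the target; a minimal sketch with illustrative metric and label names:
local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.21/main.libsonnet';
local pods = k.autoscaling.v2beta2.podsMetricSource;

// Renders { metric: { name: ..., selector: { matchLabels: { app: 'checkout' } } },
//           target: { type: 'AverageValue', averageValue: '1k' } }.
pods.metric.withName('transactions_processed_per_second')
+ pods.metric.selector.withMatchLabels({ app: 'checkout' })
+ pods.target.withType('AverageValue')
+ pods.target.withAverageValue('1k')
// The 1.29 library should expose the same shape under k.autoscaling.v2.podsMetricSource.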
This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='averageValue', type=d.T.string)]), - withAverageValue(averageValue): { current+: { averageValue: averageValue } }, - '#withValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. 
That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='value', type=d.T.string)]), - withValue(value): { current+: { value: value } }, - }, - '#metric':: d.obj(help='"MetricIdentifier defines the name and optionally selector for a metric"'), - metric: { - '#selector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), - selector: { - '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), - withMatchExpressions(matchExpressions): { metric+: { selector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } }, - '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), - withMatchExpressionsMixin(matchExpressions): { metric+: { selector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } }, - '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), - withMatchLabels(matchLabels): { metric+: { selector+: { matchLabels: matchLabels } } }, - '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". 
The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), - withMatchLabelsMixin(matchLabels): { metric+: { selector+: { matchLabels+: matchLabels } } }, - }, - '#withName':: d.fn(help='"name is the name of the given metric"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { metric+: { name: name } }, - }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/resourceMetricSource.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/resourceMetricSource.libsonnet deleted file mode 100644 index c73209bea8b..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/resourceMetricSource.libsonnet +++ /dev/null @@ -1,19 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='resourceMetricSource', url='', help='"ResourceMetricSource indicates how to scale on a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). The values will be averaged together before being compared to the target. Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \\"pods\\" source. Only one \\"target\\" type should be set."'), - '#target':: d.obj(help='"MetricTarget defines the target value, average value, or average utilization of a specific metric"'), - target: { - '#withAverageUtilization':: d.fn(help='"averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for Resource metric source type"', args=[d.arg(name='averageUtilization', type=d.T.integer)]), - withAverageUtilization(averageUtilization): { target+: { averageUtilization: averageUtilization } }, - '#withAverageValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. 
| .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='averageValue', type=d.T.string)]), - withAverageValue(averageValue): { target+: { averageValue: averageValue } }, - '#withType':: d.fn(help='"type represents whether the metric type is Utilization, Value, or AverageValue"', args=[d.arg(name='type', type=d.T.string)]), - withType(type): { target+: { type: type } }, - '#withValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. 
| .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='value', type=d.T.string)]), - withValue(value): { target+: { value: value } }, - }, - '#withName':: d.fn(help='"name is the name of the resource in question."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { name: name }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/resourceMetricStatus.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/resourceMetricStatus.libsonnet deleted file mode 100644 index ea377dc7cb0..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/resourceMetricStatus.libsonnet +++ /dev/null @@ -1,17 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='resourceMetricStatus', url='', help='"ResourceMetricStatus indicates the current value of a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). 
Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \\"pods\\" source."'), - '#current':: d.obj(help='"MetricValueStatus holds the current value for a metric"'), - current: { - '#withAverageUtilization':: d.fn(help='"currentAverageUtilization is the current value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods."', args=[d.arg(name='averageUtilization', type=d.T.integer)]), - withAverageUtilization(averageUtilization): { current+: { averageUtilization: averageUtilization } }, - '#withAverageValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. 
(So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='averageValue', type=d.T.string)]), - withAverageValue(averageValue): { current+: { averageValue: averageValue } }, - '#withValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. 
(So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='value', type=d.T.string)]), - withValue(value): { current+: { value: value } }, - }, - '#withName':: d.fn(help='"Name is the name of the resource in question."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { name: name }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/batch/v1/cronJobStatus.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/batch/v1/cronJobStatus.libsonnet deleted file mode 100644 index 165dfd4dbca..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/batch/v1/cronJobStatus.libsonnet +++ /dev/null @@ -1,14 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='cronJobStatus', url='', help='"CronJobStatus represents the current state of a cron job."'), - '#withActive':: d.fn(help='"A list of pointers to currently running jobs."', args=[d.arg(name='active', type=d.T.array)]), - withActive(active): { active: if std.isArray(v=active) then active else [active] }, - '#withActiveMixin':: d.fn(help='"A list of pointers to currently running jobs."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='active', type=d.T.array)]), - withActiveMixin(active): { active+: if std.isArray(v=active) then active else [active] }, - '#withLastScheduleTime':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='lastScheduleTime', type=d.T.string)]), - withLastScheduleTime(lastScheduleTime): { lastScheduleTime: lastScheduleTime }, - '#withLastSuccessfulTime':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='lastSuccessfulTime', type=d.T.string)]), - withLastSuccessfulTime(lastSuccessfulTime): { lastSuccessfulTime: lastSuccessfulTime }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/batch/v1beta1/cronJob.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/batch/v1beta1/cronJob.libsonnet deleted file mode 100644 index ed443054b55..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/batch/v1beta1/cronJob.libsonnet +++ /dev/null @@ -1,388 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='cronJob', url='', help='"CronJob represents the configuration of a single cron job."'), - '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), - metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. 
More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), - withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), - withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, - '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), - withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, - '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), - withDeletionGracePeriodSeconds(deletionGracePeriodSeconds): { metadata+: { deletionGracePeriodSeconds: deletionGracePeriodSeconds } }, - '#withDeletionTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='deletionTimestamp', type=d.T.string)]), - withDeletionTimestamp(deletionTimestamp): { metadata+: { deletionTimestamp: deletionTimestamp } }, - '#withFinalizers':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. 
Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), - withGenerateName(generateName): { metadata+: { generateName: generateName } }, - '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), - withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), - withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), - withLabelsMixin(labels): { metadata+: { labels+: labels } }, - '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. 
A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { metadata+: { namespace: namespace } }, - '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, - '#withOwnerReferencesMixin':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. 
There cannot be more than one managing controller."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, - '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), - withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), - withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), - withUid(uid): { metadata+: { uid: uid } }, - }, - '#new':: d.fn(help='new returns an instance of CronJob', args=[d.arg(name='name', type=d.T.string)]), - new(name): { - apiVersion: 'batch/v1beta1', - kind: 'CronJob', - } + self.metadata.withName(name=name), - '#spec':: d.obj(help='"CronJobSpec describes how the job execution will look like and when it will actually run."'), - spec: { - '#jobTemplate':: d.obj(help='"JobTemplateSpec describes the data a Job should have when created from a template"'), - jobTemplate: { - '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), - metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), - withAnnotations(annotations): { spec+: { jobTemplate+: { metadata+: { annotations: annotations } } } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. 
More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), - withAnnotationsMixin(annotations): { spec+: { jobTemplate+: { metadata+: { annotations+: annotations } } } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { spec+: { jobTemplate+: { metadata+: { clusterName: clusterName } } } }, - '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), - withCreationTimestamp(creationTimestamp): { spec+: { jobTemplate+: { metadata+: { creationTimestamp: creationTimestamp } } } }, - '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), - withDeletionGracePeriodSeconds(deletionGracePeriodSeconds): { spec+: { jobTemplate+: { metadata+: { deletionGracePeriodSeconds: deletionGracePeriodSeconds } } } }, - '#withDeletionTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='deletionTimestamp', type=d.T.string)]), - withDeletionTimestamp(deletionTimestamp): { spec+: { jobTemplate+: { metadata+: { deletionTimestamp: deletionTimestamp } } } }, - '#withFinalizers':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizers(finalizers): { spec+: { jobTemplate+: { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } } } }, - '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. 
Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizersMixin(finalizers): { spec+: { jobTemplate+: { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } } } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), - withGenerateName(generateName): { spec+: { jobTemplate+: { metadata+: { generateName: generateName } } } }, - '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), - withGeneration(generation): { spec+: { jobTemplate+: { metadata+: { generation: generation } } } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), - withLabels(labels): { spec+: { jobTemplate+: { metadata+: { labels: labels } } } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), - withLabelsMixin(labels): { spec+: { jobTemplate+: { metadata+: { labels+: labels } } } }, - '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". 
The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFields(managedFields): { spec+: { jobTemplate+: { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } } } }, - '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFieldsMixin(managedFields): { spec+: { jobTemplate+: { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } } } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { spec+: { jobTemplate+: { metadata+: { name: name } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { spec+: { jobTemplate+: { metadata+: { namespace: namespace } } } }, - '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferences(ownerReferences): { spec+: { jobTemplate+: { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } } } }, - '#withOwnerReferencesMixin':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. 
There cannot be more than one managing controller."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferencesMixin(ownerReferences): { spec+: { jobTemplate+: { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } } } }, - '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), - withResourceVersion(resourceVersion): { spec+: { jobTemplate+: { metadata+: { resourceVersion: resourceVersion } } } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), - withSelfLink(selfLink): { spec+: { jobTemplate+: { metadata+: { selfLink: selfLink } } } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), - withUid(uid): { spec+: { jobTemplate+: { metadata+: { uid: uid } } } }, - }, - '#spec':: d.obj(help='"JobSpec describes how the job execution will look like."'), - spec: { - '#selector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), - selector: { - '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), - withMatchExpressions(matchExpressions): { spec+: { jobTemplate+: { spec+: { selector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } } } }, - '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), - withMatchExpressionsMixin(matchExpressions): { spec+: { jobTemplate+: { spec+: { selector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } } } }, - '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". 
The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), - withMatchLabels(matchLabels): { spec+: { jobTemplate+: { spec+: { selector+: { matchLabels: matchLabels } } } } }, - '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), - withMatchLabelsMixin(matchLabels): { spec+: { jobTemplate+: { spec+: { selector+: { matchLabels+: matchLabels } } } } }, - }, - '#template':: d.obj(help='"PodTemplateSpec describes the data a pod should have when created from a template"'), - template: { - '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), - metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), - withAnnotations(annotations): { spec+: { jobTemplate+: { spec+: { template+: { metadata+: { annotations: annotations } } } } } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), - withAnnotationsMixin(annotations): { spec+: { jobTemplate+: { spec+: { template+: { metadata+: { annotations+: annotations } } } } } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { spec+: { jobTemplate+: { spec+: { template+: { metadata+: { clusterName: clusterName } } } } } }, - '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), - withCreationTimestamp(creationTimestamp): { spec+: { jobTemplate+: { spec+: { template+: { metadata+: { creationTimestamp: creationTimestamp } } } } } }, - '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. 
Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), - withDeletionGracePeriodSeconds(deletionGracePeriodSeconds): { spec+: { jobTemplate+: { spec+: { template+: { metadata+: { deletionGracePeriodSeconds: deletionGracePeriodSeconds } } } } } }, - '#withDeletionTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='deletionTimestamp', type=d.T.string)]), - withDeletionTimestamp(deletionTimestamp): { spec+: { jobTemplate+: { spec+: { template+: { metadata+: { deletionTimestamp: deletionTimestamp } } } } } }, - '#withFinalizers':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizers(finalizers): { spec+: { jobTemplate+: { spec+: { template+: { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } } } } } }, - '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizersMixin(finalizers): { spec+: { jobTemplate+: { spec+: { template+: { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } } } } } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. 
The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), - withGenerateName(generateName): { spec+: { jobTemplate+: { spec+: { template+: { metadata+: { generateName: generateName } } } } } }, - '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), - withGeneration(generation): { spec+: { jobTemplate+: { spec+: { template+: { metadata+: { generation: generation } } } } } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), - withLabels(labels): { spec+: { jobTemplate+: { spec+: { template+: { metadata+: { labels: labels } } } } } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), - withLabelsMixin(labels): { spec+: { jobTemplate+: { spec+: { template+: { metadata+: { labels+: labels } } } } } }, - '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFields(managedFields): { spec+: { jobTemplate+: { spec+: { template+: { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } } } } } }, - '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". 
The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFieldsMixin(managedFields): { spec+: { jobTemplate+: { spec+: { template+: { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } } } } } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { spec+: { jobTemplate+: { spec+: { template+: { metadata+: { name: name } } } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { spec+: { jobTemplate+: { spec+: { template+: { metadata+: { namespace: namespace } } } } } }, - '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferences(ownerReferences): { spec+: { jobTemplate+: { spec+: { template+: { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } } } } } }, - '#withOwnerReferencesMixin':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferencesMixin(ownerReferences): { spec+: { jobTemplate+: { spec+: { template+: { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } } } } } }, - '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), - withResourceVersion(resourceVersion): { spec+: { jobTemplate+: { spec+: { template+: { metadata+: { resourceVersion: resourceVersion } } } } } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), - withSelfLink(selfLink): { spec+: { jobTemplate+: { spec+: { template+: { metadata+: { selfLink: selfLink } } } } } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), - withUid(uid): { spec+: { jobTemplate+: { spec+: { template+: { metadata+: { uid: uid } } } } } }, - }, - '#spec':: d.obj(help='"PodSpec is a description of a pod."'), - spec: { - '#affinity':: d.obj(help='"Affinity is a group of affinity scheduling rules."'), - affinity: { - '#nodeAffinity':: d.obj(help='"Node affinity is a group of node affinity scheduling rules."'), - nodeAffinity: { - '#requiredDuringSchedulingIgnoredDuringExecution':: d.obj(help='"A node selector represents the union of the results of one or more label queries over a set of nodes; that is, it represents the OR of the selectors represented by the node selector terms."'), - requiredDuringSchedulingIgnoredDuringExecution: { - '#withNodeSelectorTerms':: d.fn(help='"Required. A list of node selector terms. The terms are ORed."', args=[d.arg(name='nodeSelectorTerms', type=d.T.array)]), - withNodeSelectorTerms(nodeSelectorTerms): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { affinity+: { nodeAffinity+: { requiredDuringSchedulingIgnoredDuringExecution+: { nodeSelectorTerms: if std.isArray(v=nodeSelectorTerms) then nodeSelectorTerms else [nodeSelectorTerms] } } } } } } } } }, - '#withNodeSelectorTermsMixin':: d.fn(help='"Required. A list of node selector terms. The terms are ORed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='nodeSelectorTerms', type=d.T.array)]), - withNodeSelectorTermsMixin(nodeSelectorTerms): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { affinity+: { nodeAffinity+: { requiredDuringSchedulingIgnoredDuringExecution+: { nodeSelectorTerms+: if std.isArray(v=nodeSelectorTerms) then nodeSelectorTerms else [nodeSelectorTerms] } } } } } } } } }, - }, - '#withPreferredDuringSchedulingIgnoredDuringExecution':: d.fn(help='"The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. 
for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding \\"weight\\" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred."', args=[d.arg(name='preferredDuringSchedulingIgnoredDuringExecution', type=d.T.array)]), - withPreferredDuringSchedulingIgnoredDuringExecution(preferredDuringSchedulingIgnoredDuringExecution): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { affinity+: { nodeAffinity+: { preferredDuringSchedulingIgnoredDuringExecution: if std.isArray(v=preferredDuringSchedulingIgnoredDuringExecution) then preferredDuringSchedulingIgnoredDuringExecution else [preferredDuringSchedulingIgnoredDuringExecution] } } } } } } } }, - '#withPreferredDuringSchedulingIgnoredDuringExecutionMixin':: d.fn(help='"The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding \\"weight\\" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='preferredDuringSchedulingIgnoredDuringExecution', type=d.T.array)]), - withPreferredDuringSchedulingIgnoredDuringExecutionMixin(preferredDuringSchedulingIgnoredDuringExecution): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { affinity+: { nodeAffinity+: { preferredDuringSchedulingIgnoredDuringExecution+: if std.isArray(v=preferredDuringSchedulingIgnoredDuringExecution) then preferredDuringSchedulingIgnoredDuringExecution else [preferredDuringSchedulingIgnoredDuringExecution] } } } } } } } }, - }, - '#podAffinity':: d.obj(help='"Pod affinity is a group of inter pod affinity scheduling rules."'), - podAffinity: { - '#withPreferredDuringSchedulingIgnoredDuringExecution':: d.fn(help='"The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. 
for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding \\"weight\\" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred."', args=[d.arg(name='preferredDuringSchedulingIgnoredDuringExecution', type=d.T.array)]), - withPreferredDuringSchedulingIgnoredDuringExecution(preferredDuringSchedulingIgnoredDuringExecution): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { affinity+: { podAffinity+: { preferredDuringSchedulingIgnoredDuringExecution: if std.isArray(v=preferredDuringSchedulingIgnoredDuringExecution) then preferredDuringSchedulingIgnoredDuringExecution else [preferredDuringSchedulingIgnoredDuringExecution] } } } } } } } }, - '#withPreferredDuringSchedulingIgnoredDuringExecutionMixin':: d.fn(help='"The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding \\"weight\\" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='preferredDuringSchedulingIgnoredDuringExecution', type=d.T.array)]), - withPreferredDuringSchedulingIgnoredDuringExecutionMixin(preferredDuringSchedulingIgnoredDuringExecution): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { affinity+: { podAffinity+: { preferredDuringSchedulingIgnoredDuringExecution+: if std.isArray(v=preferredDuringSchedulingIgnoredDuringExecution) then preferredDuringSchedulingIgnoredDuringExecution else [preferredDuringSchedulingIgnoredDuringExecution] } } } } } } } }, - '#withRequiredDuringSchedulingIgnoredDuringExecution':: d.fn(help='"If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied."', args=[d.arg(name='requiredDuringSchedulingIgnoredDuringExecution', type=d.T.array)]), - withRequiredDuringSchedulingIgnoredDuringExecution(requiredDuringSchedulingIgnoredDuringExecution): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { affinity+: { podAffinity+: { requiredDuringSchedulingIgnoredDuringExecution: if std.isArray(v=requiredDuringSchedulingIgnoredDuringExecution) then requiredDuringSchedulingIgnoredDuringExecution else [requiredDuringSchedulingIgnoredDuringExecution] } } } } } } } }, - '#withRequiredDuringSchedulingIgnoredDuringExecutionMixin':: d.fn(help='"If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. 
due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='requiredDuringSchedulingIgnoredDuringExecution', type=d.T.array)]), - withRequiredDuringSchedulingIgnoredDuringExecutionMixin(requiredDuringSchedulingIgnoredDuringExecution): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { affinity+: { podAffinity+: { requiredDuringSchedulingIgnoredDuringExecution+: if std.isArray(v=requiredDuringSchedulingIgnoredDuringExecution) then requiredDuringSchedulingIgnoredDuringExecution else [requiredDuringSchedulingIgnoredDuringExecution] } } } } } } } }, - }, - '#podAntiAffinity':: d.obj(help='"Pod anti affinity is a group of inter pod anti affinity scheduling rules."'), - podAntiAffinity: { - '#withPreferredDuringSchedulingIgnoredDuringExecution':: d.fn(help='"The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding \\"weight\\" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred."', args=[d.arg(name='preferredDuringSchedulingIgnoredDuringExecution', type=d.T.array)]), - withPreferredDuringSchedulingIgnoredDuringExecution(preferredDuringSchedulingIgnoredDuringExecution): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { affinity+: { podAntiAffinity+: { preferredDuringSchedulingIgnoredDuringExecution: if std.isArray(v=preferredDuringSchedulingIgnoredDuringExecution) then preferredDuringSchedulingIgnoredDuringExecution else [preferredDuringSchedulingIgnoredDuringExecution] } } } } } } } }, - '#withPreferredDuringSchedulingIgnoredDuringExecutionMixin':: d.fn(help='"The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. 
for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding \\"weight\\" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='preferredDuringSchedulingIgnoredDuringExecution', type=d.T.array)]), - withPreferredDuringSchedulingIgnoredDuringExecutionMixin(preferredDuringSchedulingIgnoredDuringExecution): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { affinity+: { podAntiAffinity+: { preferredDuringSchedulingIgnoredDuringExecution+: if std.isArray(v=preferredDuringSchedulingIgnoredDuringExecution) then preferredDuringSchedulingIgnoredDuringExecution else [preferredDuringSchedulingIgnoredDuringExecution] } } } } } } } }, - '#withRequiredDuringSchedulingIgnoredDuringExecution':: d.fn(help='"If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied."', args=[d.arg(name='requiredDuringSchedulingIgnoredDuringExecution', type=d.T.array)]), - withRequiredDuringSchedulingIgnoredDuringExecution(requiredDuringSchedulingIgnoredDuringExecution): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { affinity+: { podAntiAffinity+: { requiredDuringSchedulingIgnoredDuringExecution: if std.isArray(v=requiredDuringSchedulingIgnoredDuringExecution) then requiredDuringSchedulingIgnoredDuringExecution else [requiredDuringSchedulingIgnoredDuringExecution] } } } } } } } }, - '#withRequiredDuringSchedulingIgnoredDuringExecutionMixin':: d.fn(help='"If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='requiredDuringSchedulingIgnoredDuringExecution', type=d.T.array)]), - withRequiredDuringSchedulingIgnoredDuringExecutionMixin(requiredDuringSchedulingIgnoredDuringExecution): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { affinity+: { podAntiAffinity+: { requiredDuringSchedulingIgnoredDuringExecution+: if std.isArray(v=requiredDuringSchedulingIgnoredDuringExecution) then requiredDuringSchedulingIgnoredDuringExecution else [requiredDuringSchedulingIgnoredDuringExecution] } } } } } } } }, - }, - }, - '#dnsConfig':: d.obj(help='"PodDNSConfig defines the DNS parameters of a pod in addition to those generated from DNSPolicy."'), - dnsConfig: { - '#withNameservers':: d.fn(help='"A list of DNS name server IP addresses. This will be appended to the base nameservers generated from DNSPolicy. 
Duplicated nameservers will be removed."', args=[d.arg(name='nameservers', type=d.T.array)]), - withNameservers(nameservers): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { dnsConfig+: { nameservers: if std.isArray(v=nameservers) then nameservers else [nameservers] } } } } } } }, - '#withNameserversMixin':: d.fn(help='"A list of DNS name server IP addresses. This will be appended to the base nameservers generated from DNSPolicy. Duplicated nameservers will be removed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='nameservers', type=d.T.array)]), - withNameserversMixin(nameservers): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { dnsConfig+: { nameservers+: if std.isArray(v=nameservers) then nameservers else [nameservers] } } } } } } }, - '#withOptions':: d.fn(help='"A list of DNS resolver options. This will be merged with the base options generated from DNSPolicy. Duplicated entries will be removed. Resolution options given in Options will override those that appear in the base DNSPolicy."', args=[d.arg(name='options', type=d.T.array)]), - withOptions(options): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { dnsConfig+: { options: if std.isArray(v=options) then options else [options] } } } } } } }, - '#withOptionsMixin':: d.fn(help='"A list of DNS resolver options. This will be merged with the base options generated from DNSPolicy. Duplicated entries will be removed. Resolution options given in Options will override those that appear in the base DNSPolicy."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='options', type=d.T.array)]), - withOptionsMixin(options): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { dnsConfig+: { options+: if std.isArray(v=options) then options else [options] } } } } } } }, - '#withSearches':: d.fn(help='"A list of DNS search domains for host-name lookup. This will be appended to the base search paths generated from DNSPolicy. Duplicated search paths will be removed."', args=[d.arg(name='searches', type=d.T.array)]), - withSearches(searches): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { dnsConfig+: { searches: if std.isArray(v=searches) then searches else [searches] } } } } } } }, - '#withSearchesMixin':: d.fn(help='"A list of DNS search domains for host-name lookup. This will be appended to the base search paths generated from DNSPolicy. Duplicated search paths will be removed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='searches', type=d.T.array)]), - withSearchesMixin(searches): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { dnsConfig+: { searches+: if std.isArray(v=searches) then searches else [searches] } } } } } } }, - }, - '#securityContext':: d.obj(help='"PodSecurityContext holds pod-level security attributes and common container settings. Some fields are also present in container.securityContext. 
Field values of container.securityContext take precedence over field values of PodSecurityContext."'), - securityContext: { - '#seLinuxOptions':: d.obj(help='"SELinuxOptions are the labels to be applied to the container"'), - seLinuxOptions: { - '#withLevel':: d.fn(help='"Level is SELinux level label that applies to the container."', args=[d.arg(name='level', type=d.T.string)]), - withLevel(level): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { securityContext+: { seLinuxOptions+: { level: level } } } } } } } }, - '#withRole':: d.fn(help='"Role is a SELinux role label that applies to the container."', args=[d.arg(name='role', type=d.T.string)]), - withRole(role): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { securityContext+: { seLinuxOptions+: { role: role } } } } } } } }, - '#withType':: d.fn(help='"Type is a SELinux type label that applies to the container."', args=[d.arg(name='type', type=d.T.string)]), - withType(type): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { securityContext+: { seLinuxOptions+: { type: type } } } } } } } }, - '#withUser':: d.fn(help='"User is a SELinux user label that applies to the container."', args=[d.arg(name='user', type=d.T.string)]), - withUser(user): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { securityContext+: { seLinuxOptions+: { user: user } } } } } } } }, - }, - '#seccompProfile':: d.obj(help="\"SeccompProfile defines a pod/container's seccomp profile settings. Only one profile source may be set.\""), - seccompProfile: { - '#withLocalhostProfile':: d.fn(help="\"localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must only be set if type is \\\"Localhost\\\".\"", args=[d.arg(name='localhostProfile', type=d.T.string)]), - withLocalhostProfile(localhostProfile): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { securityContext+: { seccompProfile+: { localhostProfile: localhostProfile } } } } } } } }, - '#withType':: d.fn(help='"type indicates which kind of seccomp profile will be applied. Valid options are:\\n\\nLocalhost - a profile defined in a file on the node should be used. RuntimeDefault - the container runtime default profile should be used. 
Unconfined - no profile should be applied."', args=[d.arg(name='type', type=d.T.string)]), - withType(type): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { securityContext+: { seccompProfile+: { type: type } } } } } } } }, - }, - '#windowsOptions':: d.obj(help='"WindowsSecurityContextOptions contain Windows-specific options and credentials."'), - windowsOptions: { - '#withGmsaCredentialSpec':: d.fn(help='"GMSACredentialSpec is where the GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the GMSA credential spec named by the GMSACredentialSpecName field."', args=[d.arg(name='gmsaCredentialSpec', type=d.T.string)]), - withGmsaCredentialSpec(gmsaCredentialSpec): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { securityContext+: { windowsOptions+: { gmsaCredentialSpec: gmsaCredentialSpec } } } } } } } }, - '#withGmsaCredentialSpecName':: d.fn(help='"GMSACredentialSpecName is the name of the GMSA credential spec to use."', args=[d.arg(name='gmsaCredentialSpecName', type=d.T.string)]), - withGmsaCredentialSpecName(gmsaCredentialSpecName): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { securityContext+: { windowsOptions+: { gmsaCredentialSpecName: gmsaCredentialSpecName } } } } } } } }, - '#withRunAsUserName':: d.fn(help='"The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence."', args=[d.arg(name='runAsUserName', type=d.T.string)]), - withRunAsUserName(runAsUserName): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { securityContext+: { windowsOptions+: { runAsUserName: runAsUserName } } } } } } } }, - }, - '#withFsGroup':: d.fn(help="\"A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod:\\n\\n1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw----\\n\\nIf unset, the Kubelet will not modify the ownership and permissions of any volume.\"", args=[d.arg(name='fsGroup', type=d.T.integer)]), - withFsGroup(fsGroup): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { securityContext+: { fsGroup: fsGroup } } } } } } }, - '#withFsGroupChangePolicy':: d.fn(help='"fsGroupChangePolicy defines behavior of changing ownership and permission of the volume before being exposed inside Pod. This field will only apply to volume types which support fsGroup based ownership(and permissions). It will have no effect on ephemeral volume types such as: secret, configmaps and emptydir. Valid values are \\"OnRootMismatch\\" and \\"Always\\". If not specified, \\"Always\\" is used."', args=[d.arg(name='fsGroupChangePolicy', type=d.T.string)]), - withFsGroupChangePolicy(fsGroupChangePolicy): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { securityContext+: { fsGroupChangePolicy: fsGroupChangePolicy } } } } } } }, - '#withRunAsGroup':: d.fn(help='"The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext. 
If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container."', args=[d.arg(name='runAsGroup', type=d.T.integer)]), - withRunAsGroup(runAsGroup): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { securityContext+: { runAsGroup: runAsGroup } } } } } } }, - '#withRunAsNonRoot':: d.fn(help='"Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence."', args=[d.arg(name='runAsNonRoot', type=d.T.boolean)]), - withRunAsNonRoot(runAsNonRoot): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { securityContext+: { runAsNonRoot: runAsNonRoot } } } } } } }, - '#withRunAsUser':: d.fn(help='"The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container."', args=[d.arg(name='runAsUser', type=d.T.integer)]), - withRunAsUser(runAsUser): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { securityContext+: { runAsUser: runAsUser } } } } } } }, - '#withSupplementalGroups':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID. If unspecified, no groups will be added to any container.\"", args=[d.arg(name='supplementalGroups', type=d.T.array)]), - withSupplementalGroups(supplementalGroups): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { securityContext+: { supplementalGroups: if std.isArray(v=supplementalGroups) then supplementalGroups else [supplementalGroups] } } } } } } }, - '#withSupplementalGroupsMixin':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID. If unspecified, no groups will be added to any container.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='supplementalGroups', type=d.T.array)]), - withSupplementalGroupsMixin(supplementalGroups): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { securityContext+: { supplementalGroups+: if std.isArray(v=supplementalGroups) then supplementalGroups else [supplementalGroups] } } } } } } }, - '#withSysctls':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch."', args=[d.arg(name='sysctls', type=d.T.array)]), - withSysctls(sysctls): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { securityContext+: { sysctls: if std.isArray(v=sysctls) then sysctls else [sysctls] } } } } } } }, - '#withSysctlsMixin':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. 
Pods with unsupported sysctls (by the container runtime) might fail to launch."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='sysctls', type=d.T.array)]), - withSysctlsMixin(sysctls): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { securityContext+: { sysctls+: if std.isArray(v=sysctls) then sysctls else [sysctls] } } } } } } }, - }, - '#withActiveDeadlineSeconds':: d.fn(help='"Optional duration in seconds the pod may be active on the node relative to StartTime before the system will actively try to mark it failed and kill associated containers. Value must be a positive integer."', args=[d.arg(name='activeDeadlineSeconds', type=d.T.integer)]), - withActiveDeadlineSeconds(activeDeadlineSeconds): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { activeDeadlineSeconds: activeDeadlineSeconds } } } } } }, - '#withAutomountServiceAccountToken':: d.fn(help='"AutomountServiceAccountToken indicates whether a service account token should be automatically mounted."', args=[d.arg(name='automountServiceAccountToken', type=d.T.boolean)]), - withAutomountServiceAccountToken(automountServiceAccountToken): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { automountServiceAccountToken: automountServiceAccountToken } } } } } }, - '#withContainers':: d.fn(help='"List of containers belonging to the pod. Containers cannot currently be added or removed. There must be at least one container in a Pod. Cannot be updated."', args=[d.arg(name='containers', type=d.T.array)]), - withContainers(containers): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { containers: if std.isArray(v=containers) then containers else [containers] } } } } } }, - '#withContainersMixin':: d.fn(help='"List of containers belonging to the pod. Containers cannot currently be added or removed. There must be at least one container in a Pod. Cannot be updated."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='containers', type=d.T.array)]), - withContainersMixin(containers): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { containers+: if std.isArray(v=containers) then containers else [containers] } } } } } }, - '#withDnsPolicy':: d.fn(help="\"Set DNS policy for the pod. Defaults to \\\"ClusterFirst\\\". Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. To have DNS options set along with hostNetwork, you have to specify DNS policy explicitly to 'ClusterFirstWithHostNet'.\"", args=[d.arg(name='dnsPolicy', type=d.T.string)]), - withDnsPolicy(dnsPolicy): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { dnsPolicy: dnsPolicy } } } } } }, - '#withEnableServiceLinks':: d.fn(help="\"EnableServiceLinks indicates whether information about services should be injected into pod's environment variables, matching the syntax of Docker links. Optional: Defaults to true.\"", args=[d.arg(name='enableServiceLinks', type=d.T.boolean)]), - withEnableServiceLinks(enableServiceLinks): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { enableServiceLinks: enableServiceLinks } } } } } }, - '#withEphemeralContainers':: d.fn(help="\"List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. 
In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. This field is alpha-level and is only honored by servers that enable the EphemeralContainers feature.\"", args=[d.arg(name='ephemeralContainers', type=d.T.array)]), - withEphemeralContainers(ephemeralContainers): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { ephemeralContainers: if std.isArray(v=ephemeralContainers) then ephemeralContainers else [ephemeralContainers] } } } } } }, - '#withEphemeralContainersMixin':: d.fn(help="\"List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. This field is alpha-level and is only honored by servers that enable the EphemeralContainers feature.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='ephemeralContainers', type=d.T.array)]), - withEphemeralContainersMixin(ephemeralContainers): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { ephemeralContainers+: if std.isArray(v=ephemeralContainers) then ephemeralContainers else [ephemeralContainers] } } } } } }, - '#withHostAliases':: d.fn(help="\"HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts file if specified. This is only valid for non-hostNetwork pods.\"", args=[d.arg(name='hostAliases', type=d.T.array)]), - withHostAliases(hostAliases): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { hostAliases: if std.isArray(v=hostAliases) then hostAliases else [hostAliases] } } } } } }, - '#withHostAliasesMixin':: d.fn(help="\"HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts file if specified. This is only valid for non-hostNetwork pods.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='hostAliases', type=d.T.array)]), - withHostAliasesMixin(hostAliases): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { hostAliases+: if std.isArray(v=hostAliases) then hostAliases else [hostAliases] } } } } } }, - '#withHostIPC':: d.fn(help="\"Use the host's ipc namespace. Optional: Default to false.\"", args=[d.arg(name='hostIPC', type=d.T.boolean)]), - withHostIPC(hostIPC): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { hostIPC: hostIPC } } } } } }, - '#withHostNetwork':: d.fn(help="\"Host networking requested for this pod. Use the host's network namespace. If this option is set, the ports that will be used must be specified. Default to false.\"", args=[d.arg(name='hostNetwork', type=d.T.boolean)]), - withHostNetwork(hostNetwork): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { hostNetwork: hostNetwork } } } } } }, - '#withHostPID':: d.fn(help="\"Use the host's pid namespace. 
Optional: Default to false.\"", args=[d.arg(name='hostPID', type=d.T.boolean)]), - withHostPID(hostPID): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { hostPID: hostPID } } } } } }, - '#withHostname':: d.fn(help="\"Specifies the hostname of the Pod If not specified, the pod's hostname will be set to a system-defined value.\"", args=[d.arg(name='hostname', type=d.T.string)]), - withHostname(hostname): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { hostname: hostname } } } } } }, - '#withImagePullSecrets':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), - withImagePullSecrets(imagePullSecrets): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { imagePullSecrets: if std.isArray(v=imagePullSecrets) then imagePullSecrets else [imagePullSecrets] } } } } } }, - '#withImagePullSecretsMixin':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), - withImagePullSecretsMixin(imagePullSecrets): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { imagePullSecrets+: if std.isArray(v=imagePullSecrets) then imagePullSecrets else [imagePullSecrets] } } } } } }, - '#withInitContainers':: d.fn(help='"List of initialization containers belonging to the pod. Init containers are executed in order prior to containers being started. If any init container fails, the pod is considered to have failed and is handled according to its restartPolicy. The name for an init container or normal container must be unique among all containers. Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes. The resourceRequirements of an init container are taken into account during scheduling by finding the highest request/limit for each resource type, and then using the max of of that value or the sum of the normal containers. Limits are applied to init containers in a similar fashion. Init containers cannot currently be added or removed. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/"', args=[d.arg(name='initContainers', type=d.T.array)]), - withInitContainers(initContainers): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { initContainers: if std.isArray(v=initContainers) then initContainers else [initContainers] } } } } } }, - '#withInitContainersMixin':: d.fn(help='"List of initialization containers belonging to the pod. Init containers are executed in order prior to containers being started. If any init container fails, the pod is considered to have failed and is handled according to its restartPolicy. 
The name for an init container or normal container must be unique among all containers. Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes. The resourceRequirements of an init container are taken into account during scheduling by finding the highest request/limit for each resource type, and then using the max of of that value or the sum of the normal containers. Limits are applied to init containers in a similar fashion. Init containers cannot currently be added or removed. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='initContainers', type=d.T.array)]), - withInitContainersMixin(initContainers): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { initContainers+: if std.isArray(v=initContainers) then initContainers else [initContainers] } } } } } }, - '#withNodeName':: d.fn(help='"NodeName is a request to schedule this pod onto a specific node. If it is non-empty, the scheduler simply schedules this pod onto that node, assuming that it fits resource requirements."', args=[d.arg(name='nodeName', type=d.T.string)]), - withNodeName(nodeName): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { nodeName: nodeName } } } } } }, - '#withNodeSelector':: d.fn(help="\"NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/\"", args=[d.arg(name='nodeSelector', type=d.T.object)]), - withNodeSelector(nodeSelector): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { nodeSelector: nodeSelector } } } } } }, - '#withNodeSelectorMixin':: d.fn(help="\"NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='nodeSelector', type=d.T.object)]), - withNodeSelectorMixin(nodeSelector): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { nodeSelector+: nodeSelector } } } } } }, - '#withOverhead':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md This field is alpha-level as of Kubernetes v1.16, and is only honored by servers that enable the PodOverhead feature."', args=[d.arg(name='overhead', type=d.T.object)]), - withOverhead(overhead): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { overhead: overhead } } } } } }, - '#withOverheadMixin':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. 
This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md This field is alpha-level as of Kubernetes v1.16, and is only honored by servers that enable the PodOverhead feature."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='overhead', type=d.T.object)]), - withOverheadMixin(overhead): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { overhead+: overhead } } } } } }, - '#withPreemptionPolicy':: d.fn(help='"PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset. This field is beta-level, gated by the NonPreemptingPriority feature-gate."', args=[d.arg(name='preemptionPolicy', type=d.T.string)]), - withPreemptionPolicy(preemptionPolicy): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { preemptionPolicy: preemptionPolicy } } } } } }, - '#withPriority':: d.fn(help='"The priority value. Various system components use this field to find the priority of the pod. When Priority Admission Controller is enabled, it prevents users from setting this field. The admission controller populates this field from PriorityClassName. The higher the value, the higher the priority."', args=[d.arg(name='priority', type=d.T.integer)]), - withPriority(priority): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { priority: priority } } } } } }, - '#withPriorityClassName':: d.fn(help="\"If specified, indicates the pod's priority. \\\"system-node-critical\\\" and \\\"system-cluster-critical\\\" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default.\"", args=[d.arg(name='priorityClassName', type=d.T.string)]), - withPriorityClassName(priorityClassName): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { priorityClassName: priorityClassName } } } } } }, - '#withReadinessGates':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/0007-pod-ready%2B%2B.md"', args=[d.arg(name='readinessGates', type=d.T.array)]), - withReadinessGates(readinessGates): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { readinessGates: if std.isArray(v=readinessGates) then readinessGates else [readinessGates] } } } } } }, - '#withReadinessGatesMixin':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. 
A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/0007-pod-ready%2B%2B.md"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='readinessGates', type=d.T.array)]), - withReadinessGatesMixin(readinessGates): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { readinessGates+: if std.isArray(v=readinessGates) then readinessGates else [readinessGates] } } } } } }, - '#withRestartPolicy':: d.fn(help='"Restart policy for all containers within the pod. One of Always, OnFailure, Never. Default to Always. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy"', args=[d.arg(name='restartPolicy', type=d.T.string)]), - withRestartPolicy(restartPolicy): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { restartPolicy: restartPolicy } } } } } }, - '#withRuntimeClassName':: d.fn(help='"RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \\"legacy\\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md This is a beta feature as of Kubernetes v1.14."', args=[d.arg(name='runtimeClassName', type=d.T.string)]), - withRuntimeClassName(runtimeClassName): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { runtimeClassName: runtimeClassName } } } } } }, - '#withSchedulerName':: d.fn(help='"If specified, the pod will be dispatched by specified scheduler. If not specified, the pod will be dispatched by default scheduler."', args=[d.arg(name='schedulerName', type=d.T.string)]), - withSchedulerName(schedulerName): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { schedulerName: schedulerName } } } } } }, - '#withServiceAccount':: d.fn(help='"DeprecatedServiceAccount is a depreciated alias for ServiceAccountName. Deprecated: Use serviceAccountName instead."', args=[d.arg(name='serviceAccount', type=d.T.string)]), - withServiceAccount(serviceAccount): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { serviceAccount: serviceAccount } } } } } }, - '#withServiceAccountName':: d.fn(help='"ServiceAccountName is the name of the ServiceAccount to use to run this pod. More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/"', args=[d.arg(name='serviceAccountName', type=d.T.string)]), - withServiceAccountName(serviceAccountName): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { serviceAccountName: serviceAccountName } } } } } }, - '#withSetHostnameAsFQDN':: d.fn(help="\"If true the pod's hostname will be configured as the pod's FQDN, rather than the leaf name (the default). In Linux containers, this means setting the FQDN in the hostname field of the kernel (the nodename field of struct utsname). In Windows containers, this means setting the registry value of hostname for the registry key HKEY_LOCAL_MACHINE\\\\SYSTEM\\\\CurrentControlSet\\\\Services\\\\Tcpip\\\\Parameters to FQDN. If a pod does not have FQDN, this has no effect. 
Default to false.\"", args=[d.arg(name='setHostnameAsFQDN', type=d.T.boolean)]), - withSetHostnameAsFQDN(setHostnameAsFQDN): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { setHostnameAsFQDN: setHostnameAsFQDN } } } } } }, - '#withShareProcessNamespace':: d.fn(help='"Share a single process namespace between all of the containers in a pod. When this is set containers will be able to view and signal processes from other containers in the same pod, and the first process in each container will not be assigned PID 1. HostPID and ShareProcessNamespace cannot both be set. Optional: Default to false."', args=[d.arg(name='shareProcessNamespace', type=d.T.boolean)]), - withShareProcessNamespace(shareProcessNamespace): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { shareProcessNamespace: shareProcessNamespace } } } } } }, - '#withSubdomain':: d.fn(help='"If specified, the fully qualified Pod hostname will be \\"...svc.\\". If not specified, the pod will not have a domainname at all."', args=[d.arg(name='subdomain', type=d.T.string)]), - withSubdomain(subdomain): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { subdomain: subdomain } } } } } }, - '#withTerminationGracePeriodSeconds':: d.fn(help='"Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). If this value is nil, the default grace period will be used instead. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. Defaults to 30 seconds."', args=[d.arg(name='terminationGracePeriodSeconds', type=d.T.integer)]), - withTerminationGracePeriodSeconds(terminationGracePeriodSeconds): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { terminationGracePeriodSeconds: terminationGracePeriodSeconds } } } } } }, - '#withTolerations':: d.fn(help="\"If specified, the pod's tolerations.\"", args=[d.arg(name='tolerations', type=d.T.array)]), - withTolerations(tolerations): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { tolerations: if std.isArray(v=tolerations) then tolerations else [tolerations] } } } } } }, - '#withTolerationsMixin':: d.fn(help="\"If specified, the pod's tolerations.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='tolerations', type=d.T.array)]), - withTolerationsMixin(tolerations): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { tolerations+: if std.isArray(v=tolerations) then tolerations else [tolerations] } } } } } }, - '#withTopologySpreadConstraints':: d.fn(help='"TopologySpreadConstraints describes how a group of pods ought to spread across topology domains. Scheduler will schedule pods in a way which abides by the constraints. All topologySpreadConstraints are ANDed."', args=[d.arg(name='topologySpreadConstraints', type=d.T.array)]), - withTopologySpreadConstraints(topologySpreadConstraints): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { topologySpreadConstraints: if std.isArray(v=topologySpreadConstraints) then topologySpreadConstraints else [topologySpreadConstraints] } } } } } }, - '#withTopologySpreadConstraintsMixin':: d.fn(help='"TopologySpreadConstraints describes how a group of pods ought to spread across topology domains. 
Scheduler will schedule pods in a way which abides by the constraints. All topologySpreadConstraints are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='topologySpreadConstraints', type=d.T.array)]), - withTopologySpreadConstraintsMixin(topologySpreadConstraints): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { topologySpreadConstraints+: if std.isArray(v=topologySpreadConstraints) then topologySpreadConstraints else [topologySpreadConstraints] } } } } } }, - '#withVolumes':: d.fn(help='"List of volumes that can be mounted by containers belonging to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes"', args=[d.arg(name='volumes', type=d.T.array)]), - withVolumes(volumes): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { volumes: if std.isArray(v=volumes) then volumes else [volumes] } } } } } }, - '#withVolumesMixin':: d.fn(help='"List of volumes that can be mounted by containers belonging to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='volumes', type=d.T.array)]), - withVolumesMixin(volumes): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { volumes+: if std.isArray(v=volumes) then volumes else [volumes] } } } } } }, - }, - }, - '#withActiveDeadlineSeconds':: d.fn(help='"Specifies the duration in seconds relative to the startTime that the job may be continuously active before the system tries to terminate it; value must be positive integer. If a Job is suspended (at creation or through an update), this timer will effectively be stopped and reset when the Job is resumed again."', args=[d.arg(name='activeDeadlineSeconds', type=d.T.integer)]), - withActiveDeadlineSeconds(activeDeadlineSeconds): { spec+: { jobTemplate+: { spec+: { activeDeadlineSeconds: activeDeadlineSeconds } } } }, - '#withBackoffLimit':: d.fn(help='"Specifies the number of retries before marking this job failed. Defaults to 6"', args=[d.arg(name='backoffLimit', type=d.T.integer)]), - withBackoffLimit(backoffLimit): { spec+: { jobTemplate+: { spec+: { backoffLimit: backoffLimit } } } }, - '#withCompletionMode':: d.fn(help="\"CompletionMode specifies how Pod completions are tracked. It can be `NonIndexed` (default) or `Indexed`.\\n\\n`NonIndexed` means that the Job is considered complete when there have been .spec.completions successfully completed Pods. Each Pod completion is homologous to each other.\\n\\n`Indexed` means that the Pods of a Job get an associated completion index from 0 to (.spec.completions - 1), available in the annotation batch.kubernetes.io/job-completion-index. The Job is considered complete when there is one successfully completed Pod for each index. When value is `Indexed`, .spec.completions must be specified and `.spec.parallelism` must be less than or equal to 10^5.\\n\\nThis field is alpha-level and is only honored by servers that enable the IndexedJob feature gate. More completion modes can be added in the future. If the Job controller observes a mode that it doesn't recognize, the controller skips updates for the Job.\"", args=[d.arg(name='completionMode', type=d.T.string)]), - withCompletionMode(completionMode): { spec+: { jobTemplate+: { spec+: { completionMode: completionMode } } } }, - '#withCompletions':: d.fn(help='"Specifies the desired number of successfully finished pods the job should be run with. 
Setting to nil means that the success of any pod signals the success of all pods, and allows parallelism to have any positive value. Setting to 1 means that parallelism is limited to 1 and the success of that pod signals the success of the job. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/"', args=[d.arg(name='completions', type=d.T.integer)]), - withCompletions(completions): { spec+: { jobTemplate+: { spec+: { completions: completions } } } }, - '#withManualSelector':: d.fn(help='"manualSelector controls generation of pod labels and pod selectors. Leave `manualSelector` unset unless you are certain what you are doing. When false or unset, the system pick labels unique to this job and appends those labels to the pod template. When true, the user is responsible for picking unique labels and specifying the selector. Failure to pick a unique label may cause this and other jobs to not function correctly. However, You may see `manualSelector=true` in jobs that were created with the old `extensions/v1beta1` API. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/#specifying-your-own-pod-selector"', args=[d.arg(name='manualSelector', type=d.T.boolean)]), - withManualSelector(manualSelector): { spec+: { jobTemplate+: { spec+: { manualSelector: manualSelector } } } }, - '#withParallelism':: d.fn(help='"Specifies the maximum desired number of pods the job should run at any given time. The actual number of pods running in steady state will be less than this number when ((.spec.completions - .status.successful) < .spec.parallelism), i.e. when the work left to do is less than max parallelism. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/"', args=[d.arg(name='parallelism', type=d.T.integer)]), - withParallelism(parallelism): { spec+: { jobTemplate+: { spec+: { parallelism: parallelism } } } }, - '#withSuspend':: d.fn(help='"Suspend specifies whether the Job controller should create Pods or not. If a Job is created with suspend set to true, no Pods are created by the Job controller. If a Job is suspended after creation (i.e. the flag goes from false to true), the Job controller will delete all active Pods associated with this Job. Users must design their workload to gracefully handle this. Suspending a Job will reset the StartTime field of the Job, effectively resetting the ActiveDeadlineSeconds timer too. This is an alpha field and requires the SuspendJob feature gate to be enabled; otherwise this field may not be set to true. Defaults to false."', args=[d.arg(name='suspend', type=d.T.boolean)]), - withSuspend(suspend): { spec+: { jobTemplate+: { spec+: { suspend: suspend } } } }, - '#withTtlSecondsAfterFinished':: d.fn(help="\"ttlSecondsAfterFinished limits the lifetime of a Job that has finished execution (either Complete or Failed). If this field is set, ttlSecondsAfterFinished after the Job finishes, it is eligible to be automatically deleted. When the Job is being deleted, its lifecycle guarantees (e.g. finalizers) will be honored. If this field is unset, the Job won't be automatically deleted. If this field is set to zero, the Job becomes eligible to be deleted immediately after it finishes. 
This field is alpha-level and is only honored by servers that enable the TTLAfterFinished feature.\"", args=[d.arg(name='ttlSecondsAfterFinished', type=d.T.integer)]), - withTtlSecondsAfterFinished(ttlSecondsAfterFinished): { spec+: { jobTemplate+: { spec+: { ttlSecondsAfterFinished: ttlSecondsAfterFinished } } } }, - }, - }, - '#withConcurrencyPolicy':: d.fn(help="\"Specifies how to treat concurrent executions of a Job. Valid values are: - \\\"Allow\\\" (default): allows CronJobs to run concurrently; - \\\"Forbid\\\": forbids concurrent runs, skipping next run if previous run hasn't finished yet; - \\\"Replace\\\": cancels currently running job and replaces it with a new one\"", args=[d.arg(name='concurrencyPolicy', type=d.T.string)]), - withConcurrencyPolicy(concurrencyPolicy): { spec+: { concurrencyPolicy: concurrencyPolicy } }, - '#withFailedJobsHistoryLimit':: d.fn(help='"The number of failed finished jobs to retain. This is a pointer to distinguish between explicit zero and not specified. Defaults to 1."', args=[d.arg(name='failedJobsHistoryLimit', type=d.T.integer)]), - withFailedJobsHistoryLimit(failedJobsHistoryLimit): { spec+: { failedJobsHistoryLimit: failedJobsHistoryLimit } }, - '#withSchedule':: d.fn(help='"The schedule in Cron format, see https://en.wikipedia.org/wiki/Cron."', args=[d.arg(name='schedule', type=d.T.string)]), - withSchedule(schedule): { spec+: { schedule: schedule } }, - '#withStartingDeadlineSeconds':: d.fn(help='"Optional deadline in seconds for starting the job if it misses scheduled time for any reason. Missed jobs executions will be counted as failed ones."', args=[d.arg(name='startingDeadlineSeconds', type=d.T.integer)]), - withStartingDeadlineSeconds(startingDeadlineSeconds): { spec+: { startingDeadlineSeconds: startingDeadlineSeconds } }, - '#withSuccessfulJobsHistoryLimit':: d.fn(help='"The number of successful finished jobs to retain. This is a pointer to distinguish between explicit zero and not specified. Defaults to 3."', args=[d.arg(name='successfulJobsHistoryLimit', type=d.T.integer)]), - withSuccessfulJobsHistoryLimit(successfulJobsHistoryLimit): { spec+: { successfulJobsHistoryLimit: successfulJobsHistoryLimit } }, - '#withSuspend':: d.fn(help='"This flag tells the controller to suspend subsequent executions, it does not apply to already started executions. 
Defaults to false."', args=[d.arg(name='suspend', type=d.T.boolean)]),
- withSuspend(suspend): { spec+: { suspend: suspend } },
- },
- '#mixin': 'ignore',
- mixin: self,
-}
diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/batch/v1beta1/cronJobSpec.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/batch/v1beta1/cronJobSpec.libsonnet
deleted file mode 100644
index 2ed68fed69e..00000000000
--- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/batch/v1beta1/cronJobSpec.libsonnet
+++ /dev/null
@@ -1,335 +0,0 @@
-{
- local d = (import 'doc-util/main.libsonnet'),
- '#':: d.pkg(name='cronJobSpec', url='', help='"CronJobSpec describes how the job execution will look like and when it will actually run."'),
- '#jobTemplate':: d.obj(help='"JobTemplateSpec describes the data a Job should have when created from a template"'),
- jobTemplate: {
- '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'),
- metadata: {
- '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]),
- withAnnotations(annotations): { jobTemplate+: { metadata+: { annotations: annotations } } },
- '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]),
- withAnnotationsMixin(annotations): { jobTemplate+: { metadata+: { annotations+: annotations } } },
- '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]),
- withClusterName(clusterName): { jobTemplate+: { metadata+: { clusterName: clusterName } } },
- '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]),
- withCreationTimestamp(creationTimestamp): { jobTemplate+: { metadata+: { creationTimestamp: creationTimestamp } } },
- '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened.
Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), - withDeletionGracePeriodSeconds(deletionGracePeriodSeconds): { jobTemplate+: { metadata+: { deletionGracePeriodSeconds: deletionGracePeriodSeconds } } }, - '#withDeletionTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='deletionTimestamp', type=d.T.string)]), - withDeletionTimestamp(deletionTimestamp): { jobTemplate+: { metadata+: { deletionTimestamp: deletionTimestamp } } }, - '#withFinalizers':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizers(finalizers): { jobTemplate+: { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } } }, - '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizersMixin(finalizers): { jobTemplate+: { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. 
The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), - withGenerateName(generateName): { jobTemplate+: { metadata+: { generateName: generateName } } }, - '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), - withGeneration(generation): { jobTemplate+: { metadata+: { generation: generation } } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), - withLabels(labels): { jobTemplate+: { metadata+: { labels: labels } } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), - withLabelsMixin(labels): { jobTemplate+: { metadata+: { labels+: labels } } }, - '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFields(managedFields): { jobTemplate+: { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } } }, - '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFieldsMixin(managedFields): { jobTemplate+: { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. 
Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { jobTemplate+: { metadata+: { name: name } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { jobTemplate+: { metadata+: { namespace: namespace } } }, - '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferences(ownerReferences): { jobTemplate+: { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } } }, - '#withOwnerReferencesMixin':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferencesMixin(ownerReferences): { jobTemplate+: { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } } }, - '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), - withResourceVersion(resourceVersion): { jobTemplate+: { metadata+: { resourceVersion: resourceVersion } } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), - withSelfLink(selfLink): { jobTemplate+: { metadata+: { selfLink: selfLink } } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. 
It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), - withUid(uid): { jobTemplate+: { metadata+: { uid: uid } } }, - }, - '#spec':: d.obj(help='"JobSpec describes how the job execution will look like."'), - spec: { - '#selector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), - selector: { - '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), - withMatchExpressions(matchExpressions): { jobTemplate+: { spec+: { selector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } } }, - '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), - withMatchExpressionsMixin(matchExpressions): { jobTemplate+: { spec+: { selector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } } }, - '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), - withMatchLabels(matchLabels): { jobTemplate+: { spec+: { selector+: { matchLabels: matchLabels } } } }, - '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), - withMatchLabelsMixin(matchLabels): { jobTemplate+: { spec+: { selector+: { matchLabels+: matchLabels } } } }, - }, - '#template':: d.obj(help='"PodTemplateSpec describes the data a pod should have when created from a template"'), - template: { - '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), - metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), - withAnnotations(annotations): { jobTemplate+: { spec+: { template+: { metadata+: { annotations: annotations } } } } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. 
They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), - withAnnotationsMixin(annotations): { jobTemplate+: { spec+: { template+: { metadata+: { annotations+: annotations } } } } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { jobTemplate+: { spec+: { template+: { metadata+: { clusterName: clusterName } } } } }, - '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), - withCreationTimestamp(creationTimestamp): { jobTemplate+: { spec+: { template+: { metadata+: { creationTimestamp: creationTimestamp } } } } }, - '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), - withDeletionGracePeriodSeconds(deletionGracePeriodSeconds): { jobTemplate+: { spec+: { template+: { metadata+: { deletionGracePeriodSeconds: deletionGracePeriodSeconds } } } } }, - '#withDeletionTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='deletionTimestamp', type=d.T.string)]), - withDeletionTimestamp(deletionTimestamp): { jobTemplate+: { spec+: { template+: { metadata+: { deletionTimestamp: deletionTimestamp } } } } }, - '#withFinalizers':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizers(finalizers): { jobTemplate+: { spec+: { template+: { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } } } } }, - '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. 
If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizersMixin(finalizers): { jobTemplate+: { spec+: { template+: { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } } } } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), - withGenerateName(generateName): { jobTemplate+: { spec+: { template+: { metadata+: { generateName: generateName } } } } }, - '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), - withGeneration(generation): { jobTemplate+: { spec+: { template+: { metadata+: { generation: generation } } } } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), - withLabels(labels): { jobTemplate+: { spec+: { template+: { metadata+: { labels: labels } } } } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), - withLabelsMixin(labels): { jobTemplate+: { spec+: { template+: { metadata+: { labels+: labels } } } } }, - '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. 
This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFields(managedFields): { jobTemplate+: { spec+: { template+: { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } } } } }, - '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFieldsMixin(managedFields): { jobTemplate+: { spec+: { template+: { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } } } } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { jobTemplate+: { spec+: { template+: { metadata+: { name: name } } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { jobTemplate+: { spec+: { template+: { metadata+: { namespace: namespace } } } } }, - '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferences(ownerReferences): { jobTemplate+: { spec+: { template+: { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } } } } }, - '#withOwnerReferencesMixin':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. 
There cannot be more than one managing controller."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferencesMixin(ownerReferences): { jobTemplate+: { spec+: { template+: { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } } } } }, - '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), - withResourceVersion(resourceVersion): { jobTemplate+: { spec+: { template+: { metadata+: { resourceVersion: resourceVersion } } } } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), - withSelfLink(selfLink): { jobTemplate+: { spec+: { template+: { metadata+: { selfLink: selfLink } } } } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), - withUid(uid): { jobTemplate+: { spec+: { template+: { metadata+: { uid: uid } } } } }, - }, - '#spec':: d.obj(help='"PodSpec is a description of a pod."'), - spec: { - '#affinity':: d.obj(help='"Affinity is a group of affinity scheduling rules."'), - affinity: { - '#nodeAffinity':: d.obj(help='"Node affinity is a group of node affinity scheduling rules."'), - nodeAffinity: { - '#requiredDuringSchedulingIgnoredDuringExecution':: d.obj(help='"A node selector represents the union of the results of one or more label queries over a set of nodes; that is, it represents the OR of the selectors represented by the node selector terms."'), - requiredDuringSchedulingIgnoredDuringExecution: { - '#withNodeSelectorTerms':: d.fn(help='"Required. A list of node selector terms. The terms are ORed."', args=[d.arg(name='nodeSelectorTerms', type=d.T.array)]), - withNodeSelectorTerms(nodeSelectorTerms): { jobTemplate+: { spec+: { template+: { spec+: { affinity+: { nodeAffinity+: { requiredDuringSchedulingIgnoredDuringExecution+: { nodeSelectorTerms: if std.isArray(v=nodeSelectorTerms) then nodeSelectorTerms else [nodeSelectorTerms] } } } } } } } }, - '#withNodeSelectorTermsMixin':: d.fn(help='"Required. A list of node selector terms. 
The terms are ORed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='nodeSelectorTerms', type=d.T.array)]), - withNodeSelectorTermsMixin(nodeSelectorTerms): { jobTemplate+: { spec+: { template+: { spec+: { affinity+: { nodeAffinity+: { requiredDuringSchedulingIgnoredDuringExecution+: { nodeSelectorTerms+: if std.isArray(v=nodeSelectorTerms) then nodeSelectorTerms else [nodeSelectorTerms] } } } } } } } }, - }, - '#withPreferredDuringSchedulingIgnoredDuringExecution':: d.fn(help='"The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding \\"weight\\" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred."', args=[d.arg(name='preferredDuringSchedulingIgnoredDuringExecution', type=d.T.array)]), - withPreferredDuringSchedulingIgnoredDuringExecution(preferredDuringSchedulingIgnoredDuringExecution): { jobTemplate+: { spec+: { template+: { spec+: { affinity+: { nodeAffinity+: { preferredDuringSchedulingIgnoredDuringExecution: if std.isArray(v=preferredDuringSchedulingIgnoredDuringExecution) then preferredDuringSchedulingIgnoredDuringExecution else [preferredDuringSchedulingIgnoredDuringExecution] } } } } } } }, - '#withPreferredDuringSchedulingIgnoredDuringExecutionMixin':: d.fn(help='"The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding \\"weight\\" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='preferredDuringSchedulingIgnoredDuringExecution', type=d.T.array)]), - withPreferredDuringSchedulingIgnoredDuringExecutionMixin(preferredDuringSchedulingIgnoredDuringExecution): { jobTemplate+: { spec+: { template+: { spec+: { affinity+: { nodeAffinity+: { preferredDuringSchedulingIgnoredDuringExecution+: if std.isArray(v=preferredDuringSchedulingIgnoredDuringExecution) then preferredDuringSchedulingIgnoredDuringExecution else [preferredDuringSchedulingIgnoredDuringExecution] } } } } } } }, - }, - '#podAffinity':: d.obj(help='"Pod affinity is a group of inter pod affinity scheduling rules."'), - podAffinity: { - '#withPreferredDuringSchedulingIgnoredDuringExecution':: d.fn(help='"The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. 
for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding \\"weight\\" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred."', args=[d.arg(name='preferredDuringSchedulingIgnoredDuringExecution', type=d.T.array)]), - withPreferredDuringSchedulingIgnoredDuringExecution(preferredDuringSchedulingIgnoredDuringExecution): { jobTemplate+: { spec+: { template+: { spec+: { affinity+: { podAffinity+: { preferredDuringSchedulingIgnoredDuringExecution: if std.isArray(v=preferredDuringSchedulingIgnoredDuringExecution) then preferredDuringSchedulingIgnoredDuringExecution else [preferredDuringSchedulingIgnoredDuringExecution] } } } } } } }, - '#withPreferredDuringSchedulingIgnoredDuringExecutionMixin':: d.fn(help='"The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding \\"weight\\" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='preferredDuringSchedulingIgnoredDuringExecution', type=d.T.array)]), - withPreferredDuringSchedulingIgnoredDuringExecutionMixin(preferredDuringSchedulingIgnoredDuringExecution): { jobTemplate+: { spec+: { template+: { spec+: { affinity+: { podAffinity+: { preferredDuringSchedulingIgnoredDuringExecution+: if std.isArray(v=preferredDuringSchedulingIgnoredDuringExecution) then preferredDuringSchedulingIgnoredDuringExecution else [preferredDuringSchedulingIgnoredDuringExecution] } } } } } } }, - '#withRequiredDuringSchedulingIgnoredDuringExecution':: d.fn(help='"If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied."', args=[d.arg(name='requiredDuringSchedulingIgnoredDuringExecution', type=d.T.array)]), - withRequiredDuringSchedulingIgnoredDuringExecution(requiredDuringSchedulingIgnoredDuringExecution): { jobTemplate+: { spec+: { template+: { spec+: { affinity+: { podAffinity+: { requiredDuringSchedulingIgnoredDuringExecution: if std.isArray(v=requiredDuringSchedulingIgnoredDuringExecution) then requiredDuringSchedulingIgnoredDuringExecution else [requiredDuringSchedulingIgnoredDuringExecution] } } } } } } }, - '#withRequiredDuringSchedulingIgnoredDuringExecutionMixin':: d.fn(help='"If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. 
due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='requiredDuringSchedulingIgnoredDuringExecution', type=d.T.array)]), - withRequiredDuringSchedulingIgnoredDuringExecutionMixin(requiredDuringSchedulingIgnoredDuringExecution): { jobTemplate+: { spec+: { template+: { spec+: { affinity+: { podAffinity+: { requiredDuringSchedulingIgnoredDuringExecution+: if std.isArray(v=requiredDuringSchedulingIgnoredDuringExecution) then requiredDuringSchedulingIgnoredDuringExecution else [requiredDuringSchedulingIgnoredDuringExecution] } } } } } } }, - }, - '#podAntiAffinity':: d.obj(help='"Pod anti affinity is a group of inter pod anti affinity scheduling rules."'), - podAntiAffinity: { - '#withPreferredDuringSchedulingIgnoredDuringExecution':: d.fn(help='"The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding \\"weight\\" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred."', args=[d.arg(name='preferredDuringSchedulingIgnoredDuringExecution', type=d.T.array)]), - withPreferredDuringSchedulingIgnoredDuringExecution(preferredDuringSchedulingIgnoredDuringExecution): { jobTemplate+: { spec+: { template+: { spec+: { affinity+: { podAntiAffinity+: { preferredDuringSchedulingIgnoredDuringExecution: if std.isArray(v=preferredDuringSchedulingIgnoredDuringExecution) then preferredDuringSchedulingIgnoredDuringExecution else [preferredDuringSchedulingIgnoredDuringExecution] } } } } } } }, - '#withPreferredDuringSchedulingIgnoredDuringExecutionMixin':: d.fn(help='"The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. 
for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding \\"weight\\" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='preferredDuringSchedulingIgnoredDuringExecution', type=d.T.array)]), - withPreferredDuringSchedulingIgnoredDuringExecutionMixin(preferredDuringSchedulingIgnoredDuringExecution): { jobTemplate+: { spec+: { template+: { spec+: { affinity+: { podAntiAffinity+: { preferredDuringSchedulingIgnoredDuringExecution+: if std.isArray(v=preferredDuringSchedulingIgnoredDuringExecution) then preferredDuringSchedulingIgnoredDuringExecution else [preferredDuringSchedulingIgnoredDuringExecution] } } } } } } }, - '#withRequiredDuringSchedulingIgnoredDuringExecution':: d.fn(help='"If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied."', args=[d.arg(name='requiredDuringSchedulingIgnoredDuringExecution', type=d.T.array)]), - withRequiredDuringSchedulingIgnoredDuringExecution(requiredDuringSchedulingIgnoredDuringExecution): { jobTemplate+: { spec+: { template+: { spec+: { affinity+: { podAntiAffinity+: { requiredDuringSchedulingIgnoredDuringExecution: if std.isArray(v=requiredDuringSchedulingIgnoredDuringExecution) then requiredDuringSchedulingIgnoredDuringExecution else [requiredDuringSchedulingIgnoredDuringExecution] } } } } } } }, - '#withRequiredDuringSchedulingIgnoredDuringExecutionMixin':: d.fn(help='"If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='requiredDuringSchedulingIgnoredDuringExecution', type=d.T.array)]), - withRequiredDuringSchedulingIgnoredDuringExecutionMixin(requiredDuringSchedulingIgnoredDuringExecution): { jobTemplate+: { spec+: { template+: { spec+: { affinity+: { podAntiAffinity+: { requiredDuringSchedulingIgnoredDuringExecution+: if std.isArray(v=requiredDuringSchedulingIgnoredDuringExecution) then requiredDuringSchedulingIgnoredDuringExecution else [requiredDuringSchedulingIgnoredDuringExecution] } } } } } } }, - }, - }, - '#dnsConfig':: d.obj(help='"PodDNSConfig defines the DNS parameters of a pod in addition to those generated from DNSPolicy."'), - dnsConfig: { - '#withNameservers':: d.fn(help='"A list of DNS name server IP addresses. This will be appended to the base nameservers generated from DNSPolicy. 
Duplicated nameservers will be removed."', args=[d.arg(name='nameservers', type=d.T.array)]), - withNameservers(nameservers): { jobTemplate+: { spec+: { template+: { spec+: { dnsConfig+: { nameservers: if std.isArray(v=nameservers) then nameservers else [nameservers] } } } } } }, - '#withNameserversMixin':: d.fn(help='"A list of DNS name server IP addresses. This will be appended to the base nameservers generated from DNSPolicy. Duplicated nameservers will be removed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='nameservers', type=d.T.array)]), - withNameserversMixin(nameservers): { jobTemplate+: { spec+: { template+: { spec+: { dnsConfig+: { nameservers+: if std.isArray(v=nameservers) then nameservers else [nameservers] } } } } } }, - '#withOptions':: d.fn(help='"A list of DNS resolver options. This will be merged with the base options generated from DNSPolicy. Duplicated entries will be removed. Resolution options given in Options will override those that appear in the base DNSPolicy."', args=[d.arg(name='options', type=d.T.array)]), - withOptions(options): { jobTemplate+: { spec+: { template+: { spec+: { dnsConfig+: { options: if std.isArray(v=options) then options else [options] } } } } } }, - '#withOptionsMixin':: d.fn(help='"A list of DNS resolver options. This will be merged with the base options generated from DNSPolicy. Duplicated entries will be removed. Resolution options given in Options will override those that appear in the base DNSPolicy."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='options', type=d.T.array)]), - withOptionsMixin(options): { jobTemplate+: { spec+: { template+: { spec+: { dnsConfig+: { options+: if std.isArray(v=options) then options else [options] } } } } } }, - '#withSearches':: d.fn(help='"A list of DNS search domains for host-name lookup. This will be appended to the base search paths generated from DNSPolicy. Duplicated search paths will be removed."', args=[d.arg(name='searches', type=d.T.array)]), - withSearches(searches): { jobTemplate+: { spec+: { template+: { spec+: { dnsConfig+: { searches: if std.isArray(v=searches) then searches else [searches] } } } } } }, - '#withSearchesMixin':: d.fn(help='"A list of DNS search domains for host-name lookup. This will be appended to the base search paths generated from DNSPolicy. Duplicated search paths will be removed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='searches', type=d.T.array)]), - withSearchesMixin(searches): { jobTemplate+: { spec+: { template+: { spec+: { dnsConfig+: { searches+: if std.isArray(v=searches) then searches else [searches] } } } } } }, - }, - '#securityContext':: d.obj(help='"PodSecurityContext holds pod-level security attributes and common container settings. Some fields are also present in container.securityContext. 
Field values of container.securityContext take precedence over field values of PodSecurityContext."'), - securityContext: { - '#seLinuxOptions':: d.obj(help='"SELinuxOptions are the labels to be applied to the container"'), - seLinuxOptions: { - '#withLevel':: d.fn(help='"Level is SELinux level label that applies to the container."', args=[d.arg(name='level', type=d.T.string)]), - withLevel(level): { jobTemplate+: { spec+: { template+: { spec+: { securityContext+: { seLinuxOptions+: { level: level } } } } } } }, - '#withRole':: d.fn(help='"Role is a SELinux role label that applies to the container."', args=[d.arg(name='role', type=d.T.string)]), - withRole(role): { jobTemplate+: { spec+: { template+: { spec+: { securityContext+: { seLinuxOptions+: { role: role } } } } } } }, - '#withType':: d.fn(help='"Type is a SELinux type label that applies to the container."', args=[d.arg(name='type', type=d.T.string)]), - withType(type): { jobTemplate+: { spec+: { template+: { spec+: { securityContext+: { seLinuxOptions+: { type: type } } } } } } }, - '#withUser':: d.fn(help='"User is a SELinux user label that applies to the container."', args=[d.arg(name='user', type=d.T.string)]), - withUser(user): { jobTemplate+: { spec+: { template+: { spec+: { securityContext+: { seLinuxOptions+: { user: user } } } } } } }, - }, - '#seccompProfile':: d.obj(help="\"SeccompProfile defines a pod/container's seccomp profile settings. Only one profile source may be set.\""), - seccompProfile: { - '#withLocalhostProfile':: d.fn(help="\"localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must only be set if type is \\\"Localhost\\\".\"", args=[d.arg(name='localhostProfile', type=d.T.string)]), - withLocalhostProfile(localhostProfile): { jobTemplate+: { spec+: { template+: { spec+: { securityContext+: { seccompProfile+: { localhostProfile: localhostProfile } } } } } } }, - '#withType':: d.fn(help='"type indicates which kind of seccomp profile will be applied. Valid options are:\\n\\nLocalhost - a profile defined in a file on the node should be used. RuntimeDefault - the container runtime default profile should be used. 
Unconfined - no profile should be applied."', args=[d.arg(name='type', type=d.T.string)]), - withType(type): { jobTemplate+: { spec+: { template+: { spec+: { securityContext+: { seccompProfile+: { type: type } } } } } } }, - }, - '#windowsOptions':: d.obj(help='"WindowsSecurityContextOptions contain Windows-specific options and credentials."'), - windowsOptions: { - '#withGmsaCredentialSpec':: d.fn(help='"GMSACredentialSpec is where the GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the GMSA credential spec named by the GMSACredentialSpecName field."', args=[d.arg(name='gmsaCredentialSpec', type=d.T.string)]), - withGmsaCredentialSpec(gmsaCredentialSpec): { jobTemplate+: { spec+: { template+: { spec+: { securityContext+: { windowsOptions+: { gmsaCredentialSpec: gmsaCredentialSpec } } } } } } }, - '#withGmsaCredentialSpecName':: d.fn(help='"GMSACredentialSpecName is the name of the GMSA credential spec to use."', args=[d.arg(name='gmsaCredentialSpecName', type=d.T.string)]), - withGmsaCredentialSpecName(gmsaCredentialSpecName): { jobTemplate+: { spec+: { template+: { spec+: { securityContext+: { windowsOptions+: { gmsaCredentialSpecName: gmsaCredentialSpecName } } } } } } }, - '#withRunAsUserName':: d.fn(help='"The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence."', args=[d.arg(name='runAsUserName', type=d.T.string)]), - withRunAsUserName(runAsUserName): { jobTemplate+: { spec+: { template+: { spec+: { securityContext+: { windowsOptions+: { runAsUserName: runAsUserName } } } } } } }, - }, - '#withFsGroup':: d.fn(help="\"A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod:\\n\\n1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw----\\n\\nIf unset, the Kubelet will not modify the ownership and permissions of any volume.\"", args=[d.arg(name='fsGroup', type=d.T.integer)]), - withFsGroup(fsGroup): { jobTemplate+: { spec+: { template+: { spec+: { securityContext+: { fsGroup: fsGroup } } } } } }, - '#withFsGroupChangePolicy':: d.fn(help='"fsGroupChangePolicy defines behavior of changing ownership and permission of the volume before being exposed inside Pod. This field will only apply to volume types which support fsGroup based ownership(and permissions). It will have no effect on ephemeral volume types such as: secret, configmaps and emptydir. Valid values are \\"OnRootMismatch\\" and \\"Always\\". If not specified, \\"Always\\" is used."', args=[d.arg(name='fsGroupChangePolicy', type=d.T.string)]), - withFsGroupChangePolicy(fsGroupChangePolicy): { jobTemplate+: { spec+: { template+: { spec+: { securityContext+: { fsGroupChangePolicy: fsGroupChangePolicy } } } } } }, - '#withRunAsGroup':: d.fn(help='"The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext. 
If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container."', args=[d.arg(name='runAsGroup', type=d.T.integer)]), - withRunAsGroup(runAsGroup): { jobTemplate+: { spec+: { template+: { spec+: { securityContext+: { runAsGroup: runAsGroup } } } } } }, - '#withRunAsNonRoot':: d.fn(help='"Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence."', args=[d.arg(name='runAsNonRoot', type=d.T.boolean)]), - withRunAsNonRoot(runAsNonRoot): { jobTemplate+: { spec+: { template+: { spec+: { securityContext+: { runAsNonRoot: runAsNonRoot } } } } } }, - '#withRunAsUser':: d.fn(help='"The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container."', args=[d.arg(name='runAsUser', type=d.T.integer)]), - withRunAsUser(runAsUser): { jobTemplate+: { spec+: { template+: { spec+: { securityContext+: { runAsUser: runAsUser } } } } } }, - '#withSupplementalGroups':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID. If unspecified, no groups will be added to any container.\"", args=[d.arg(name='supplementalGroups', type=d.T.array)]), - withSupplementalGroups(supplementalGroups): { jobTemplate+: { spec+: { template+: { spec+: { securityContext+: { supplementalGroups: if std.isArray(v=supplementalGroups) then supplementalGroups else [supplementalGroups] } } } } } }, - '#withSupplementalGroupsMixin':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID. If unspecified, no groups will be added to any container.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='supplementalGroups', type=d.T.array)]), - withSupplementalGroupsMixin(supplementalGroups): { jobTemplate+: { spec+: { template+: { spec+: { securityContext+: { supplementalGroups+: if std.isArray(v=supplementalGroups) then supplementalGroups else [supplementalGroups] } } } } } }, - '#withSysctls':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch."', args=[d.arg(name='sysctls', type=d.T.array)]), - withSysctls(sysctls): { jobTemplate+: { spec+: { template+: { spec+: { securityContext+: { sysctls: if std.isArray(v=sysctls) then sysctls else [sysctls] } } } } } }, - '#withSysctlsMixin':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. 
Pods with unsupported sysctls (by the container runtime) might fail to launch."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='sysctls', type=d.T.array)]), - withSysctlsMixin(sysctls): { jobTemplate+: { spec+: { template+: { spec+: { securityContext+: { sysctls+: if std.isArray(v=sysctls) then sysctls else [sysctls] } } } } } }, - }, - '#withActiveDeadlineSeconds':: d.fn(help='"Optional duration in seconds the pod may be active on the node relative to StartTime before the system will actively try to mark it failed and kill associated containers. Value must be a positive integer."', args=[d.arg(name='activeDeadlineSeconds', type=d.T.integer)]), - withActiveDeadlineSeconds(activeDeadlineSeconds): { jobTemplate+: { spec+: { template+: { spec+: { activeDeadlineSeconds: activeDeadlineSeconds } } } } }, - '#withAutomountServiceAccountToken':: d.fn(help='"AutomountServiceAccountToken indicates whether a service account token should be automatically mounted."', args=[d.arg(name='automountServiceAccountToken', type=d.T.boolean)]), - withAutomountServiceAccountToken(automountServiceAccountToken): { jobTemplate+: { spec+: { template+: { spec+: { automountServiceAccountToken: automountServiceAccountToken } } } } }, - '#withContainers':: d.fn(help='"List of containers belonging to the pod. Containers cannot currently be added or removed. There must be at least one container in a Pod. Cannot be updated."', args=[d.arg(name='containers', type=d.T.array)]), - withContainers(containers): { jobTemplate+: { spec+: { template+: { spec+: { containers: if std.isArray(v=containers) then containers else [containers] } } } } }, - '#withContainersMixin':: d.fn(help='"List of containers belonging to the pod. Containers cannot currently be added or removed. There must be at least one container in a Pod. Cannot be updated."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='containers', type=d.T.array)]), - withContainersMixin(containers): { jobTemplate+: { spec+: { template+: { spec+: { containers+: if std.isArray(v=containers) then containers else [containers] } } } } }, - '#withDnsPolicy':: d.fn(help="\"Set DNS policy for the pod. Defaults to \\\"ClusterFirst\\\". Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. To have DNS options set along with hostNetwork, you have to specify DNS policy explicitly to 'ClusterFirstWithHostNet'.\"", args=[d.arg(name='dnsPolicy', type=d.T.string)]), - withDnsPolicy(dnsPolicy): { jobTemplate+: { spec+: { template+: { spec+: { dnsPolicy: dnsPolicy } } } } }, - '#withEnableServiceLinks':: d.fn(help="\"EnableServiceLinks indicates whether information about services should be injected into pod's environment variables, matching the syntax of Docker links. Optional: Defaults to true.\"", args=[d.arg(name='enableServiceLinks', type=d.T.boolean)]), - withEnableServiceLinks(enableServiceLinks): { jobTemplate+: { spec+: { template+: { spec+: { enableServiceLinks: enableServiceLinks } } } } }, - '#withEphemeralContainers':: d.fn(help="\"List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. 
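// A usage sketch for the pod-level securityContext builders above, assuming the same
// batch.v1beta1.cronJobSpec entry point as in the earlier sketch. Every function returns
// a mixin rooted at jobTemplate.spec.template.spec, so the fragments merge into a single
// securityContext object when added together.
local cronJobSpec = (import 'github.com/jsonnet-libs/k8s-libsonnet/1.21/main.libsonnet').batch.v1beta1.cronJobSpec;
local podSpec = cronJobSpec.jobTemplate.spec.template.spec;

podSpec.securityContext.withRunAsNonRoot(true)
+ podSpec.securityContext.withRunAsUser(10001)
+ podSpec.securityContext.withFsGroup(10001)
+ podSpec.securityContext.seccompProfile.withType('RuntimeDefault')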
This field is alpha-level and is only honored by servers that enable the EphemeralContainers feature.\"", args=[d.arg(name='ephemeralContainers', type=d.T.array)]), - withEphemeralContainers(ephemeralContainers): { jobTemplate+: { spec+: { template+: { spec+: { ephemeralContainers: if std.isArray(v=ephemeralContainers) then ephemeralContainers else [ephemeralContainers] } } } } }, - '#withEphemeralContainersMixin':: d.fn(help="\"List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. This field is alpha-level and is only honored by servers that enable the EphemeralContainers feature.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='ephemeralContainers', type=d.T.array)]), - withEphemeralContainersMixin(ephemeralContainers): { jobTemplate+: { spec+: { template+: { spec+: { ephemeralContainers+: if std.isArray(v=ephemeralContainers) then ephemeralContainers else [ephemeralContainers] } } } } }, - '#withHostAliases':: d.fn(help="\"HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts file if specified. This is only valid for non-hostNetwork pods.\"", args=[d.arg(name='hostAliases', type=d.T.array)]), - withHostAliases(hostAliases): { jobTemplate+: { spec+: { template+: { spec+: { hostAliases: if std.isArray(v=hostAliases) then hostAliases else [hostAliases] } } } } }, - '#withHostAliasesMixin':: d.fn(help="\"HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts file if specified. This is only valid for non-hostNetwork pods.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='hostAliases', type=d.T.array)]), - withHostAliasesMixin(hostAliases): { jobTemplate+: { spec+: { template+: { spec+: { hostAliases+: if std.isArray(v=hostAliases) then hostAliases else [hostAliases] } } } } }, - '#withHostIPC':: d.fn(help="\"Use the host's ipc namespace. Optional: Default to false.\"", args=[d.arg(name='hostIPC', type=d.T.boolean)]), - withHostIPC(hostIPC): { jobTemplate+: { spec+: { template+: { spec+: { hostIPC: hostIPC } } } } }, - '#withHostNetwork':: d.fn(help="\"Host networking requested for this pod. Use the host's network namespace. If this option is set, the ports that will be used must be specified. Default to false.\"", args=[d.arg(name='hostNetwork', type=d.T.boolean)]), - withHostNetwork(hostNetwork): { jobTemplate+: { spec+: { template+: { spec+: { hostNetwork: hostNetwork } } } } }, - '#withHostPID':: d.fn(help="\"Use the host's pid namespace. Optional: Default to false.\"", args=[d.arg(name='hostPID', type=d.T.boolean)]), - withHostPID(hostPID): { jobTemplate+: { spec+: { template+: { spec+: { hostPID: hostPID } } } } }, - '#withHostname':: d.fn(help="\"Specifies the hostname of the Pod If not specified, the pod's hostname will be set to a system-defined value.\"", args=[d.arg(name='hostname', type=d.T.string)]), - withHostname(hostname): { jobTemplate+: { spec+: { template+: { spec+: { hostname: hostname } } } } }, - '#withImagePullSecrets':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. 
If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), - withImagePullSecrets(imagePullSecrets): { jobTemplate+: { spec+: { template+: { spec+: { imagePullSecrets: if std.isArray(v=imagePullSecrets) then imagePullSecrets else [imagePullSecrets] } } } } }, - '#withImagePullSecretsMixin':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), - withImagePullSecretsMixin(imagePullSecrets): { jobTemplate+: { spec+: { template+: { spec+: { imagePullSecrets+: if std.isArray(v=imagePullSecrets) then imagePullSecrets else [imagePullSecrets] } } } } }, - '#withInitContainers':: d.fn(help='"List of initialization containers belonging to the pod. Init containers are executed in order prior to containers being started. If any init container fails, the pod is considered to have failed and is handled according to its restartPolicy. The name for an init container or normal container must be unique among all containers. Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes. The resourceRequirements of an init container are taken into account during scheduling by finding the highest request/limit for each resource type, and then using the max of of that value or the sum of the normal containers. Limits are applied to init containers in a similar fashion. Init containers cannot currently be added or removed. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/"', args=[d.arg(name='initContainers', type=d.T.array)]), - withInitContainers(initContainers): { jobTemplate+: { spec+: { template+: { spec+: { initContainers: if std.isArray(v=initContainers) then initContainers else [initContainers] } } } } }, - '#withInitContainersMixin':: d.fn(help='"List of initialization containers belonging to the pod. Init containers are executed in order prior to containers being started. If any init container fails, the pod is considered to have failed and is handled according to its restartPolicy. The name for an init container or normal container must be unique among all containers. Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes. The resourceRequirements of an init container are taken into account during scheduling by finding the highest request/limit for each resource type, and then using the max of of that value or the sum of the normal containers. Limits are applied to init containers in a similar fashion. Init containers cannot currently be added or removed. Cannot be updated. 
More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='initContainers', type=d.T.array)]), - withInitContainersMixin(initContainers): { jobTemplate+: { spec+: { template+: { spec+: { initContainers+: if std.isArray(v=initContainers) then initContainers else [initContainers] } } } } }, - '#withNodeName':: d.fn(help='"NodeName is a request to schedule this pod onto a specific node. If it is non-empty, the scheduler simply schedules this pod onto that node, assuming that it fits resource requirements."', args=[d.arg(name='nodeName', type=d.T.string)]), - withNodeName(nodeName): { jobTemplate+: { spec+: { template+: { spec+: { nodeName: nodeName } } } } }, - '#withNodeSelector':: d.fn(help="\"NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/\"", args=[d.arg(name='nodeSelector', type=d.T.object)]), - withNodeSelector(nodeSelector): { jobTemplate+: { spec+: { template+: { spec+: { nodeSelector: nodeSelector } } } } }, - '#withNodeSelectorMixin':: d.fn(help="\"NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='nodeSelector', type=d.T.object)]), - withNodeSelectorMixin(nodeSelector): { jobTemplate+: { spec+: { template+: { spec+: { nodeSelector+: nodeSelector } } } } }, - '#withOverhead':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md This field is alpha-level as of Kubernetes v1.16, and is only honored by servers that enable the PodOverhead feature."', args=[d.arg(name='overhead', type=d.T.object)]), - withOverhead(overhead): { jobTemplate+: { spec+: { template+: { spec+: { overhead: overhead } } } } }, - '#withOverheadMixin':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. 
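// A sketch of the set-vs-mixin distinction for the object- and array-valued pod spec
// fields shown above (same assumed cronJobSpec entry point): the plain setter overwrites
// the field, while the *Mixin variant merges maps and appends to arrays.
local cronJobSpec = (import 'github.com/jsonnet-libs/k8s-libsonnet/1.21/main.libsonnet').batch.v1beta1.cronJobSpec;
local podSpec = cronJobSpec.jobTemplate.spec.template.spec;

podSpec.withNodeSelector({ 'kubernetes.io/os': 'linux' })
+ podSpec.withNodeSelectorMixin({ 'topology.kubernetes.io/zone': 'us-east-1a' })   // both keys kept
+ podSpec.withInitContainersMixin([{ name: 'init-wait', image: 'busybox:1.36' }])  // appended, not replaced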
More info: https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md This field is alpha-level as of Kubernetes v1.16, and is only honored by servers that enable the PodOverhead feature."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='overhead', type=d.T.object)]), - withOverheadMixin(overhead): { jobTemplate+: { spec+: { template+: { spec+: { overhead+: overhead } } } } }, - '#withPreemptionPolicy':: d.fn(help='"PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset. This field is beta-level, gated by the NonPreemptingPriority feature-gate."', args=[d.arg(name='preemptionPolicy', type=d.T.string)]), - withPreemptionPolicy(preemptionPolicy): { jobTemplate+: { spec+: { template+: { spec+: { preemptionPolicy: preemptionPolicy } } } } }, - '#withPriority':: d.fn(help='"The priority value. Various system components use this field to find the priority of the pod. When Priority Admission Controller is enabled, it prevents users from setting this field. The admission controller populates this field from PriorityClassName. The higher the value, the higher the priority."', args=[d.arg(name='priority', type=d.T.integer)]), - withPriority(priority): { jobTemplate+: { spec+: { template+: { spec+: { priority: priority } } } } }, - '#withPriorityClassName':: d.fn(help="\"If specified, indicates the pod's priority. \\\"system-node-critical\\\" and \\\"system-cluster-critical\\\" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default.\"", args=[d.arg(name='priorityClassName', type=d.T.string)]), - withPriorityClassName(priorityClassName): { jobTemplate+: { spec+: { template+: { spec+: { priorityClassName: priorityClassName } } } } }, - '#withReadinessGates':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/0007-pod-ready%2B%2B.md"', args=[d.arg(name='readinessGates', type=d.T.array)]), - withReadinessGates(readinessGates): { jobTemplate+: { spec+: { template+: { spec+: { readinessGates: if std.isArray(v=readinessGates) then readinessGates else [readinessGates] } } } } }, - '#withReadinessGatesMixin':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/0007-pod-ready%2B%2B.md"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='readinessGates', type=d.T.array)]), - withReadinessGatesMixin(readinessGates): { jobTemplate+: { spec+: { template+: { spec+: { readinessGates+: if std.isArray(v=readinessGates) then readinessGates else [readinessGates] } } } } }, - '#withRestartPolicy':: d.fn(help='"Restart policy for all containers within the pod. One of Always, OnFailure, Never. Default to Always. 
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy"', args=[d.arg(name='restartPolicy', type=d.T.string)]), - withRestartPolicy(restartPolicy): { jobTemplate+: { spec+: { template+: { spec+: { restartPolicy: restartPolicy } } } } }, - '#withRuntimeClassName':: d.fn(help='"RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \\"legacy\\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md This is a beta feature as of Kubernetes v1.14."', args=[d.arg(name='runtimeClassName', type=d.T.string)]), - withRuntimeClassName(runtimeClassName): { jobTemplate+: { spec+: { template+: { spec+: { runtimeClassName: runtimeClassName } } } } }, - '#withSchedulerName':: d.fn(help='"If specified, the pod will be dispatched by specified scheduler. If not specified, the pod will be dispatched by default scheduler."', args=[d.arg(name='schedulerName', type=d.T.string)]), - withSchedulerName(schedulerName): { jobTemplate+: { spec+: { template+: { spec+: { schedulerName: schedulerName } } } } }, - '#withServiceAccount':: d.fn(help='"DeprecatedServiceAccount is a depreciated alias for ServiceAccountName. Deprecated: Use serviceAccountName instead."', args=[d.arg(name='serviceAccount', type=d.T.string)]), - withServiceAccount(serviceAccount): { jobTemplate+: { spec+: { template+: { spec+: { serviceAccount: serviceAccount } } } } }, - '#withServiceAccountName':: d.fn(help='"ServiceAccountName is the name of the ServiceAccount to use to run this pod. More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/"', args=[d.arg(name='serviceAccountName', type=d.T.string)]), - withServiceAccountName(serviceAccountName): { jobTemplate+: { spec+: { template+: { spec+: { serviceAccountName: serviceAccountName } } } } }, - '#withSetHostnameAsFQDN':: d.fn(help="\"If true the pod's hostname will be configured as the pod's FQDN, rather than the leaf name (the default). In Linux containers, this means setting the FQDN in the hostname field of the kernel (the nodename field of struct utsname). In Windows containers, this means setting the registry value of hostname for the registry key HKEY_LOCAL_MACHINE\\\\SYSTEM\\\\CurrentControlSet\\\\Services\\\\Tcpip\\\\Parameters to FQDN. If a pod does not have FQDN, this has no effect. Default to false.\"", args=[d.arg(name='setHostnameAsFQDN', type=d.T.boolean)]), - withSetHostnameAsFQDN(setHostnameAsFQDN): { jobTemplate+: { spec+: { template+: { spec+: { setHostnameAsFQDN: setHostnameAsFQDN } } } } }, - '#withShareProcessNamespace':: d.fn(help='"Share a single process namespace between all of the containers in a pod. When this is set containers will be able to view and signal processes from other containers in the same pod, and the first process in each container will not be assigned PID 1. HostPID and ShareProcessNamespace cannot both be set. Optional: Default to false."', args=[d.arg(name='shareProcessNamespace', type=d.T.boolean)]), - withShareProcessNamespace(shareProcessNamespace): { jobTemplate+: { spec+: { template+: { spec+: { shareProcessNamespace: shareProcessNamespace } } } } }, - '#withSubdomain':: d.fn(help='"If specified, the fully qualified Pod hostname will be \\"...svc.\\". 
If not specified, the pod will not have a domainname at all."', args=[d.arg(name='subdomain', type=d.T.string)]), - withSubdomain(subdomain): { jobTemplate+: { spec+: { template+: { spec+: { subdomain: subdomain } } } } }, - '#withTerminationGracePeriodSeconds':: d.fn(help='"Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). If this value is nil, the default grace period will be used instead. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. Defaults to 30 seconds."', args=[d.arg(name='terminationGracePeriodSeconds', type=d.T.integer)]), - withTerminationGracePeriodSeconds(terminationGracePeriodSeconds): { jobTemplate+: { spec+: { template+: { spec+: { terminationGracePeriodSeconds: terminationGracePeriodSeconds } } } } }, - '#withTolerations':: d.fn(help="\"If specified, the pod's tolerations.\"", args=[d.arg(name='tolerations', type=d.T.array)]), - withTolerations(tolerations): { jobTemplate+: { spec+: { template+: { spec+: { tolerations: if std.isArray(v=tolerations) then tolerations else [tolerations] } } } } }, - '#withTolerationsMixin':: d.fn(help="\"If specified, the pod's tolerations.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='tolerations', type=d.T.array)]), - withTolerationsMixin(tolerations): { jobTemplate+: { spec+: { template+: { spec+: { tolerations+: if std.isArray(v=tolerations) then tolerations else [tolerations] } } } } }, - '#withTopologySpreadConstraints':: d.fn(help='"TopologySpreadConstraints describes how a group of pods ought to spread across topology domains. Scheduler will schedule pods in a way which abides by the constraints. All topologySpreadConstraints are ANDed."', args=[d.arg(name='topologySpreadConstraints', type=d.T.array)]), - withTopologySpreadConstraints(topologySpreadConstraints): { jobTemplate+: { spec+: { template+: { spec+: { topologySpreadConstraints: if std.isArray(v=topologySpreadConstraints) then topologySpreadConstraints else [topologySpreadConstraints] } } } } }, - '#withTopologySpreadConstraintsMixin':: d.fn(help='"TopologySpreadConstraints describes how a group of pods ought to spread across topology domains. Scheduler will schedule pods in a way which abides by the constraints. All topologySpreadConstraints are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='topologySpreadConstraints', type=d.T.array)]), - withTopologySpreadConstraintsMixin(topologySpreadConstraints): { jobTemplate+: { spec+: { template+: { spec+: { topologySpreadConstraints+: if std.isArray(v=topologySpreadConstraints) then topologySpreadConstraints else [topologySpreadConstraints] } } } } }, - '#withVolumes':: d.fn(help='"List of volumes that can be mounted by containers belonging to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes"', args=[d.arg(name='volumes', type=d.T.array)]), - withVolumes(volumes): { jobTemplate+: { spec+: { template+: { spec+: { volumes: if std.isArray(v=volumes) then volumes else [volumes] } } } } }, - '#withVolumesMixin':: d.fn(help='"List of volumes that can be mounted by containers belonging to the pod. 
More info: https://kubernetes.io/docs/concepts/storage/volumes"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='volumes', type=d.T.array)]), - withVolumesMixin(volumes): { jobTemplate+: { spec+: { template+: { spec+: { volumes+: if std.isArray(v=volumes) then volumes else [volumes] } } } } }, - }, - }, - '#withActiveDeadlineSeconds':: d.fn(help='"Specifies the duration in seconds relative to the startTime that the job may be continuously active before the system tries to terminate it; value must be positive integer. If a Job is suspended (at creation or through an update), this timer will effectively be stopped and reset when the Job is resumed again."', args=[d.arg(name='activeDeadlineSeconds', type=d.T.integer)]), - withActiveDeadlineSeconds(activeDeadlineSeconds): { jobTemplate+: { spec+: { activeDeadlineSeconds: activeDeadlineSeconds } } }, - '#withBackoffLimit':: d.fn(help='"Specifies the number of retries before marking this job failed. Defaults to 6"', args=[d.arg(name='backoffLimit', type=d.T.integer)]), - withBackoffLimit(backoffLimit): { jobTemplate+: { spec+: { backoffLimit: backoffLimit } } }, - '#withCompletionMode':: d.fn(help="\"CompletionMode specifies how Pod completions are tracked. It can be `NonIndexed` (default) or `Indexed`.\\n\\n`NonIndexed` means that the Job is considered complete when there have been .spec.completions successfully completed Pods. Each Pod completion is homologous to each other.\\n\\n`Indexed` means that the Pods of a Job get an associated completion index from 0 to (.spec.completions - 1), available in the annotation batch.kubernetes.io/job-completion-index. The Job is considered complete when there is one successfully completed Pod for each index. When value is `Indexed`, .spec.completions must be specified and `.spec.parallelism` must be less than or equal to 10^5.\\n\\nThis field is alpha-level and is only honored by servers that enable the IndexedJob feature gate. More completion modes can be added in the future. If the Job controller observes a mode that it doesn't recognize, the controller skips updates for the Job.\"", args=[d.arg(name='completionMode', type=d.T.string)]), - withCompletionMode(completionMode): { jobTemplate+: { spec+: { completionMode: completionMode } } }, - '#withCompletions':: d.fn(help='"Specifies the desired number of successfully finished pods the job should be run with. Setting to nil means that the success of any pod signals the success of all pods, and allows parallelism to have any positive value. Setting to 1 means that parallelism is limited to 1 and the success of that pod signals the success of the job. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/"', args=[d.arg(name='completions', type=d.T.integer)]), - withCompletions(completions): { jobTemplate+: { spec+: { completions: completions } } }, - '#withManualSelector':: d.fn(help='"manualSelector controls generation of pod labels and pod selectors. Leave `manualSelector` unset unless you are certain what you are doing. When false or unset, the system pick labels unique to this job and appends those labels to the pod template. When true, the user is responsible for picking unique labels and specifying the selector. Failure to pick a unique label may cause this and other jobs to not function correctly. However, You may see `manualSelector=true` in jobs that were created with the old `extensions/v1beta1` API. 
More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/#specifying-your-own-pod-selector"', args=[d.arg(name='manualSelector', type=d.T.boolean)]), - withManualSelector(manualSelector): { jobTemplate+: { spec+: { manualSelector: manualSelector } } }, - '#withParallelism':: d.fn(help='"Specifies the maximum desired number of pods the job should run at any given time. The actual number of pods running in steady state will be less than this number when ((.spec.completions - .status.successful) < .spec.parallelism), i.e. when the work left to do is less than max parallelism. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/"', args=[d.arg(name='parallelism', type=d.T.integer)]), - withParallelism(parallelism): { jobTemplate+: { spec+: { parallelism: parallelism } } }, - '#withSuspend':: d.fn(help='"Suspend specifies whether the Job controller should create Pods or not. If a Job is created with suspend set to true, no Pods are created by the Job controller. If a Job is suspended after creation (i.e. the flag goes from false to true), the Job controller will delete all active Pods associated with this Job. Users must design their workload to gracefully handle this. Suspending a Job will reset the StartTime field of the Job, effectively resetting the ActiveDeadlineSeconds timer too. This is an alpha field and requires the SuspendJob feature gate to be enabled; otherwise this field may not be set to true. Defaults to false."', args=[d.arg(name='suspend', type=d.T.boolean)]), - withSuspend(suspend): { jobTemplate+: { spec+: { suspend: suspend } } }, - '#withTtlSecondsAfterFinished':: d.fn(help="\"ttlSecondsAfterFinished limits the lifetime of a Job that has finished execution (either Complete or Failed). If this field is set, ttlSecondsAfterFinished after the Job finishes, it is eligible to be automatically deleted. When the Job is being deleted, its lifecycle guarantees (e.g. finalizers) will be honored. If this field is unset, the Job won't be automatically deleted. If this field is set to zero, the Job becomes eligible to be deleted immediately after it finishes. This field is alpha-level and is only honored by servers that enable the TTLAfterFinished feature.\"", args=[d.arg(name='ttlSecondsAfterFinished', type=d.T.integer)]), - withTtlSecondsAfterFinished(ttlSecondsAfterFinished): { jobTemplate+: { spec+: { ttlSecondsAfterFinished: ttlSecondsAfterFinished } } }, - }, - }, - '#withConcurrencyPolicy':: d.fn(help="\"Specifies how to treat concurrent executions of a Job. Valid values are: - \\\"Allow\\\" (default): allows CronJobs to run concurrently; - \\\"Forbid\\\": forbids concurrent runs, skipping next run if previous run hasn't finished yet; - \\\"Replace\\\": cancels currently running job and replaces it with a new one\"", args=[d.arg(name='concurrencyPolicy', type=d.T.string)]), - withConcurrencyPolicy(concurrencyPolicy): { concurrencyPolicy: concurrencyPolicy }, - '#withFailedJobsHistoryLimit':: d.fn(help='"The number of failed finished jobs to retain. This is a pointer to distinguish between explicit zero and not specified. 
Defaults to 1."', args=[d.arg(name='failedJobsHistoryLimit', type=d.T.integer)]), - withFailedJobsHistoryLimit(failedJobsHistoryLimit): { failedJobsHistoryLimit: failedJobsHistoryLimit }, - '#withSchedule':: d.fn(help='"The schedule in Cron format, see https://en.wikipedia.org/wiki/Cron."', args=[d.arg(name='schedule', type=d.T.string)]), - withSchedule(schedule): { schedule: schedule }, - '#withStartingDeadlineSeconds':: d.fn(help='"Optional deadline in seconds for starting the job if it misses scheduled time for any reason. Missed jobs executions will be counted as failed ones."', args=[d.arg(name='startingDeadlineSeconds', type=d.T.integer)]), - withStartingDeadlineSeconds(startingDeadlineSeconds): { startingDeadlineSeconds: startingDeadlineSeconds }, - '#withSuccessfulJobsHistoryLimit':: d.fn(help='"The number of successful finished jobs to retain. This is a pointer to distinguish between explicit zero and not specified. Defaults to 3."', args=[d.arg(name='successfulJobsHistoryLimit', type=d.T.integer)]), - withSuccessfulJobsHistoryLimit(successfulJobsHistoryLimit): { successfulJobsHistoryLimit: successfulJobsHistoryLimit }, - '#withSuspend':: d.fn(help='"This flag tells the controller to suspend subsequent executions, it does not apply to already started executions. Defaults to false."', args=[d.arg(name='suspend', type=d.T.boolean)]), - withSuspend(suspend): { suspend: suspend }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/batch/v1beta1/cronJobStatus.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/batch/v1beta1/cronJobStatus.libsonnet deleted file mode 100644 index 165dfd4dbca..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/batch/v1beta1/cronJobStatus.libsonnet +++ /dev/null @@ -1,14 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='cronJobStatus', url='', help='"CronJobStatus represents the current state of a cron job."'), - '#withActive':: d.fn(help='"A list of pointers to currently running jobs."', args=[d.arg(name='active', type=d.T.array)]), - withActive(active): { active: if std.isArray(v=active) then active else [active] }, - '#withActiveMixin':: d.fn(help='"A list of pointers to currently running jobs."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='active', type=d.T.array)]), - withActiveMixin(active): { active+: if std.isArray(v=active) then active else [active] }, - '#withLastScheduleTime':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='lastScheduleTime', type=d.T.string)]), - withLastScheduleTime(lastScheduleTime): { lastScheduleTime: lastScheduleTime }, - '#withLastSuccessfulTime':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. 
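// A sketch tying the CronJob-level builders above together, using the same assumed
// batch.v1beta1.cronJobSpec entry point. Note that v1beta1 CronJob was removed upstream,
// so in the 1.29 library introduced by this change the equivalent builders are expected
// to live under batch.v1 instead.
local cronJobSpec = (import 'github.com/jsonnet-libs/k8s-libsonnet/1.21/main.libsonnet').batch.v1beta1.cronJobSpec;

cronJobSpec.withSchedule('30 2 * * *')
+ cronJobSpec.withConcurrencyPolicy('Forbid')
+ cronJobSpec.withSuccessfulJobsHistoryLimit(3)
+ cronJobSpec.withFailedJobsHistoryLimit(1)
+ cronJobSpec.jobTemplate.spec.withBackoffLimit(2)
+ cronJobSpec.jobTemplate.spec.template.spec.withRestartPolicy('OnFailure')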
Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='lastSuccessfulTime', type=d.T.string)]), - withLastSuccessfulTime(lastSuccessfulTime): { lastSuccessfulTime: lastSuccessfulTime }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/batch/v1beta1/jobTemplateSpec.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/batch/v1beta1/jobTemplateSpec.libsonnet deleted file mode 100644 index 5b4af07f25b..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/batch/v1beta1/jobTemplateSpec.libsonnet +++ /dev/null @@ -1,320 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='jobTemplateSpec', url='', help='"JobTemplateSpec describes the data a Job should have when created from a template"'), - '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), - metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), - withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), - withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, - '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), - withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, - '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), - withDeletionGracePeriodSeconds(deletionGracePeriodSeconds): { metadata+: { deletionGracePeriodSeconds: deletionGracePeriodSeconds } }, - '#withDeletionTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. 
Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='deletionTimestamp', type=d.T.string)]), - withDeletionTimestamp(deletionTimestamp): { metadata+: { deletionTimestamp: deletionTimestamp } }, - '#withFinalizers':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), - withGenerateName(generateName): { metadata+: { generateName: generateName } }, - '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), - withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), - withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), - withLabelsMixin(labels): { metadata+: { labels+: labels } }, - '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. 
More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { metadata+: { namespace: namespace } }, - '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, - '#withOwnerReferencesMixin':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, - '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), - withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), - withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), - withUid(uid): { metadata+: { uid: uid } }, - }, - '#spec':: d.obj(help='"JobSpec describes how the job execution will look like."'), - spec: { - '#selector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), - selector: { - '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. 
The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), - withMatchExpressions(matchExpressions): { spec+: { selector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } }, - '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), - withMatchExpressionsMixin(matchExpressions): { spec+: { selector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } }, - '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), - withMatchLabels(matchLabels): { spec+: { selector+: { matchLabels: matchLabels } } }, - '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), - withMatchLabelsMixin(matchLabels): { spec+: { selector+: { matchLabels+: matchLabels } } }, - }, - '#template':: d.obj(help='"PodTemplateSpec describes the data a pod should have when created from a template"'), - template: { - '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), - metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), - withAnnotations(annotations): { spec+: { template+: { metadata+: { annotations: annotations } } } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), - withAnnotationsMixin(annotations): { spec+: { template+: { metadata+: { annotations+: annotations } } } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { spec+: { template+: { metadata+: { clusterName: clusterName } } } }, - '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. 
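// A sketch for the jobTemplateSpec builders deleted here, assuming the file is exposed as
// batch.v1beta1.jobTemplateSpec from the library root. Unlike cronJobSpec, these mixins are
// rooted at the template itself ({ metadata+: ... } / { spec+: { selector+: ... } }).
local jobTemplateSpec = (import 'github.com/jsonnet-libs/k8s-libsonnet/1.21/main.libsonnet').batch.v1beta1.jobTemplateSpec;

jobTemplateSpec.metadata.withLabels({ app: 'report' })
+ jobTemplateSpec.spec.selector.withMatchLabels({ app: 'report' })
+ jobTemplateSpec.spec.selector.withMatchExpressionsMixin([{ key: 'tier', operator: 'In', values: ['batch'] }])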
Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), - withCreationTimestamp(creationTimestamp): { spec+: { template+: { metadata+: { creationTimestamp: creationTimestamp } } } }, - '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), - withDeletionGracePeriodSeconds(deletionGracePeriodSeconds): { spec+: { template+: { metadata+: { deletionGracePeriodSeconds: deletionGracePeriodSeconds } } } }, - '#withDeletionTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='deletionTimestamp', type=d.T.string)]), - withDeletionTimestamp(deletionTimestamp): { spec+: { template+: { metadata+: { deletionTimestamp: deletionTimestamp } } } }, - '#withFinalizers':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizers(finalizers): { spec+: { template+: { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } } } }, - '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. 
Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizersMixin(finalizers): { spec+: { template+: { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } } } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), - withGenerateName(generateName): { spec+: { template+: { metadata+: { generateName: generateName } } } }, - '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), - withGeneration(generation): { spec+: { template+: { metadata+: { generation: generation } } } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), - withLabels(labels): { spec+: { template+: { metadata+: { labels: labels } } } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), - withLabelsMixin(labels): { spec+: { template+: { metadata+: { labels+: labels } } } }, - '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFields(managedFields): { spec+: { template+: { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } } } }, - '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. 
This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFieldsMixin(managedFields): { spec+: { template+: { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } } } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { spec+: { template+: { metadata+: { name: name } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { spec+: { template+: { metadata+: { namespace: namespace } } } }, - '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferences(ownerReferences): { spec+: { template+: { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } } } }, - '#withOwnerReferencesMixin':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferencesMixin(ownerReferences): { spec+: { template+: { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } } } }, - '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), - withResourceVersion(resourceVersion): { spec+: { template+: { metadata+: { resourceVersion: resourceVersion } } } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), - withSelfLink(selfLink): { spec+: { template+: { metadata+: { selfLink: selfLink } } } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), - withUid(uid): { spec+: { template+: { metadata+: { uid: uid } } } }, - }, - '#spec':: d.obj(help='"PodSpec is a description of a pod."'), - spec: { - '#affinity':: d.obj(help='"Affinity is a group of affinity scheduling rules."'), - affinity: { - '#nodeAffinity':: d.obj(help='"Node affinity is a group of node affinity scheduling rules."'), - nodeAffinity: { - '#requiredDuringSchedulingIgnoredDuringExecution':: d.obj(help='"A node selector represents the union of the results of one or more label queries over a set of nodes; that is, it represents the OR of the selectors represented by the node selector terms."'), - requiredDuringSchedulingIgnoredDuringExecution: { - '#withNodeSelectorTerms':: d.fn(help='"Required. A list of node selector terms. The terms are ORed."', args=[d.arg(name='nodeSelectorTerms', type=d.T.array)]), - withNodeSelectorTerms(nodeSelectorTerms): { spec+: { template+: { spec+: { affinity+: { nodeAffinity+: { requiredDuringSchedulingIgnoredDuringExecution+: { nodeSelectorTerms: if std.isArray(v=nodeSelectorTerms) then nodeSelectorTerms else [nodeSelectorTerms] } } } } } } }, - '#withNodeSelectorTermsMixin':: d.fn(help='"Required. A list of node selector terms. The terms are ORed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='nodeSelectorTerms', type=d.T.array)]), - withNodeSelectorTermsMixin(nodeSelectorTerms): { spec+: { template+: { spec+: { affinity+: { nodeAffinity+: { requiredDuringSchedulingIgnoredDuringExecution+: { nodeSelectorTerms+: if std.isArray(v=nodeSelectorTerms) then nodeSelectorTerms else [nodeSelectorTerms] } } } } } } }, - }, - '#withPreferredDuringSchedulingIgnoredDuringExecution':: d.fn(help='"The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. 
for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding \\"weight\\" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred."', args=[d.arg(name='preferredDuringSchedulingIgnoredDuringExecution', type=d.T.array)]), - withPreferredDuringSchedulingIgnoredDuringExecution(preferredDuringSchedulingIgnoredDuringExecution): { spec+: { template+: { spec+: { affinity+: { nodeAffinity+: { preferredDuringSchedulingIgnoredDuringExecution: if std.isArray(v=preferredDuringSchedulingIgnoredDuringExecution) then preferredDuringSchedulingIgnoredDuringExecution else [preferredDuringSchedulingIgnoredDuringExecution] } } } } } }, - '#withPreferredDuringSchedulingIgnoredDuringExecutionMixin':: d.fn(help='"The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding \\"weight\\" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='preferredDuringSchedulingIgnoredDuringExecution', type=d.T.array)]), - withPreferredDuringSchedulingIgnoredDuringExecutionMixin(preferredDuringSchedulingIgnoredDuringExecution): { spec+: { template+: { spec+: { affinity+: { nodeAffinity+: { preferredDuringSchedulingIgnoredDuringExecution+: if std.isArray(v=preferredDuringSchedulingIgnoredDuringExecution) then preferredDuringSchedulingIgnoredDuringExecution else [preferredDuringSchedulingIgnoredDuringExecution] } } } } } }, - }, - '#podAffinity':: d.obj(help='"Pod affinity is a group of inter pod affinity scheduling rules."'), - podAffinity: { - '#withPreferredDuringSchedulingIgnoredDuringExecution':: d.fn(help='"The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. 
for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding \\"weight\\" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred."', args=[d.arg(name='preferredDuringSchedulingIgnoredDuringExecution', type=d.T.array)]), - withPreferredDuringSchedulingIgnoredDuringExecution(preferredDuringSchedulingIgnoredDuringExecution): { spec+: { template+: { spec+: { affinity+: { podAffinity+: { preferredDuringSchedulingIgnoredDuringExecution: if std.isArray(v=preferredDuringSchedulingIgnoredDuringExecution) then preferredDuringSchedulingIgnoredDuringExecution else [preferredDuringSchedulingIgnoredDuringExecution] } } } } } }, - '#withPreferredDuringSchedulingIgnoredDuringExecutionMixin':: d.fn(help='"The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding \\"weight\\" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='preferredDuringSchedulingIgnoredDuringExecution', type=d.T.array)]), - withPreferredDuringSchedulingIgnoredDuringExecutionMixin(preferredDuringSchedulingIgnoredDuringExecution): { spec+: { template+: { spec+: { affinity+: { podAffinity+: { preferredDuringSchedulingIgnoredDuringExecution+: if std.isArray(v=preferredDuringSchedulingIgnoredDuringExecution) then preferredDuringSchedulingIgnoredDuringExecution else [preferredDuringSchedulingIgnoredDuringExecution] } } } } } }, - '#withRequiredDuringSchedulingIgnoredDuringExecution':: d.fn(help='"If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied."', args=[d.arg(name='requiredDuringSchedulingIgnoredDuringExecution', type=d.T.array)]), - withRequiredDuringSchedulingIgnoredDuringExecution(requiredDuringSchedulingIgnoredDuringExecution): { spec+: { template+: { spec+: { affinity+: { podAffinity+: { requiredDuringSchedulingIgnoredDuringExecution: if std.isArray(v=requiredDuringSchedulingIgnoredDuringExecution) then requiredDuringSchedulingIgnoredDuringExecution else [requiredDuringSchedulingIgnoredDuringExecution] } } } } } }, - '#withRequiredDuringSchedulingIgnoredDuringExecutionMixin':: d.fn(help='"If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. 
due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='requiredDuringSchedulingIgnoredDuringExecution', type=d.T.array)]), - withRequiredDuringSchedulingIgnoredDuringExecutionMixin(requiredDuringSchedulingIgnoredDuringExecution): { spec+: { template+: { spec+: { affinity+: { podAffinity+: { requiredDuringSchedulingIgnoredDuringExecution+: if std.isArray(v=requiredDuringSchedulingIgnoredDuringExecution) then requiredDuringSchedulingIgnoredDuringExecution else [requiredDuringSchedulingIgnoredDuringExecution] } } } } } }, - }, - '#podAntiAffinity':: d.obj(help='"Pod anti affinity is a group of inter pod anti affinity scheduling rules."'), - podAntiAffinity: { - '#withPreferredDuringSchedulingIgnoredDuringExecution':: d.fn(help='"The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding \\"weight\\" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred."', args=[d.arg(name='preferredDuringSchedulingIgnoredDuringExecution', type=d.T.array)]), - withPreferredDuringSchedulingIgnoredDuringExecution(preferredDuringSchedulingIgnoredDuringExecution): { spec+: { template+: { spec+: { affinity+: { podAntiAffinity+: { preferredDuringSchedulingIgnoredDuringExecution: if std.isArray(v=preferredDuringSchedulingIgnoredDuringExecution) then preferredDuringSchedulingIgnoredDuringExecution else [preferredDuringSchedulingIgnoredDuringExecution] } } } } } }, - '#withPreferredDuringSchedulingIgnoredDuringExecutionMixin':: d.fn(help='"The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. 
for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding \\"weight\\" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='preferredDuringSchedulingIgnoredDuringExecution', type=d.T.array)]), - withPreferredDuringSchedulingIgnoredDuringExecutionMixin(preferredDuringSchedulingIgnoredDuringExecution): { spec+: { template+: { spec+: { affinity+: { podAntiAffinity+: { preferredDuringSchedulingIgnoredDuringExecution+: if std.isArray(v=preferredDuringSchedulingIgnoredDuringExecution) then preferredDuringSchedulingIgnoredDuringExecution else [preferredDuringSchedulingIgnoredDuringExecution] } } } } } }, - '#withRequiredDuringSchedulingIgnoredDuringExecution':: d.fn(help='"If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied."', args=[d.arg(name='requiredDuringSchedulingIgnoredDuringExecution', type=d.T.array)]), - withRequiredDuringSchedulingIgnoredDuringExecution(requiredDuringSchedulingIgnoredDuringExecution): { spec+: { template+: { spec+: { affinity+: { podAntiAffinity+: { requiredDuringSchedulingIgnoredDuringExecution: if std.isArray(v=requiredDuringSchedulingIgnoredDuringExecution) then requiredDuringSchedulingIgnoredDuringExecution else [requiredDuringSchedulingIgnoredDuringExecution] } } } } } }, - '#withRequiredDuringSchedulingIgnoredDuringExecutionMixin':: d.fn(help='"If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='requiredDuringSchedulingIgnoredDuringExecution', type=d.T.array)]), - withRequiredDuringSchedulingIgnoredDuringExecutionMixin(requiredDuringSchedulingIgnoredDuringExecution): { spec+: { template+: { spec+: { affinity+: { podAntiAffinity+: { requiredDuringSchedulingIgnoredDuringExecution+: if std.isArray(v=requiredDuringSchedulingIgnoredDuringExecution) then requiredDuringSchedulingIgnoredDuringExecution else [requiredDuringSchedulingIgnoredDuringExecution] } } } } } }, - }, - }, - '#dnsConfig':: d.obj(help='"PodDNSConfig defines the DNS parameters of a pod in addition to those generated from DNSPolicy."'), - dnsConfig: { - '#withNameservers':: d.fn(help='"A list of DNS name server IP addresses. This will be appended to the base nameservers generated from DNSPolicy. 
Duplicated nameservers will be removed."', args=[d.arg(name='nameservers', type=d.T.array)]), - withNameservers(nameservers): { spec+: { template+: { spec+: { dnsConfig+: { nameservers: if std.isArray(v=nameservers) then nameservers else [nameservers] } } } } }, - '#withNameserversMixin':: d.fn(help='"A list of DNS name server IP addresses. This will be appended to the base nameservers generated from DNSPolicy. Duplicated nameservers will be removed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='nameservers', type=d.T.array)]), - withNameserversMixin(nameservers): { spec+: { template+: { spec+: { dnsConfig+: { nameservers+: if std.isArray(v=nameservers) then nameservers else [nameservers] } } } } }, - '#withOptions':: d.fn(help='"A list of DNS resolver options. This will be merged with the base options generated from DNSPolicy. Duplicated entries will be removed. Resolution options given in Options will override those that appear in the base DNSPolicy."', args=[d.arg(name='options', type=d.T.array)]), - withOptions(options): { spec+: { template+: { spec+: { dnsConfig+: { options: if std.isArray(v=options) then options else [options] } } } } }, - '#withOptionsMixin':: d.fn(help='"A list of DNS resolver options. This will be merged with the base options generated from DNSPolicy. Duplicated entries will be removed. Resolution options given in Options will override those that appear in the base DNSPolicy."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='options', type=d.T.array)]), - withOptionsMixin(options): { spec+: { template+: { spec+: { dnsConfig+: { options+: if std.isArray(v=options) then options else [options] } } } } }, - '#withSearches':: d.fn(help='"A list of DNS search domains for host-name lookup. This will be appended to the base search paths generated from DNSPolicy. Duplicated search paths will be removed."', args=[d.arg(name='searches', type=d.T.array)]), - withSearches(searches): { spec+: { template+: { spec+: { dnsConfig+: { searches: if std.isArray(v=searches) then searches else [searches] } } } } }, - '#withSearchesMixin':: d.fn(help='"A list of DNS search domains for host-name lookup. This will be appended to the base search paths generated from DNSPolicy. Duplicated search paths will be removed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='searches', type=d.T.array)]), - withSearchesMixin(searches): { spec+: { template+: { spec+: { dnsConfig+: { searches+: if std.isArray(v=searches) then searches else [searches] } } } } }, - }, - '#securityContext':: d.obj(help='"PodSecurityContext holds pod-level security attributes and common container settings. Some fields are also present in container.securityContext. 
Field values of container.securityContext take precedence over field values of PodSecurityContext."'), - securityContext: { - '#seLinuxOptions':: d.obj(help='"SELinuxOptions are the labels to be applied to the container"'), - seLinuxOptions: { - '#withLevel':: d.fn(help='"Level is SELinux level label that applies to the container."', args=[d.arg(name='level', type=d.T.string)]), - withLevel(level): { spec+: { template+: { spec+: { securityContext+: { seLinuxOptions+: { level: level } } } } } }, - '#withRole':: d.fn(help='"Role is a SELinux role label that applies to the container."', args=[d.arg(name='role', type=d.T.string)]), - withRole(role): { spec+: { template+: { spec+: { securityContext+: { seLinuxOptions+: { role: role } } } } } }, - '#withType':: d.fn(help='"Type is a SELinux type label that applies to the container."', args=[d.arg(name='type', type=d.T.string)]), - withType(type): { spec+: { template+: { spec+: { securityContext+: { seLinuxOptions+: { type: type } } } } } }, - '#withUser':: d.fn(help='"User is a SELinux user label that applies to the container."', args=[d.arg(name='user', type=d.T.string)]), - withUser(user): { spec+: { template+: { spec+: { securityContext+: { seLinuxOptions+: { user: user } } } } } }, - }, - '#seccompProfile':: d.obj(help="\"SeccompProfile defines a pod/container's seccomp profile settings. Only one profile source may be set.\""), - seccompProfile: { - '#withLocalhostProfile':: d.fn(help="\"localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must only be set if type is \\\"Localhost\\\".\"", args=[d.arg(name='localhostProfile', type=d.T.string)]), - withLocalhostProfile(localhostProfile): { spec+: { template+: { spec+: { securityContext+: { seccompProfile+: { localhostProfile: localhostProfile } } } } } }, - '#withType':: d.fn(help='"type indicates which kind of seccomp profile will be applied. Valid options are:\\n\\nLocalhost - a profile defined in a file on the node should be used. RuntimeDefault - the container runtime default profile should be used. Unconfined - no profile should be applied."', args=[d.arg(name='type', type=d.T.string)]), - withType(type): { spec+: { template+: { spec+: { securityContext+: { seccompProfile+: { type: type } } } } } }, - }, - '#windowsOptions':: d.obj(help='"WindowsSecurityContextOptions contain Windows-specific options and credentials."'), - windowsOptions: { - '#withGmsaCredentialSpec':: d.fn(help='"GMSACredentialSpec is where the GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the GMSA credential spec named by the GMSACredentialSpecName field."', args=[d.arg(name='gmsaCredentialSpec', type=d.T.string)]), - withGmsaCredentialSpec(gmsaCredentialSpec): { spec+: { template+: { spec+: { securityContext+: { windowsOptions+: { gmsaCredentialSpec: gmsaCredentialSpec } } } } } }, - '#withGmsaCredentialSpecName':: d.fn(help='"GMSACredentialSpecName is the name of the GMSA credential spec to use."', args=[d.arg(name='gmsaCredentialSpecName', type=d.T.string)]), - withGmsaCredentialSpecName(gmsaCredentialSpecName): { spec+: { template+: { spec+: { securityContext+: { windowsOptions+: { gmsaCredentialSpecName: gmsaCredentialSpecName } } } } } }, - '#withRunAsUserName':: d.fn(help='"The UserName in Windows to run the entrypoint of the container process. 
Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence."', args=[d.arg(name='runAsUserName', type=d.T.string)]), - withRunAsUserName(runAsUserName): { spec+: { template+: { spec+: { securityContext+: { windowsOptions+: { runAsUserName: runAsUserName } } } } } }, - }, - '#withFsGroup':: d.fn(help="\"A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod:\\n\\n1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw----\\n\\nIf unset, the Kubelet will not modify the ownership and permissions of any volume.\"", args=[d.arg(name='fsGroup', type=d.T.integer)]), - withFsGroup(fsGroup): { spec+: { template+: { spec+: { securityContext+: { fsGroup: fsGroup } } } } }, - '#withFsGroupChangePolicy':: d.fn(help='"fsGroupChangePolicy defines behavior of changing ownership and permission of the volume before being exposed inside Pod. This field will only apply to volume types which support fsGroup based ownership(and permissions). It will have no effect on ephemeral volume types such as: secret, configmaps and emptydir. Valid values are \\"OnRootMismatch\\" and \\"Always\\". If not specified, \\"Always\\" is used."', args=[d.arg(name='fsGroupChangePolicy', type=d.T.string)]), - withFsGroupChangePolicy(fsGroupChangePolicy): { spec+: { template+: { spec+: { securityContext+: { fsGroupChangePolicy: fsGroupChangePolicy } } } } }, - '#withRunAsGroup':: d.fn(help='"The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container."', args=[d.arg(name='runAsGroup', type=d.T.integer)]), - withRunAsGroup(runAsGroup): { spec+: { template+: { spec+: { securityContext+: { runAsGroup: runAsGroup } } } } }, - '#withRunAsNonRoot':: d.fn(help='"Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence."', args=[d.arg(name='runAsNonRoot', type=d.T.boolean)]), - withRunAsNonRoot(runAsNonRoot): { spec+: { template+: { spec+: { securityContext+: { runAsNonRoot: runAsNonRoot } } } } }, - '#withRunAsUser':: d.fn(help='"The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container."', args=[d.arg(name='runAsUser', type=d.T.integer)]), - withRunAsUser(runAsUser): { spec+: { template+: { spec+: { securityContext+: { runAsUser: runAsUser } } } } }, - '#withSupplementalGroups':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID. 
If unspecified, no groups will be added to any container.\"", args=[d.arg(name='supplementalGroups', type=d.T.array)]), - withSupplementalGroups(supplementalGroups): { spec+: { template+: { spec+: { securityContext+: { supplementalGroups: if std.isArray(v=supplementalGroups) then supplementalGroups else [supplementalGroups] } } } } }, - '#withSupplementalGroupsMixin':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID. If unspecified, no groups will be added to any container.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='supplementalGroups', type=d.T.array)]), - withSupplementalGroupsMixin(supplementalGroups): { spec+: { template+: { spec+: { securityContext+: { supplementalGroups+: if std.isArray(v=supplementalGroups) then supplementalGroups else [supplementalGroups] } } } } }, - '#withSysctls':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch."', args=[d.arg(name='sysctls', type=d.T.array)]), - withSysctls(sysctls): { spec+: { template+: { spec+: { securityContext+: { sysctls: if std.isArray(v=sysctls) then sysctls else [sysctls] } } } } }, - '#withSysctlsMixin':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='sysctls', type=d.T.array)]), - withSysctlsMixin(sysctls): { spec+: { template+: { spec+: { securityContext+: { sysctls+: if std.isArray(v=sysctls) then sysctls else [sysctls] } } } } }, - }, - '#withActiveDeadlineSeconds':: d.fn(help='"Optional duration in seconds the pod may be active on the node relative to StartTime before the system will actively try to mark it failed and kill associated containers. Value must be a positive integer."', args=[d.arg(name='activeDeadlineSeconds', type=d.T.integer)]), - withActiveDeadlineSeconds(activeDeadlineSeconds): { spec+: { template+: { spec+: { activeDeadlineSeconds: activeDeadlineSeconds } } } }, - '#withAutomountServiceAccountToken':: d.fn(help='"AutomountServiceAccountToken indicates whether a service account token should be automatically mounted."', args=[d.arg(name='automountServiceAccountToken', type=d.T.boolean)]), - withAutomountServiceAccountToken(automountServiceAccountToken): { spec+: { template+: { spec+: { automountServiceAccountToken: automountServiceAccountToken } } } }, - '#withContainers':: d.fn(help='"List of containers belonging to the pod. Containers cannot currently be added or removed. There must be at least one container in a Pod. Cannot be updated."', args=[d.arg(name='containers', type=d.T.array)]), - withContainers(containers): { spec+: { template+: { spec+: { containers: if std.isArray(v=containers) then containers else [containers] } } } }, - '#withContainersMixin':: d.fn(help='"List of containers belonging to the pod. Containers cannot currently be added or removed. There must be at least one container in a Pod. Cannot be updated."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='containers', type=d.T.array)]), - withContainersMixin(containers): { spec+: { template+: { spec+: { containers+: if std.isArray(v=containers) then containers else [containers] } } } }, - '#withDnsPolicy':: d.fn(help="\"Set DNS policy for the pod. Defaults to \\\"ClusterFirst\\\". 
Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. To have DNS options set along with hostNetwork, you have to specify DNS policy explicitly to 'ClusterFirstWithHostNet'.\"", args=[d.arg(name='dnsPolicy', type=d.T.string)]), - withDnsPolicy(dnsPolicy): { spec+: { template+: { spec+: { dnsPolicy: dnsPolicy } } } }, - '#withEnableServiceLinks':: d.fn(help="\"EnableServiceLinks indicates whether information about services should be injected into pod's environment variables, matching the syntax of Docker links. Optional: Defaults to true.\"", args=[d.arg(name='enableServiceLinks', type=d.T.boolean)]), - withEnableServiceLinks(enableServiceLinks): { spec+: { template+: { spec+: { enableServiceLinks: enableServiceLinks } } } }, - '#withEphemeralContainers':: d.fn(help="\"List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. This field is alpha-level and is only honored by servers that enable the EphemeralContainers feature.\"", args=[d.arg(name='ephemeralContainers', type=d.T.array)]), - withEphemeralContainers(ephemeralContainers): { spec+: { template+: { spec+: { ephemeralContainers: if std.isArray(v=ephemeralContainers) then ephemeralContainers else [ephemeralContainers] } } } }, - '#withEphemeralContainersMixin':: d.fn(help="\"List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. This field is alpha-level and is only honored by servers that enable the EphemeralContainers feature.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='ephemeralContainers', type=d.T.array)]), - withEphemeralContainersMixin(ephemeralContainers): { spec+: { template+: { spec+: { ephemeralContainers+: if std.isArray(v=ephemeralContainers) then ephemeralContainers else [ephemeralContainers] } } } }, - '#withHostAliases':: d.fn(help="\"HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts file if specified. This is only valid for non-hostNetwork pods.\"", args=[d.arg(name='hostAliases', type=d.T.array)]), - withHostAliases(hostAliases): { spec+: { template+: { spec+: { hostAliases: if std.isArray(v=hostAliases) then hostAliases else [hostAliases] } } } }, - '#withHostAliasesMixin':: d.fn(help="\"HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts file if specified. This is only valid for non-hostNetwork pods.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='hostAliases', type=d.T.array)]), - withHostAliasesMixin(hostAliases): { spec+: { template+: { spec+: { hostAliases+: if std.isArray(v=hostAliases) then hostAliases else [hostAliases] } } } }, - '#withHostIPC':: d.fn(help="\"Use the host's ipc namespace. 
Optional: Default to false.\"", args=[d.arg(name='hostIPC', type=d.T.boolean)]), - withHostIPC(hostIPC): { spec+: { template+: { spec+: { hostIPC: hostIPC } } } }, - '#withHostNetwork':: d.fn(help="\"Host networking requested for this pod. Use the host's network namespace. If this option is set, the ports that will be used must be specified. Default to false.\"", args=[d.arg(name='hostNetwork', type=d.T.boolean)]), - withHostNetwork(hostNetwork): { spec+: { template+: { spec+: { hostNetwork: hostNetwork } } } }, - '#withHostPID':: d.fn(help="\"Use the host's pid namespace. Optional: Default to false.\"", args=[d.arg(name='hostPID', type=d.T.boolean)]), - withHostPID(hostPID): { spec+: { template+: { spec+: { hostPID: hostPID } } } }, - '#withHostname':: d.fn(help="\"Specifies the hostname of the Pod If not specified, the pod's hostname will be set to a system-defined value.\"", args=[d.arg(name='hostname', type=d.T.string)]), - withHostname(hostname): { spec+: { template+: { spec+: { hostname: hostname } } } }, - '#withImagePullSecrets':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), - withImagePullSecrets(imagePullSecrets): { spec+: { template+: { spec+: { imagePullSecrets: if std.isArray(v=imagePullSecrets) then imagePullSecrets else [imagePullSecrets] } } } }, - '#withImagePullSecretsMixin':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), - withImagePullSecretsMixin(imagePullSecrets): { spec+: { template+: { spec+: { imagePullSecrets+: if std.isArray(v=imagePullSecrets) then imagePullSecrets else [imagePullSecrets] } } } }, - '#withInitContainers':: d.fn(help='"List of initialization containers belonging to the pod. Init containers are executed in order prior to containers being started. If any init container fails, the pod is considered to have failed and is handled according to its restartPolicy. The name for an init container or normal container must be unique among all containers. Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes. The resourceRequirements of an init container are taken into account during scheduling by finding the highest request/limit for each resource type, and then using the max of of that value or the sum of the normal containers. Limits are applied to init containers in a similar fashion. Init containers cannot currently be added or removed. Cannot be updated. 
More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/"', args=[d.arg(name='initContainers', type=d.T.array)]), - withInitContainers(initContainers): { spec+: { template+: { spec+: { initContainers: if std.isArray(v=initContainers) then initContainers else [initContainers] } } } }, - '#withInitContainersMixin':: d.fn(help='"List of initialization containers belonging to the pod. Init containers are executed in order prior to containers being started. If any init container fails, the pod is considered to have failed and is handled according to its restartPolicy. The name for an init container or normal container must be unique among all containers. Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes. The resourceRequirements of an init container are taken into account during scheduling by finding the highest request/limit for each resource type, and then using the max of of that value or the sum of the normal containers. Limits are applied to init containers in a similar fashion. Init containers cannot currently be added or removed. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='initContainers', type=d.T.array)]), - withInitContainersMixin(initContainers): { spec+: { template+: { spec+: { initContainers+: if std.isArray(v=initContainers) then initContainers else [initContainers] } } } }, - '#withNodeName':: d.fn(help='"NodeName is a request to schedule this pod onto a specific node. If it is non-empty, the scheduler simply schedules this pod onto that node, assuming that it fits resource requirements."', args=[d.arg(name='nodeName', type=d.T.string)]), - withNodeName(nodeName): { spec+: { template+: { spec+: { nodeName: nodeName } } } }, - '#withNodeSelector':: d.fn(help="\"NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/\"", args=[d.arg(name='nodeSelector', type=d.T.object)]), - withNodeSelector(nodeSelector): { spec+: { template+: { spec+: { nodeSelector: nodeSelector } } } }, - '#withNodeSelectorMixin':: d.fn(help="\"NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='nodeSelector', type=d.T.object)]), - withNodeSelectorMixin(nodeSelector): { spec+: { template+: { spec+: { nodeSelector+: nodeSelector } } } }, - '#withOverhead':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. 
More info: https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md This field is alpha-level as of Kubernetes v1.16, and is only honored by servers that enable the PodOverhead feature."', args=[d.arg(name='overhead', type=d.T.object)]), - withOverhead(overhead): { spec+: { template+: { spec+: { overhead: overhead } } } }, - '#withOverheadMixin':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md This field is alpha-level as of Kubernetes v1.16, and is only honored by servers that enable the PodOverhead feature."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='overhead', type=d.T.object)]), - withOverheadMixin(overhead): { spec+: { template+: { spec+: { overhead+: overhead } } } }, - '#withPreemptionPolicy':: d.fn(help='"PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset. This field is beta-level, gated by the NonPreemptingPriority feature-gate."', args=[d.arg(name='preemptionPolicy', type=d.T.string)]), - withPreemptionPolicy(preemptionPolicy): { spec+: { template+: { spec+: { preemptionPolicy: preemptionPolicy } } } }, - '#withPriority':: d.fn(help='"The priority value. Various system components use this field to find the priority of the pod. When Priority Admission Controller is enabled, it prevents users from setting this field. The admission controller populates this field from PriorityClassName. The higher the value, the higher the priority."', args=[d.arg(name='priority', type=d.T.integer)]), - withPriority(priority): { spec+: { template+: { spec+: { priority: priority } } } }, - '#withPriorityClassName':: d.fn(help="\"If specified, indicates the pod's priority. \\\"system-node-critical\\\" and \\\"system-cluster-critical\\\" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default.\"", args=[d.arg(name='priorityClassName', type=d.T.string)]), - withPriorityClassName(priorityClassName): { spec+: { template+: { spec+: { priorityClassName: priorityClassName } } } }, - '#withReadinessGates':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. 
A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/0007-pod-ready%2B%2B.md"', args=[d.arg(name='readinessGates', type=d.T.array)]), - withReadinessGates(readinessGates): { spec+: { template+: { spec+: { readinessGates: if std.isArray(v=readinessGates) then readinessGates else [readinessGates] } } } }, - '#withReadinessGatesMixin':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/0007-pod-ready%2B%2B.md"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='readinessGates', type=d.T.array)]), - withReadinessGatesMixin(readinessGates): { spec+: { template+: { spec+: { readinessGates+: if std.isArray(v=readinessGates) then readinessGates else [readinessGates] } } } }, - '#withRestartPolicy':: d.fn(help='"Restart policy for all containers within the pod. One of Always, OnFailure, Never. Default to Always. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy"', args=[d.arg(name='restartPolicy', type=d.T.string)]), - withRestartPolicy(restartPolicy): { spec+: { template+: { spec+: { restartPolicy: restartPolicy } } } }, - '#withRuntimeClassName':: d.fn(help='"RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \\"legacy\\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md This is a beta feature as of Kubernetes v1.14."', args=[d.arg(name='runtimeClassName', type=d.T.string)]), - withRuntimeClassName(runtimeClassName): { spec+: { template+: { spec+: { runtimeClassName: runtimeClassName } } } }, - '#withSchedulerName':: d.fn(help='"If specified, the pod will be dispatched by specified scheduler. If not specified, the pod will be dispatched by default scheduler."', args=[d.arg(name='schedulerName', type=d.T.string)]), - withSchedulerName(schedulerName): { spec+: { template+: { spec+: { schedulerName: schedulerName } } } }, - '#withServiceAccount':: d.fn(help='"DeprecatedServiceAccount is a depreciated alias for ServiceAccountName. Deprecated: Use serviceAccountName instead."', args=[d.arg(name='serviceAccount', type=d.T.string)]), - withServiceAccount(serviceAccount): { spec+: { template+: { spec+: { serviceAccount: serviceAccount } } } }, - '#withServiceAccountName':: d.fn(help='"ServiceAccountName is the name of the ServiceAccount to use to run this pod. More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/"', args=[d.arg(name='serviceAccountName', type=d.T.string)]), - withServiceAccountName(serviceAccountName): { spec+: { template+: { spec+: { serviceAccountName: serviceAccountName } } } }, - '#withSetHostnameAsFQDN':: d.fn(help="\"If true the pod's hostname will be configured as the pod's FQDN, rather than the leaf name (the default). In Linux containers, this means setting the FQDN in the hostname field of the kernel (the nodename field of struct utsname). 
In Windows containers, this means setting the registry value of hostname for the registry key HKEY_LOCAL_MACHINE\\\\SYSTEM\\\\CurrentControlSet\\\\Services\\\\Tcpip\\\\Parameters to FQDN. If a pod does not have FQDN, this has no effect. Default to false.\"", args=[d.arg(name='setHostnameAsFQDN', type=d.T.boolean)]), - withSetHostnameAsFQDN(setHostnameAsFQDN): { spec+: { template+: { spec+: { setHostnameAsFQDN: setHostnameAsFQDN } } } }, - '#withShareProcessNamespace':: d.fn(help='"Share a single process namespace between all of the containers in a pod. When this is set containers will be able to view and signal processes from other containers in the same pod, and the first process in each container will not be assigned PID 1. HostPID and ShareProcessNamespace cannot both be set. Optional: Default to false."', args=[d.arg(name='shareProcessNamespace', type=d.T.boolean)]), - withShareProcessNamespace(shareProcessNamespace): { spec+: { template+: { spec+: { shareProcessNamespace: shareProcessNamespace } } } }, - '#withSubdomain':: d.fn(help='"If specified, the fully qualified Pod hostname will be \\"...svc.\\". If not specified, the pod will not have a domainname at all."', args=[d.arg(name='subdomain', type=d.T.string)]), - withSubdomain(subdomain): { spec+: { template+: { spec+: { subdomain: subdomain } } } }, - '#withTerminationGracePeriodSeconds':: d.fn(help='"Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). If this value is nil, the default grace period will be used instead. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. Defaults to 30 seconds."', args=[d.arg(name='terminationGracePeriodSeconds', type=d.T.integer)]), - withTerminationGracePeriodSeconds(terminationGracePeriodSeconds): { spec+: { template+: { spec+: { terminationGracePeriodSeconds: terminationGracePeriodSeconds } } } }, - '#withTolerations':: d.fn(help="\"If specified, the pod's tolerations.\"", args=[d.arg(name='tolerations', type=d.T.array)]), - withTolerations(tolerations): { spec+: { template+: { spec+: { tolerations: if std.isArray(v=tolerations) then tolerations else [tolerations] } } } }, - '#withTolerationsMixin':: d.fn(help="\"If specified, the pod's tolerations.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='tolerations', type=d.T.array)]), - withTolerationsMixin(tolerations): { spec+: { template+: { spec+: { tolerations+: if std.isArray(v=tolerations) then tolerations else [tolerations] } } } }, - '#withTopologySpreadConstraints':: d.fn(help='"TopologySpreadConstraints describes how a group of pods ought to spread across topology domains. Scheduler will schedule pods in a way which abides by the constraints. 
All topologySpreadConstraints are ANDed."', args=[d.arg(name='topologySpreadConstraints', type=d.T.array)]), - withTopologySpreadConstraints(topologySpreadConstraints): { spec+: { template+: { spec+: { topologySpreadConstraints: if std.isArray(v=topologySpreadConstraints) then topologySpreadConstraints else [topologySpreadConstraints] } } } }, - '#withTopologySpreadConstraintsMixin':: d.fn(help='"TopologySpreadConstraints describes how a group of pods ought to spread across topology domains. Scheduler will schedule pods in a way which abides by the constraints. All topologySpreadConstraints are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='topologySpreadConstraints', type=d.T.array)]), - withTopologySpreadConstraintsMixin(topologySpreadConstraints): { spec+: { template+: { spec+: { topologySpreadConstraints+: if std.isArray(v=topologySpreadConstraints) then topologySpreadConstraints else [topologySpreadConstraints] } } } }, - '#withVolumes':: d.fn(help='"List of volumes that can be mounted by containers belonging to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes"', args=[d.arg(name='volumes', type=d.T.array)]), - withVolumes(volumes): { spec+: { template+: { spec+: { volumes: if std.isArray(v=volumes) then volumes else [volumes] } } } }, - '#withVolumesMixin':: d.fn(help='"List of volumes that can be mounted by containers belonging to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='volumes', type=d.T.array)]), - withVolumesMixin(volumes): { spec+: { template+: { spec+: { volumes+: if std.isArray(v=volumes) then volumes else [volumes] } } } }, - }, - }, - '#withActiveDeadlineSeconds':: d.fn(help='"Specifies the duration in seconds relative to the startTime that the job may be continuously active before the system tries to terminate it; value must be positive integer. If a Job is suspended (at creation or through an update), this timer will effectively be stopped and reset when the Job is resumed again."', args=[d.arg(name='activeDeadlineSeconds', type=d.T.integer)]), - withActiveDeadlineSeconds(activeDeadlineSeconds): { spec+: { activeDeadlineSeconds: activeDeadlineSeconds } }, - '#withBackoffLimit':: d.fn(help='"Specifies the number of retries before marking this job failed. Defaults to 6"', args=[d.arg(name='backoffLimit', type=d.T.integer)]), - withBackoffLimit(backoffLimit): { spec+: { backoffLimit: backoffLimit } }, - '#withCompletionMode':: d.fn(help="\"CompletionMode specifies how Pod completions are tracked. It can be `NonIndexed` (default) or `Indexed`.\\n\\n`NonIndexed` means that the Job is considered complete when there have been .spec.completions successfully completed Pods. Each Pod completion is homologous to each other.\\n\\n`Indexed` means that the Pods of a Job get an associated completion index from 0 to (.spec.completions - 1), available in the annotation batch.kubernetes.io/job-completion-index. The Job is considered complete when there is one successfully completed Pod for each index. When value is `Indexed`, .spec.completions must be specified and `.spec.parallelism` must be less than or equal to 10^5.\\n\\nThis field is alpha-level and is only honored by servers that enable the IndexedJob feature gate. More completion modes can be added in the future. 
If the Job controller observes a mode that it doesn't recognize, the controller skips updates for the Job.\"", args=[d.arg(name='completionMode', type=d.T.string)]), - withCompletionMode(completionMode): { spec+: { completionMode: completionMode } }, - '#withCompletions':: d.fn(help='"Specifies the desired number of successfully finished pods the job should be run with. Setting to nil means that the success of any pod signals the success of all pods, and allows parallelism to have any positive value. Setting to 1 means that parallelism is limited to 1 and the success of that pod signals the success of the job. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/"', args=[d.arg(name='completions', type=d.T.integer)]), - withCompletions(completions): { spec+: { completions: completions } }, - '#withManualSelector':: d.fn(help='"manualSelector controls generation of pod labels and pod selectors. Leave `manualSelector` unset unless you are certain what you are doing. When false or unset, the system pick labels unique to this job and appends those labels to the pod template. When true, the user is responsible for picking unique labels and specifying the selector. Failure to pick a unique label may cause this and other jobs to not function correctly. However, You may see `manualSelector=true` in jobs that were created with the old `extensions/v1beta1` API. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/#specifying-your-own-pod-selector"', args=[d.arg(name='manualSelector', type=d.T.boolean)]), - withManualSelector(manualSelector): { spec+: { manualSelector: manualSelector } }, - '#withParallelism':: d.fn(help='"Specifies the maximum desired number of pods the job should run at any given time. The actual number of pods running in steady state will be less than this number when ((.spec.completions - .status.successful) < .spec.parallelism), i.e. when the work left to do is less than max parallelism. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/"', args=[d.arg(name='parallelism', type=d.T.integer)]), - withParallelism(parallelism): { spec+: { parallelism: parallelism } }, - '#withSuspend':: d.fn(help='"Suspend specifies whether the Job controller should create Pods or not. If a Job is created with suspend set to true, no Pods are created by the Job controller. If a Job is suspended after creation (i.e. the flag goes from false to true), the Job controller will delete all active Pods associated with this Job. Users must design their workload to gracefully handle this. Suspending a Job will reset the StartTime field of the Job, effectively resetting the ActiveDeadlineSeconds timer too. This is an alpha field and requires the SuspendJob feature gate to be enabled; otherwise this field may not be set to true. Defaults to false."', args=[d.arg(name='suspend', type=d.T.boolean)]), - withSuspend(suspend): { spec+: { suspend: suspend } }, - '#withTtlSecondsAfterFinished':: d.fn(help="\"ttlSecondsAfterFinished limits the lifetime of a Job that has finished execution (either Complete or Failed). If this field is set, ttlSecondsAfterFinished after the Job finishes, it is eligible to be automatically deleted. When the Job is being deleted, its lifecycle guarantees (e.g. finalizers) will be honored. If this field is unset, the Job won't be automatically deleted. If this field is set to zero, the Job becomes eligible to be deleted immediately after it finishes. 
This field is alpha-level and is only honored by servers that enable the TTLAfterFinished feature.\"", args=[d.arg(name='ttlSecondsAfterFinished', type=d.T.integer)]), - withTtlSecondsAfterFinished(ttlSecondsAfterFinished): { spec+: { ttlSecondsAfterFinished: ttlSecondsAfterFinished } }, - }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/batch/v1beta1/main.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/batch/v1beta1/main.libsonnet deleted file mode 100644 index e9498f60d44..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/batch/v1beta1/main.libsonnet +++ /dev/null @@ -1,8 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='v1beta1', url='', help=''), - cronJob: (import 'cronJob.libsonnet'), - cronJobSpec: (import 'cronJobSpec.libsonnet'), - cronJobStatus: (import 'cronJobStatus.libsonnet'), - jobTemplateSpec: (import 'jobTemplateSpec.libsonnet'), -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/certificates/v1beta1/certificateSigningRequest.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/certificates/v1beta1/certificateSigningRequest.libsonnet deleted file mode 100644 index b0e900695eb..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/certificates/v1beta1/certificateSigningRequest.libsonnet +++ /dev/null @@ -1,79 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='certificateSigningRequest', url='', help='"Describes a certificate signing request"'), - '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), - metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), - withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), - withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, - '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. 
Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), - withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, - '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), - withDeletionGracePeriodSeconds(deletionGracePeriodSeconds): { metadata+: { deletionGracePeriodSeconds: deletionGracePeriodSeconds } }, - '#withDeletionTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='deletionTimestamp', type=d.T.string)]), - withDeletionTimestamp(deletionTimestamp): { metadata+: { deletionTimestamp: deletionTimestamp } }, - '#withFinalizers':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. 
If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), - withGenerateName(generateName): { metadata+: { generateName: generateName } }, - '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), - withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), - withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), - withLabelsMixin(labels): { metadata+: { labels+: labels } }, - '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. 
Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { metadata+: { namespace: namespace } }, - '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, - '#withOwnerReferencesMixin':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, - '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), - withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), - withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. 
It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), - withUid(uid): { metadata+: { uid: uid } }, - }, - '#new':: d.fn(help='new returns an instance of CertificateSigningRequest', args=[d.arg(name='name', type=d.T.string)]), - new(name): { - apiVersion: 'certificates.k8s.io/v1beta1', - kind: 'CertificateSigningRequest', - } + self.metadata.withName(name=name), - '#spec':: d.obj(help='"This information is immutable after the request is created. Only the Request and Usages fields can be set on creation, other fields are derived by Kubernetes and cannot be modified by users."'), - spec: { - '#withExtra':: d.fn(help='"Extra information about the requesting user. See user.Info interface for details."', args=[d.arg(name='extra', type=d.T.object)]), - withExtra(extra): { spec+: { extra: extra } }, - '#withExtraMixin':: d.fn(help='"Extra information about the requesting user. See user.Info interface for details."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='extra', type=d.T.object)]), - withExtraMixin(extra): { spec+: { extra+: extra } }, - '#withGroups':: d.fn(help='"Group information about the requesting user. See user.Info interface for details."', args=[d.arg(name='groups', type=d.T.array)]), - withGroups(groups): { spec+: { groups: if std.isArray(v=groups) then groups else [groups] } }, - '#withGroupsMixin':: d.fn(help='"Group information about the requesting user. See user.Info interface for details."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='groups', type=d.T.array)]), - withGroupsMixin(groups): { spec+: { groups+: if std.isArray(v=groups) then groups else [groups] } }, - '#withRequest':: d.fn(help='"Base64-encoded PKCS#10 CSR data"', args=[d.arg(name='request', type=d.T.string)]), - withRequest(request): { spec+: { request: request } }, - '#withSignerName':: d.fn(help="\"Requested signer for the request. It is a qualified name in the form: `scope-hostname.io/name`. If empty, it will be defaulted:\\n 1. If it's a kubelet client certificate, it is assigned\\n \\\"kubernetes.io/kube-apiserver-client-kubelet\\\".\\n 2. If it's a kubelet serving certificate, it is assigned\\n \\\"kubernetes.io/kubelet-serving\\\".\\n 3. Otherwise, it is assigned \\\"kubernetes.io/legacy-unknown\\\".\\nDistribution of trust for signers happens out of band. You can select on this field using `spec.signerName`.\"", args=[d.arg(name='signerName', type=d.T.string)]), - withSignerName(signerName): { spec+: { signerName: signerName } }, - '#withUid':: d.fn(help='"UID information about the requesting user. See user.Info interface for details."', args=[d.arg(name='uid', type=d.T.string)]), - withUid(uid): { spec+: { uid: uid } }, - '#withUsages':: d.fn(help='"allowedUsages specifies a set of usage contexts the key will be valid for. 
See: https://tools.ietf.org/html/rfc5280#section-4.2.1.3\\n https://tools.ietf.org/html/rfc5280#section-4.2.1.12\\nValid values are:\\n \\"signing\\",\\n \\"digital signature\\",\\n \\"content commitment\\",\\n \\"key encipherment\\",\\n \\"key agreement\\",\\n \\"data encipherment\\",\\n \\"cert sign\\",\\n \\"crl sign\\",\\n \\"encipher only\\",\\n \\"decipher only\\",\\n \\"any\\",\\n \\"server auth\\",\\n \\"client auth\\",\\n \\"code signing\\",\\n \\"email protection\\",\\n \\"s/mime\\",\\n \\"ipsec end system\\",\\n \\"ipsec tunnel\\",\\n \\"ipsec user\\",\\n \\"timestamping\\",\\n \\"ocsp signing\\",\\n \\"microsoft sgc\\",\\n \\"netscape sgc\\', args=[d.arg(name='usages', type=d.T.array)]), - withUsages(usages): { spec+: { usages: if std.isArray(v=usages) then usages else [usages] } }, - '#withUsagesMixin':: d.fn(help='"allowedUsages specifies a set of usage contexts the key will be valid for. See: https://tools.ietf.org/html/rfc5280#section-4.2.1.3\\n https://tools.ietf.org/html/rfc5280#section-4.2.1.12\\nValid values are:\\n \\"signing\\",\\n \\"digital signature\\",\\n \\"content commitment\\",\\n \\"key encipherment\\",\\n \\"key agreement\\",\\n \\"data encipherment\\",\\n \\"cert sign\\",\\n \\"crl sign\\",\\n \\"encipher only\\",\\n \\"decipher only\\",\\n \\"any\\",\\n \\"server auth\\",\\n \\"client auth\\",\\n \\"code signing\\",\\n \\"email protection\\",\\n \\"s/mime\\",\\n \\"ipsec end system\\",\\n \\"ipsec tunnel\\",\\n \\"ipsec user\\",\\n \\"timestamping\\",\\n \\"ocsp signing\\",\\n \\"microsoft sgc\\",\\n \\"netscape sgc\\\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='usages', type=d.T.array)]), - withUsagesMixin(usages): { spec+: { usages+: if std.isArray(v=usages) then usages else [usages] } }, - '#withUsername':: d.fn(help='"Information about the requesting user. See user.Info interface for details."', args=[d.arg(name='username', type=d.T.string)]), - withUsername(username): { spec+: { username: username } }, - }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/certificates/v1beta1/certificateSigningRequestCondition.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/certificates/v1beta1/certificateSigningRequestCondition.libsonnet deleted file mode 100644 index bd83485168a..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/certificates/v1beta1/certificateSigningRequestCondition.libsonnet +++ /dev/null @@ -1,16 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='certificateSigningRequestCondition', url='', help=''), - '#withLastTransitionTime':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='lastTransitionTime', type=d.T.string)]), - withLastTransitionTime(lastTransitionTime): { lastTransitionTime: lastTransitionTime }, - '#withLastUpdateTime':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. 
Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='lastUpdateTime', type=d.T.string)]), - withLastUpdateTime(lastUpdateTime): { lastUpdateTime: lastUpdateTime }, - '#withMessage':: d.fn(help='"human readable message with details about the request state"', args=[d.arg(name='message', type=d.T.string)]), - withMessage(message): { message: message }, - '#withReason':: d.fn(help='"brief reason for the request state"', args=[d.arg(name='reason', type=d.T.string)]), - withReason(reason): { reason: reason }, - '#withType':: d.fn(help='"type of the condition. Known conditions include \\"Approved\\", \\"Denied\\", and \\"Failed\\"."', args=[d.arg(name='type', type=d.T.string)]), - withType(type): { type: type }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/certificates/v1beta1/certificateSigningRequestSpec.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/certificates/v1beta1/certificateSigningRequestSpec.libsonnet deleted file mode 100644 index 42034432592..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/certificates/v1beta1/certificateSigningRequestSpec.libsonnet +++ /dev/null @@ -1,26 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='certificateSigningRequestSpec', url='', help='"This information is immutable after the request is created. Only the Request and Usages fields can be set on creation, other fields are derived by Kubernetes and cannot be modified by users."'), - '#withExtra':: d.fn(help='"Extra information about the requesting user. See user.Info interface for details."', args=[d.arg(name='extra', type=d.T.object)]), - withExtra(extra): { extra: extra }, - '#withExtraMixin':: d.fn(help='"Extra information about the requesting user. See user.Info interface for details."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='extra', type=d.T.object)]), - withExtraMixin(extra): { extra+: extra }, - '#withGroups':: d.fn(help='"Group information about the requesting user. See user.Info interface for details."', args=[d.arg(name='groups', type=d.T.array)]), - withGroups(groups): { groups: if std.isArray(v=groups) then groups else [groups] }, - '#withGroupsMixin':: d.fn(help='"Group information about the requesting user. See user.Info interface for details."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='groups', type=d.T.array)]), - withGroupsMixin(groups): { groups+: if std.isArray(v=groups) then groups else [groups] }, - '#withRequest':: d.fn(help='"Base64-encoded PKCS#10 CSR data"', args=[d.arg(name='request', type=d.T.string)]), - withRequest(request): { request: request }, - '#withSignerName':: d.fn(help="\"Requested signer for the request. It is a qualified name in the form: `scope-hostname.io/name`. If empty, it will be defaulted:\\n 1. If it's a kubelet client certificate, it is assigned\\n \\\"kubernetes.io/kube-apiserver-client-kubelet\\\".\\n 2. If it's a kubelet serving certificate, it is assigned\\n \\\"kubernetes.io/kubelet-serving\\\".\\n 3. Otherwise, it is assigned \\\"kubernetes.io/legacy-unknown\\\".\\nDistribution of trust for signers happens out of band. 
You can select on this field using `spec.signerName`.\"", args=[d.arg(name='signerName', type=d.T.string)]), - withSignerName(signerName): { signerName: signerName }, - '#withUid':: d.fn(help='"UID information about the requesting user. See user.Info interface for details."', args=[d.arg(name='uid', type=d.T.string)]), - withUid(uid): { uid: uid }, - '#withUsages':: d.fn(help='"allowedUsages specifies a set of usage contexts the key will be valid for. See: https://tools.ietf.org/html/rfc5280#section-4.2.1.3\\n https://tools.ietf.org/html/rfc5280#section-4.2.1.12\\nValid values are:\\n \\"signing\\",\\n \\"digital signature\\",\\n \\"content commitment\\",\\n \\"key encipherment\\",\\n \\"key agreement\\",\\n \\"data encipherment\\",\\n \\"cert sign\\",\\n \\"crl sign\\",\\n \\"encipher only\\",\\n \\"decipher only\\",\\n \\"any\\",\\n \\"server auth\\",\\n \\"client auth\\",\\n \\"code signing\\",\\n \\"email protection\\",\\n \\"s/mime\\",\\n \\"ipsec end system\\",\\n \\"ipsec tunnel\\",\\n \\"ipsec user\\",\\n \\"timestamping\\",\\n \\"ocsp signing\\",\\n \\"microsoft sgc\\",\\n \\"netscape sgc\\', args=[d.arg(name='usages', type=d.T.array)]), - withUsages(usages): { usages: if std.isArray(v=usages) then usages else [usages] }, - '#withUsagesMixin':: d.fn(help='"allowedUsages specifies a set of usage contexts the key will be valid for. See: https://tools.ietf.org/html/rfc5280#section-4.2.1.3\\n https://tools.ietf.org/html/rfc5280#section-4.2.1.12\\nValid values are:\\n \\"signing\\",\\n \\"digital signature\\",\\n \\"content commitment\\",\\n \\"key encipherment\\",\\n \\"key agreement\\",\\n \\"data encipherment\\",\\n \\"cert sign\\",\\n \\"crl sign\\",\\n \\"encipher only\\",\\n \\"decipher only\\",\\n \\"any\\",\\n \\"server auth\\",\\n \\"client auth\\",\\n \\"code signing\\",\\n \\"email protection\\",\\n \\"s/mime\\",\\n \\"ipsec end system\\",\\n \\"ipsec tunnel\\",\\n \\"ipsec user\\",\\n \\"timestamping\\",\\n \\"ocsp signing\\",\\n \\"microsoft sgc\\",\\n \\"netscape sgc\\\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='usages', type=d.T.array)]), - withUsagesMixin(usages): { usages+: if std.isArray(v=usages) then usages else [usages] }, - '#withUsername':: d.fn(help='"Information about the requesting user. 
See user.Info interface for details."', args=[d.arg(name='username', type=d.T.string)]), - withUsername(username): { username: username }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/certificates/v1beta1/certificateSigningRequestStatus.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/certificates/v1beta1/certificateSigningRequestStatus.libsonnet deleted file mode 100644 index 0a36898fd8a..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/certificates/v1beta1/certificateSigningRequestStatus.libsonnet +++ /dev/null @@ -1,12 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='certificateSigningRequestStatus', url='', help=''), - '#withCertificate':: d.fn(help='"If request was approved, the controller will place the issued certificate here."', args=[d.arg(name='certificate', type=d.T.string)]), - withCertificate(certificate): { certificate: certificate }, - '#withConditions':: d.fn(help='"Conditions applied to the request, such as approval or denial."', args=[d.arg(name='conditions', type=d.T.array)]), - withConditions(conditions): { conditions: if std.isArray(v=conditions) then conditions else [conditions] }, - '#withConditionsMixin':: d.fn(help='"Conditions applied to the request, such as approval or denial."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='conditions', type=d.T.array)]), - withConditionsMixin(conditions): { conditions+: if std.isArray(v=conditions) then conditions else [conditions] }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/certificates/v1beta1/main.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/certificates/v1beta1/main.libsonnet deleted file mode 100644 index 0e09e4b298a..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/certificates/v1beta1/main.libsonnet +++ /dev/null @@ -1,8 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='v1beta1', url='', help=''), - certificateSigningRequest: (import 'certificateSigningRequest.libsonnet'), - certificateSigningRequestCondition: (import 'certificateSigningRequestCondition.libsonnet'), - certificateSigningRequestSpec: (import 'certificateSigningRequestSpec.libsonnet'), - certificateSigningRequestStatus: (import 'certificateSigningRequestStatus.libsonnet'), -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/coordination/v1/leaseSpec.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/coordination/v1/leaseSpec.libsonnet deleted file mode 100644 index 3f501306ad4..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/coordination/v1/leaseSpec.libsonnet +++ /dev/null @@ -1,16 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='leaseSpec', url='', help='"LeaseSpec is a specification of a Lease."'), - '#withAcquireTime':: d.fn(help='"MicroTime is version of Time with microsecond level precision."', args=[d.arg(name='acquireTime', type=d.T.string)]), - withAcquireTime(acquireTime): { acquireTime: acquireTime }, - '#withHolderIdentity':: d.fn(help='"holderIdentity 
contains the identity of the holder of a current lease."', args=[d.arg(name='holderIdentity', type=d.T.string)]), - withHolderIdentity(holderIdentity): { holderIdentity: holderIdentity }, - '#withLeaseDurationSeconds':: d.fn(help='"leaseDurationSeconds is a duration that candidates for a lease need to wait to force acquire it. This is measure against time of last observed RenewTime."', args=[d.arg(name='leaseDurationSeconds', type=d.T.integer)]), - withLeaseDurationSeconds(leaseDurationSeconds): { leaseDurationSeconds: leaseDurationSeconds }, - '#withLeaseTransitions':: d.fn(help='"leaseTransitions is the number of transitions of a lease between holders."', args=[d.arg(name='leaseTransitions', type=d.T.integer)]), - withLeaseTransitions(leaseTransitions): { leaseTransitions: leaseTransitions }, - '#withRenewTime':: d.fn(help='"MicroTime is version of Time with microsecond level precision."', args=[d.arg(name='renewTime', type=d.T.string)]), - withRenewTime(renewTime): { renewTime: renewTime }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/coordination/v1beta1/leaseSpec.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/coordination/v1beta1/leaseSpec.libsonnet deleted file mode 100644 index 3f501306ad4..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/coordination/v1beta1/leaseSpec.libsonnet +++ /dev/null @@ -1,16 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='leaseSpec', url='', help='"LeaseSpec is a specification of a Lease."'), - '#withAcquireTime':: d.fn(help='"MicroTime is version of Time with microsecond level precision."', args=[d.arg(name='acquireTime', type=d.T.string)]), - withAcquireTime(acquireTime): { acquireTime: acquireTime }, - '#withHolderIdentity':: d.fn(help='"holderIdentity contains the identity of the holder of a current lease."', args=[d.arg(name='holderIdentity', type=d.T.string)]), - withHolderIdentity(holderIdentity): { holderIdentity: holderIdentity }, - '#withLeaseDurationSeconds':: d.fn(help='"leaseDurationSeconds is a duration that candidates for a lease need to wait to force acquire it. 
This is measure against time of last observed RenewTime."', args=[d.arg(name='leaseDurationSeconds', type=d.T.integer)]), - withLeaseDurationSeconds(leaseDurationSeconds): { leaseDurationSeconds: leaseDurationSeconds }, - '#withLeaseTransitions':: d.fn(help='"leaseTransitions is the number of transitions of a lease between holders."', args=[d.arg(name='leaseTransitions', type=d.T.integer)]), - withLeaseTransitions(leaseTransitions): { leaseTransitions: leaseTransitions }, - '#withRenewTime':: d.fn(help='"MicroTime is version of Time with microsecond level precision."', args=[d.arg(name='renewTime', type=d.T.string)]), - withRenewTime(renewTime): { renewTime: renewTime }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/coordination/v1beta1/main.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/coordination/v1beta1/main.libsonnet deleted file mode 100644 index 12f30209845..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/coordination/v1beta1/main.libsonnet +++ /dev/null @@ -1,6 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='v1beta1', url='', help=''), - lease: (import 'lease.libsonnet'), - leaseSpec: (import 'leaseSpec.libsonnet'), -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/awsElasticBlockStoreVolumeSource.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/awsElasticBlockStoreVolumeSource.libsonnet deleted file mode 100644 index c89b3f3aa6f..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/awsElasticBlockStoreVolumeSource.libsonnet +++ /dev/null @@ -1,14 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='awsElasticBlockStoreVolumeSource', url='', help='"Represents a Persistent Disk resource in AWS.\\n\\nAn AWS EBS disk must exist before mounting to a container. The disk must also be in the same AWS zone as the kubelet. An AWS EBS disk can only be mounted as read/write once. AWS EBS volumes support ownership management and SELinux relabeling."'), - '#withFsType':: d.fn(help='"Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore"', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { fsType: fsType }, - '#withPartition':: d.fn(help='"The partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \\"1\\". Similarly, the volume partition for /dev/sda is \\"0\\" (or you can leave the property empty)."', args=[d.arg(name='partition', type=d.T.integer)]), - withPartition(partition): { partition: partition }, - '#withReadOnly':: d.fn(help='"Specify \\"true\\" to force and set the ReadOnly property in VolumeMounts to \\"true\\". If omitted, the default is \\"false\\". 
More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore"', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { readOnly: readOnly }, - '#withVolumeID':: d.fn(help='"Unique ID of the persistent disk resource in AWS (Amazon EBS volume). More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore"', args=[d.arg(name='volumeID', type=d.T.string)]), - withVolumeID(volumeID): { volumeID: volumeID }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/azureDiskVolumeSource.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/azureDiskVolumeSource.libsonnet deleted file mode 100644 index 3a6f4e8c227..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/azureDiskVolumeSource.libsonnet +++ /dev/null @@ -1,18 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='azureDiskVolumeSource', url='', help='"AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod."'), - '#withCachingMode':: d.fn(help='"Host Caching mode: None, Read Only, Read Write."', args=[d.arg(name='cachingMode', type=d.T.string)]), - withCachingMode(cachingMode): { cachingMode: cachingMode }, - '#withDiskName':: d.fn(help='"The Name of the data disk in the blob storage"', args=[d.arg(name='diskName', type=d.T.string)]), - withDiskName(diskName): { diskName: diskName }, - '#withDiskURI':: d.fn(help='"The URI the data disk in the blob storage"', args=[d.arg(name='diskURI', type=d.T.string)]), - withDiskURI(diskURI): { diskURI: diskURI }, - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { fsType: fsType }, - '#withKind':: d.fn(help='"Expected values Shared: multiple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared"', args=[d.arg(name='kind', type=d.T.string)]), - withKind(kind): { kind: kind }, - '#withReadOnly':: d.fn(help='"Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { readOnly: readOnly }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/azureFilePersistentVolumeSource.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/azureFilePersistentVolumeSource.libsonnet deleted file mode 100644 index abb1082ef8b..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/azureFilePersistentVolumeSource.libsonnet +++ /dev/null @@ -1,14 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='azureFilePersistentVolumeSource', url='', help='"AzureFile represents an Azure File Service mount on the host and bind mount to the pod."'), - '#withReadOnly':: d.fn(help='"Defaults to false (read/write). 
ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { readOnly: readOnly }, - '#withSecretName':: d.fn(help='"the name of secret that contains Azure Storage Account Name and Key"', args=[d.arg(name='secretName', type=d.T.string)]), - withSecretName(secretName): { secretName: secretName }, - '#withSecretNamespace':: d.fn(help='"the namespace of the secret that contains Azure Storage Account Name and Key default is the same as the Pod"', args=[d.arg(name='secretNamespace', type=d.T.string)]), - withSecretNamespace(secretNamespace): { secretNamespace: secretNamespace }, - '#withShareName':: d.fn(help='"Share Name"', args=[d.arg(name='shareName', type=d.T.string)]), - withShareName(shareName): { shareName: shareName }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/azureFileVolumeSource.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/azureFileVolumeSource.libsonnet deleted file mode 100644 index 14b0454c3c1..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/azureFileVolumeSource.libsonnet +++ /dev/null @@ -1,12 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='azureFileVolumeSource', url='', help='"AzureFile represents an Azure File Service mount on the host and bind mount to the pod."'), - '#withReadOnly':: d.fn(help='"Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { readOnly: readOnly }, - '#withSecretName':: d.fn(help='"the name of secret that contains Azure Storage Account Name and Key"', args=[d.arg(name='secretName', type=d.T.string)]), - withSecretName(secretName): { secretName: secretName }, - '#withShareName':: d.fn(help='"Share Name"', args=[d.arg(name='shareName', type=d.T.string)]), - withShareName(shareName): { shareName: shareName }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/cephFSPersistentVolumeSource.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/cephFSPersistentVolumeSource.libsonnet deleted file mode 100644 index 4ac47d531b2..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/cephFSPersistentVolumeSource.libsonnet +++ /dev/null @@ -1,25 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='cephFSPersistentVolumeSource', url='', help='"Represents a Ceph Filesystem mount that lasts the lifetime of a pod Cephfs volumes do not support ownership management or SELinux relabeling."'), - '#secretRef':: d.obj(help='"SecretReference represents a Secret Reference. 
It has enough information to retrieve secret in any namespace"'), - secretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { secretRef+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { secretRef+: { namespace: namespace } }, - }, - '#withMonitors':: d.fn(help='"Required: Monitors is a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='monitors', type=d.T.array)]), - withMonitors(monitors): { monitors: if std.isArray(v=monitors) then monitors else [monitors] }, - '#withMonitorsMixin':: d.fn(help='"Required: Monitors is a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='monitors', type=d.T.array)]), - withMonitorsMixin(monitors): { monitors+: if std.isArray(v=monitors) then monitors else [monitors] }, - '#withPath':: d.fn(help='"Optional: Used as the mounted root, rather than the full Ceph tree, default is /"', args=[d.arg(name='path', type=d.T.string)]), - withPath(path): { path: path }, - '#withReadOnly':: d.fn(help='"Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { readOnly: readOnly }, - '#withSecretFile':: d.fn(help='"Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='secretFile', type=d.T.string)]), - withSecretFile(secretFile): { secretFile: secretFile }, - '#withUser':: d.fn(help='"Optional: User is the rados user name, default is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='user', type=d.T.string)]), - withUser(user): { user: user }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/cephFSVolumeSource.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/cephFSVolumeSource.libsonnet deleted file mode 100644 index d9ad4242284..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/cephFSVolumeSource.libsonnet +++ /dev/null @@ -1,23 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='cephFSVolumeSource', url='', help='"Represents a Ceph Filesystem mount that lasts the lifetime of a pod Cephfs volumes do not support ownership management or SELinux relabeling."'), - '#secretRef':: d.obj(help='"LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace."'), - secretRef: { - '#withName':: d.fn(help='"Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { secretRef+: { name: name } }, - }, - '#withMonitors':: d.fn(help='"Required: Monitors is a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='monitors', type=d.T.array)]), - withMonitors(monitors): { monitors: if std.isArray(v=monitors) then monitors else [monitors] }, - '#withMonitorsMixin':: d.fn(help='"Required: Monitors is a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='monitors', type=d.T.array)]), - withMonitorsMixin(monitors): { monitors+: if std.isArray(v=monitors) then monitors else [monitors] }, - '#withPath':: d.fn(help='"Optional: Used as the mounted root, rather than the full Ceph tree, default is /"', args=[d.arg(name='path', type=d.T.string)]), - withPath(path): { path: path }, - '#withReadOnly':: d.fn(help='"Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { readOnly: readOnly }, - '#withSecretFile':: d.fn(help='"Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='secretFile', type=d.T.string)]), - withSecretFile(secretFile): { secretFile: secretFile }, - '#withUser':: d.fn(help='"Optional: User is the rados user name, default is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='user', type=d.T.string)]), - withUser(user): { user: user }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/configMapProjection.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/configMapProjection.libsonnet deleted file mode 100644 index 600c4bef519..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/configMapProjection.libsonnet +++ /dev/null @@ -1,14 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='configMapProjection', url='', help="\"Adapts a ConfigMap into a projected volume.\\n\\nThe contents of the target ConfigMap's Data field will be presented in a projected volume as files using the keys in the Data field as the file names, unless the items element is populated with specific mappings of keys to paths. Note that this is identical to a configmap volume source without the default mode.\""), - '#withItems':: d.fn(help="\"If unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' 
path or start with '..'.\"", args=[d.arg(name='items', type=d.T.array)]), - withItems(items): { items: if std.isArray(v=items) then items else [items] }, - '#withItemsMixin':: d.fn(help="\"If unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='items', type=d.T.array)]), - withItemsMixin(items): { items+: if std.isArray(v=items) then items else [items] }, - '#withName':: d.fn(help='"Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { name: name }, - '#withOptional':: d.fn(help='"Specify whether the ConfigMap or its keys must be defined"', args=[d.arg(name='optional', type=d.T.boolean)]), - withOptional(optional): { optional: optional }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/configMapVolumeSource.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/configMapVolumeSource.libsonnet deleted file mode 100644 index e6cfbc58052..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/configMapVolumeSource.libsonnet +++ /dev/null @@ -1,16 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='configMapVolumeSource', url='', help="\"Adapts a ConfigMap into a volume.\\n\\nThe contents of the target ConfigMap's Data field will be presented in a volume as files using the keys in the Data field as the file names, unless the items element is populated with specific mappings of keys to paths. ConfigMap volumes support ownership management and SELinux relabeling.\""), - '#withDefaultMode':: d.fn(help='"Optional: mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set."', args=[d.arg(name='defaultMode', type=d.T.integer)]), - withDefaultMode(defaultMode): { defaultMode: defaultMode }, - '#withItems':: d.fn(help="\"If unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' 
path or start with '..'.\"", args=[d.arg(name='items', type=d.T.array)]), - withItems(items): { items: if std.isArray(v=items) then items else [items] }, - '#withItemsMixin':: d.fn(help="\"If unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='items', type=d.T.array)]), - withItemsMixin(items): { items+: if std.isArray(v=items) then items else [items] }, - '#withName':: d.fn(help='"Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { name: name }, - '#withOptional':: d.fn(help='"Specify whether the ConfigMap or its keys must be defined"', args=[d.arg(name='optional', type=d.T.boolean)]), - withOptional(optional): { optional: optional }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/downwardAPIVolumeFile.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/downwardAPIVolumeFile.libsonnet deleted file mode 100644 index 300afde2151..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/downwardAPIVolumeFile.libsonnet +++ /dev/null @@ -1,26 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='downwardAPIVolumeFile', url='', help='"DownwardAPIVolumeFile represents information to create the file containing the pod field"'), - '#fieldRef':: d.obj(help='"ObjectFieldSelector selects an APIVersioned field of an object."'), - fieldRef: { - '#withApiVersion':: d.fn(help='"Version of the schema the FieldPath is written in terms of, defaults to \\"v1\\"."', args=[d.arg(name='apiVersion', type=d.T.string)]), - withApiVersion(apiVersion): { fieldRef+: { apiVersion: apiVersion } }, - '#withFieldPath':: d.fn(help='"Path of the field to select in the specified API version."', args=[d.arg(name='fieldPath', type=d.T.string)]), - withFieldPath(fieldPath): { fieldRef+: { fieldPath: fieldPath } }, - }, - '#resourceFieldRef':: d.obj(help='"ResourceFieldSelector represents container resources (cpu, memory) and their output format"'), - resourceFieldRef: { - '#withContainerName':: d.fn(help='"Container name: required for volumes, optional for env vars"', args=[d.arg(name='containerName', type=d.T.string)]), - withContainerName(containerName): { resourceFieldRef+: { containerName: containerName } }, - '#withDivisor':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... 
| 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='divisor', type=d.T.string)]), - withDivisor(divisor): { resourceFieldRef+: { divisor: divisor } }, - '#withResource':: d.fn(help='"Required: resource to select"', args=[d.arg(name='resource', type=d.T.string)]), - withResource(resource): { resourceFieldRef+: { resource: resource } }, - }, - '#withMode':: d.fn(help='"Optional: mode bits used to set permissions on this file, must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set."', args=[d.arg(name='mode', type=d.T.integer)]), - withMode(mode): { mode: mode }, - '#withPath':: d.fn(help="\"Required: Path is the relative path name of the file to be created. Must not be absolute or contain the '..' path. Must be utf-8 encoded. 
The first item of the relative path must not start with '..'\"", args=[d.arg(name='path', type=d.T.string)]), - withPath(path): { path: path }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/emptyDirVolumeSource.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/emptyDirVolumeSource.libsonnet deleted file mode 100644 index a434c5676c4..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/emptyDirVolumeSource.libsonnet +++ /dev/null @@ -1,10 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='emptyDirVolumeSource', url='', help='"Represents an empty directory for a pod. Empty directory volumes support ownership management and SELinux relabeling."'), - '#withMedium':: d.fn(help="\"What type of storage medium should back this directory. The default is \\\"\\\" which means to use the node's default medium. Must be an empty string (default) or Memory. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir\"", args=[d.arg(name='medium', type=d.T.string)]), - withMedium(medium): { medium: medium }, - '#withSizeLimit':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. 
The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='sizeLimit', type=d.T.string)]), - withSizeLimit(sizeLimit): { sizeLimit: sizeLimit }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/endpointPort.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/endpointPort.libsonnet deleted file mode 100644 index 5befc0a18cb..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/endpointPort.libsonnet +++ /dev/null @@ -1,14 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='endpointPort', url='', help='"EndpointPort is a tuple that describes a single port."'), - '#withAppProtocol':: d.fn(help='"The application protocol for this port. This field follows standard Kubernetes label syntax. Un-prefixed names are reserved for IANA standard service names (as per RFC-6335 and http://www.iana.org/assignments/service-names). Non-standard protocols should use prefixed names such as mycompany.com/my-custom-protocol. This is a beta field that is guarded by the ServiceAppProtocol feature gate and enabled by default."', args=[d.arg(name='appProtocol', type=d.T.string)]), - withAppProtocol(appProtocol): { appProtocol: appProtocol }, - '#withName':: d.fn(help="\"The name of this port. This must match the 'name' field in the corresponding ServicePort. Must be a DNS_LABEL. Optional only if one port is defined.\"", args=[d.arg(name='name', type=d.T.string)]), - withName(name): { name: name }, - '#withPort':: d.fn(help='"The port number of the endpoint."', args=[d.arg(name='port', type=d.T.integer)]), - withPort(port): { port: port }, - '#withProtocol':: d.fn(help='"The IP protocol for this port. Must be UDP, TCP, or SCTP. 
Default is TCP."', args=[d.arg(name='protocol', type=d.T.string)]), - withProtocol(protocol): { protocol: protocol }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/ephemeralContainers.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/ephemeralContainers.libsonnet deleted file mode 100644 index 3fb419cb7d5..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/ephemeralContainers.libsonnet +++ /dev/null @@ -1,60 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='ephemeralContainers', url='', help='"A list of ephemeral containers used with the Pod ephemeralcontainers subresource."'), - '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), - metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), - withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), - withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, - '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), - withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, - '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), - withDeletionGracePeriodSeconds(deletionGracePeriodSeconds): { metadata+: { deletionGracePeriodSeconds: deletionGracePeriodSeconds } }, - '#withDeletionTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. 
Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='deletionTimestamp', type=d.T.string)]), - withDeletionTimestamp(deletionTimestamp): { metadata+: { deletionTimestamp: deletionTimestamp } }, - '#withFinalizers':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), - withGenerateName(generateName): { metadata+: { generateName: generateName } }, - '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), - withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), - withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), - withLabelsMixin(labels): { metadata+: { labels+: labels } }, - '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. 
More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { metadata+: { namespace: namespace } }, - '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, - '#withOwnerReferencesMixin':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, - '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), - withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), - withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), - withUid(uid): { metadata+: { uid: uid } }, - }, - '#new':: d.fn(help='new returns an instance of EphemeralContainers', args=[d.arg(name='name', type=d.T.string)]), - new(name): { - apiVersion: 'v1', - kind: 'EphemeralContainers', - } + self.metadata.withName(name=name), - '#withEphemeralContainers':: d.fn(help='"A list of ephemeral containers associated with this pod. 
New ephemeral containers may be appended to this list, but existing ephemeral containers may not be removed or modified."', args=[d.arg(name='ephemeralContainers', type=d.T.array)]), - withEphemeralContainers(ephemeralContainers): { ephemeralContainers: if std.isArray(v=ephemeralContainers) then ephemeralContainers else [ephemeralContainers] }, - '#withEphemeralContainersMixin':: d.fn(help='"A list of ephemeral containers associated with this pod. New ephemeral containers may be appended to this list, but existing ephemeral containers may not be removed or modified."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ephemeralContainers', type=d.T.array)]), - withEphemeralContainersMixin(ephemeralContainers): { ephemeralContainers+: if std.isArray(v=ephemeralContainers) then ephemeralContainers else [ephemeralContainers] }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/fcVolumeSource.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/fcVolumeSource.libsonnet deleted file mode 100644 index d8e0a809fcf..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/fcVolumeSource.libsonnet +++ /dev/null @@ -1,20 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='fcVolumeSource', url='', help='"Represents a Fibre Channel volume. Fibre Channel volumes can only be mounted as read/write once. Fibre Channel volumes support ownership management and SELinux relabeling."'), - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { fsType: fsType }, - '#withLun':: d.fn(help='"Optional: FC target lun number"', args=[d.arg(name='lun', type=d.T.integer)]), - withLun(lun): { lun: lun }, - '#withReadOnly':: d.fn(help='"Optional: Defaults to false (read/write). 
ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { readOnly: readOnly }, - '#withTargetWWNs':: d.fn(help='"Optional: FC target worldwide names (WWNs)"', args=[d.arg(name='targetWWNs', type=d.T.array)]), - withTargetWWNs(targetWWNs): { targetWWNs: if std.isArray(v=targetWWNs) then targetWWNs else [targetWWNs] }, - '#withTargetWWNsMixin':: d.fn(help='"Optional: FC target worldwide names (WWNs)"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='targetWWNs', type=d.T.array)]), - withTargetWWNsMixin(targetWWNs): { targetWWNs+: if std.isArray(v=targetWWNs) then targetWWNs else [targetWWNs] }, - '#withWwids':: d.fn(help='"Optional: FC volume world wide identifiers (wwids) Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously."', args=[d.arg(name='wwids', type=d.T.array)]), - withWwids(wwids): { wwids: if std.isArray(v=wwids) then wwids else [wwids] }, - '#withWwidsMixin':: d.fn(help='"Optional: FC volume world wide identifiers (wwids) Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='wwids', type=d.T.array)]), - withWwidsMixin(wwids): { wwids+: if std.isArray(v=wwids) then wwids else [wwids] }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/gcePersistentDiskVolumeSource.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/gcePersistentDiskVolumeSource.libsonnet deleted file mode 100644 index 4ca3872013f..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/gcePersistentDiskVolumeSource.libsonnet +++ /dev/null @@ -1,14 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='gcePersistentDiskVolumeSource', url='', help='"Represents a Persistent Disk resource in Google Compute Engine.\\n\\nA GCE PD must exist before mounting to a container. The disk must also be in the same GCE project and zone as the kubelet. A GCE PD can only be mounted as read/write once or read-only many times. GCE PDs support ownership management and SELinux relabeling."'), - '#withFsType':: d.fn(help='"Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { fsType: fsType }, - '#withPartition':: d.fn(help='"The partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \\"1\\". Similarly, the volume partition for /dev/sda is \\"0\\" (or you can leave the property empty). More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='partition', type=d.T.integer)]), - withPartition(partition): { partition: partition }, - '#withPdName':: d.fn(help='"Unique name of the PD resource in GCE. Used to identify the disk in GCE. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='pdName', type=d.T.string)]), - withPdName(pdName): { pdName: pdName }, - '#withReadOnly':: d.fn(help='"ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { readOnly: readOnly }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/iscsiPersistentVolumeSource.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/iscsiPersistentVolumeSource.libsonnet deleted file mode 100644 index f0640bd1024..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/iscsiPersistentVolumeSource.libsonnet +++ /dev/null @@ -1,35 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='iscsiPersistentVolumeSource', url='', help='"ISCSIPersistentVolumeSource represents an ISCSI disk. ISCSI volumes can only be mounted as read/write once. ISCSI volumes support ownership management and SELinux relabeling."'), - '#secretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), - secretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { secretRef+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { secretRef+: { namespace: namespace } }, - }, - '#withChapAuthDiscovery':: d.fn(help='"whether support iSCSI Discovery CHAP authentication"', args=[d.arg(name='chapAuthDiscovery', type=d.T.boolean)]), - withChapAuthDiscovery(chapAuthDiscovery): { chapAuthDiscovery: chapAuthDiscovery }, - '#withChapAuthSession':: d.fn(help='"whether support iSCSI Session CHAP authentication"', args=[d.arg(name='chapAuthSession', type=d.T.boolean)]), - withChapAuthSession(chapAuthSession): { chapAuthSession: chapAuthSession }, - '#withFsType':: d.fn(help='"Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi"', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { fsType: fsType }, - '#withInitiatorName':: d.fn(help='"Custom iSCSI Initiator Name. If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface : will be created for the connection."', args=[d.arg(name='initiatorName', type=d.T.string)]), - withInitiatorName(initiatorName): { initiatorName: initiatorName }, - '#withIqn':: d.fn(help='"Target iSCSI Qualified Name."', args=[d.arg(name='iqn', type=d.T.string)]), - withIqn(iqn): { iqn: iqn }, - '#withIscsiInterface':: d.fn(help="\"iSCSI Interface Name that uses an iSCSI transport. 
Defaults to 'default' (tcp).\"", args=[d.arg(name='iscsiInterface', type=d.T.string)]), - withIscsiInterface(iscsiInterface): { iscsiInterface: iscsiInterface }, - '#withLun':: d.fn(help='"iSCSI Target Lun number."', args=[d.arg(name='lun', type=d.T.integer)]), - withLun(lun): { lun: lun }, - '#withPortals':: d.fn(help='"iSCSI Target Portal List. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260)."', args=[d.arg(name='portals', type=d.T.array)]), - withPortals(portals): { portals: if std.isArray(v=portals) then portals else [portals] }, - '#withPortalsMixin':: d.fn(help='"iSCSI Target Portal List. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260)."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='portals', type=d.T.array)]), - withPortalsMixin(portals): { portals+: if std.isArray(v=portals) then portals else [portals] }, - '#withReadOnly':: d.fn(help='"ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false."', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { readOnly: readOnly }, - '#withTargetPortal':: d.fn(help='"iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260)."', args=[d.arg(name='targetPortal', type=d.T.string)]), - withTargetPortal(targetPortal): { targetPortal: targetPortal }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/iscsiVolumeSource.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/iscsiVolumeSource.libsonnet deleted file mode 100644 index d834c1db0e6..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/iscsiVolumeSource.libsonnet +++ /dev/null @@ -1,33 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='iscsiVolumeSource', url='', help='"Represents an ISCSI disk. ISCSI volumes can only be mounted as read/write once. ISCSI volumes support ownership management and SELinux relabeling."'), - '#secretRef':: d.obj(help='"LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace."'), - secretRef: { - '#withName':: d.fn(help='"Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { secretRef+: { name: name } }, - }, - '#withChapAuthDiscovery':: d.fn(help='"whether support iSCSI Discovery CHAP authentication"', args=[d.arg(name='chapAuthDiscovery', type=d.T.boolean)]), - withChapAuthDiscovery(chapAuthDiscovery): { chapAuthDiscovery: chapAuthDiscovery }, - '#withChapAuthSession':: d.fn(help='"whether support iSCSI Session CHAP authentication"', args=[d.arg(name='chapAuthSession', type=d.T.boolean)]), - withChapAuthSession(chapAuthSession): { chapAuthSession: chapAuthSession }, - '#withFsType':: d.fn(help='"Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi"', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { fsType: fsType }, - '#withInitiatorName':: d.fn(help='"Custom iSCSI Initiator Name. If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface : will be created for the connection."', args=[d.arg(name='initiatorName', type=d.T.string)]), - withInitiatorName(initiatorName): { initiatorName: initiatorName }, - '#withIqn':: d.fn(help='"Target iSCSI Qualified Name."', args=[d.arg(name='iqn', type=d.T.string)]), - withIqn(iqn): { iqn: iqn }, - '#withIscsiInterface':: d.fn(help="\"iSCSI Interface Name that uses an iSCSI transport. Defaults to 'default' (tcp).\"", args=[d.arg(name='iscsiInterface', type=d.T.string)]), - withIscsiInterface(iscsiInterface): { iscsiInterface: iscsiInterface }, - '#withLun':: d.fn(help='"iSCSI Target Lun number."', args=[d.arg(name='lun', type=d.T.integer)]), - withLun(lun): { lun: lun }, - '#withPortals':: d.fn(help='"iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260)."', args=[d.arg(name='portals', type=d.T.array)]), - withPortals(portals): { portals: if std.isArray(v=portals) then portals else [portals] }, - '#withPortalsMixin':: d.fn(help='"iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260)."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='portals', type=d.T.array)]), - withPortalsMixin(portals): { portals+: if std.isArray(v=portals) then portals else [portals] }, - '#withReadOnly':: d.fn(help='"ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false."', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { readOnly: readOnly }, - '#withTargetPortal':: d.fn(help='"iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260)."', args=[d.arg(name='targetPortal', type=d.T.string)]), - withTargetPortal(targetPortal): { targetPortal: targetPortal }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/keyToPath.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/keyToPath.libsonnet deleted file mode 100644 index 5eb6b89ac72..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/keyToPath.libsonnet +++ /dev/null @@ -1,12 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='keyToPath', url='', help='"Maps a string key to a path within a volume."'), - '#withKey':: d.fn(help='"The key to project."', args=[d.arg(name='key', type=d.T.string)]), - withKey(key): { key: key }, - '#withMode':: d.fn(help='"Optional: mode bits used to set permissions on this file. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. 
This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set."', args=[d.arg(name='mode', type=d.T.integer)]), - withMode(mode): { mode: mode }, - '#withPath':: d.fn(help="\"The relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'.\"", args=[d.arg(name='path', type=d.T.string)]), - withPath(path): { path: path }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/localVolumeSource.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/localVolumeSource.libsonnet deleted file mode 100644 index e5a0a1ece98..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/localVolumeSource.libsonnet +++ /dev/null @@ -1,10 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='localVolumeSource', url='', help='"Local represents directly-attached storage with node affinity (Beta feature)"'), - '#withFsType':: d.fn(help='"Filesystem type to mount. It applies only when the Path is a block device. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". The default value is to auto-select a fileystem if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { fsType: fsType }, - '#withPath':: d.fn(help='"The full path to the volume on the node. It can be either a directory or block device (disk, partition, ...)."', args=[d.arg(name='path', type=d.T.string)]), - withPath(path): { path: path }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/persistentVolumeClaimStatus.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/persistentVolumeClaimStatus.libsonnet deleted file mode 100644 index 36b3fe45775..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/persistentVolumeClaimStatus.libsonnet +++ /dev/null @@ -1,20 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='persistentVolumeClaimStatus', url='', help='"PersistentVolumeClaimStatus is the current status of a persistent volume claim."'), - '#withAccessModes':: d.fn(help='"AccessModes contains the actual access modes the volume backing the PVC has. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1"', args=[d.arg(name='accessModes', type=d.T.array)]), - withAccessModes(accessModes): { accessModes: if std.isArray(v=accessModes) then accessModes else [accessModes] }, - '#withAccessModesMixin':: d.fn(help='"AccessModes contains the actual access modes the volume backing the PVC has. 
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='accessModes', type=d.T.array)]), - withAccessModesMixin(accessModes): { accessModes+: if std.isArray(v=accessModes) then accessModes else [accessModes] }, - '#withCapacity':: d.fn(help='"Represents the actual resources of the underlying volume."', args=[d.arg(name='capacity', type=d.T.object)]), - withCapacity(capacity): { capacity: capacity }, - '#withCapacityMixin':: d.fn(help='"Represents the actual resources of the underlying volume."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='capacity', type=d.T.object)]), - withCapacityMixin(capacity): { capacity+: capacity }, - '#withConditions':: d.fn(help="\"Current Condition of persistent volume claim. If underlying persistent volume is being resized then the Condition will be set to 'ResizeStarted'.\"", args=[d.arg(name='conditions', type=d.T.array)]), - withConditions(conditions): { conditions: if std.isArray(v=conditions) then conditions else [conditions] }, - '#withConditionsMixin':: d.fn(help="\"Current Condition of persistent volume claim. If underlying persistent volume is being resized then the Condition will be set to 'ResizeStarted'.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='conditions', type=d.T.array)]), - withConditionsMixin(conditions): { conditions+: if std.isArray(v=conditions) then conditions else [conditions] }, - '#withPhase':: d.fn(help='"Phase represents the current phase of PersistentVolumeClaim."', args=[d.arg(name='phase', type=d.T.string)]), - withPhase(phase): { phase: phase }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/persistentVolumeStatus.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/persistentVolumeStatus.libsonnet deleted file mode 100644 index 6e68b8c42a2..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/persistentVolumeStatus.libsonnet +++ /dev/null @@ -1,12 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='persistentVolumeStatus', url='', help='"PersistentVolumeStatus is the current status of a persistent volume."'), - '#withMessage':: d.fn(help='"A human-readable message indicating details about why the volume is in this state."', args=[d.arg(name='message', type=d.T.string)]), - withMessage(message): { message: message }, - '#withPhase':: d.fn(help='"Phase indicates if a volume is available, bound to a claim, or released by a claim. 
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#phase"', args=[d.arg(name='phase', type=d.T.string)]), - withPhase(phase): { phase: phase }, - '#withReason':: d.fn(help='"Reason is a brief CamelCase string that describes any failure and is meant for machine parsing and tidy display in the CLI."', args=[d.arg(name='reason', type=d.T.string)]), - withReason(reason): { reason: reason }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/photonPersistentDiskVolumeSource.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/photonPersistentDiskVolumeSource.libsonnet deleted file mode 100644 index b05e0d0eaec..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/photonPersistentDiskVolumeSource.libsonnet +++ /dev/null @@ -1,10 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='photonPersistentDiskVolumeSource', url='', help='"Represents a Photon Controller persistent disk resource."'), - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { fsType: fsType }, - '#withPdID':: d.fn(help='"ID that identifies Photon Controller persistent disk"', args=[d.arg(name='pdID', type=d.T.string)]), - withPdID(pdID): { pdID: pdID }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/podIP.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/podIP.libsonnet deleted file mode 100644 index 08e93f9fc9e..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/podIP.libsonnet +++ /dev/null @@ -1,8 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='podIP', url='', help='"IP address information for entries in the (plural) PodIPs field. Each entry includes:\\n IP: An IP address allocated to the pod. Routable at least within the cluster."'), - '#withIp':: d.fn(help='"ip is an IP address (IPv4 or IPv6) assigned to the pod"', args=[d.arg(name='ip', type=d.T.string)]), - withIp(ip): { ip: ip }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/projectedVolumeSource.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/projectedVolumeSource.libsonnet deleted file mode 100644 index 4c01eb71937..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/projectedVolumeSource.libsonnet +++ /dev/null @@ -1,12 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='projectedVolumeSource', url='', help='"Represents a projected volume source"'), - '#withDefaultMode':: d.fn(help='"Mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Directories within the path are not affected by this setting. 
This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set."', args=[d.arg(name='defaultMode', type=d.T.integer)]), - withDefaultMode(defaultMode): { defaultMode: defaultMode }, - '#withSources':: d.fn(help='"list of volume projections"', args=[d.arg(name='sources', type=d.T.array)]), - withSources(sources): { sources: if std.isArray(v=sources) then sources else [sources] }, - '#withSourcesMixin':: d.fn(help='"list of volume projections"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='sources', type=d.T.array)]), - withSourcesMixin(sources): { sources+: if std.isArray(v=sources) then sources else [sources] }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/rbdPersistentVolumeSource.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/rbdPersistentVolumeSource.libsonnet deleted file mode 100644 index 4163da234a4..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/rbdPersistentVolumeSource.libsonnet +++ /dev/null @@ -1,29 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='rbdPersistentVolumeSource', url='', help='"Represents a Rados Block Device mount that lasts the lifetime of a pod. RBD volumes support ownership management and SELinux relabeling."'), - '#secretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), - secretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { secretRef+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { secretRef+: { namespace: namespace } }, - }, - '#withFsType':: d.fn(help='"Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd"', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { fsType: fsType }, - '#withImage':: d.fn(help='"The rados image name. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='image', type=d.T.string)]), - withImage(image): { image: image }, - '#withKeyring':: d.fn(help='"Keyring is the path to key ring for RBDUser. Default is /etc/ceph/keyring. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='keyring', type=d.T.string)]), - withKeyring(keyring): { keyring: keyring }, - '#withMonitors':: d.fn(help='"A collection of Ceph monitors. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='monitors', type=d.T.array)]), - withMonitors(monitors): { monitors: if std.isArray(v=monitors) then monitors else [monitors] }, - '#withMonitorsMixin':: d.fn(help='"A collection of Ceph monitors. 
More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='monitors', type=d.T.array)]), - withMonitorsMixin(monitors): { monitors+: if std.isArray(v=monitors) then monitors else [monitors] }, - '#withPool':: d.fn(help='"The rados pool name. Default is rbd. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='pool', type=d.T.string)]), - withPool(pool): { pool: pool }, - '#withReadOnly':: d.fn(help='"ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { readOnly: readOnly }, - '#withUser':: d.fn(help='"The rados user name. Default is admin. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='user', type=d.T.string)]), - withUser(user): { user: user }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/rbdVolumeSource.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/rbdVolumeSource.libsonnet deleted file mode 100644 index cba0fe8c954..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/rbdVolumeSource.libsonnet +++ /dev/null @@ -1,27 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='rbdVolumeSource', url='', help='"Represents a Rados Block Device mount that lasts the lifetime of a pod. RBD volumes support ownership management and SELinux relabeling."'), - '#secretRef':: d.obj(help='"LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace."'), - secretRef: { - '#withName':: d.fn(help='"Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { secretRef+: { name: name } }, - }, - '#withFsType':: d.fn(help='"Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd"', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { fsType: fsType }, - '#withImage':: d.fn(help='"The rados image name. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='image', type=d.T.string)]), - withImage(image): { image: image }, - '#withKeyring':: d.fn(help='"Keyring is the path to key ring for RBDUser. Default is /etc/ceph/keyring. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='keyring', type=d.T.string)]), - withKeyring(keyring): { keyring: keyring }, - '#withMonitors':: d.fn(help='"A collection of Ceph monitors. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='monitors', type=d.T.array)]), - withMonitors(monitors): { monitors: if std.isArray(v=monitors) then monitors else [monitors] }, - '#withMonitorsMixin':: d.fn(help='"A collection of Ceph monitors. 
More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='monitors', type=d.T.array)]), - withMonitorsMixin(monitors): { monitors+: if std.isArray(v=monitors) then monitors else [monitors] }, - '#withPool':: d.fn(help='"The rados pool name. Default is rbd. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='pool', type=d.T.string)]), - withPool(pool): { pool: pool }, - '#withReadOnly':: d.fn(help='"ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { readOnly: readOnly }, - '#withUser':: d.fn(help='"The rados user name. Default is admin. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='user', type=d.T.string)]), - withUser(user): { user: user }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/resourceFieldSelector.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/resourceFieldSelector.libsonnet deleted file mode 100644 index 460a6016386..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/resourceFieldSelector.libsonnet +++ /dev/null @@ -1,12 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='resourceFieldSelector', url='', help='"ResourceFieldSelector represents container resources (cpu, memory) and their output format"'), - '#withContainerName':: d.fn(help='"Container name: required for volumes, optional for env vars"', args=[d.arg(name='containerName', type=d.T.string)]), - withContainerName(containerName): { containerName: containerName }, - '#withDivisor':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) 
This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='divisor', type=d.T.string)]), - withDivisor(divisor): { divisor: divisor }, - '#withResource':: d.fn(help='"Required: resource to select"', args=[d.arg(name='resource', type=d.T.string)]), - withResource(resource): { resource: resource }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/scaleIOPersistentVolumeSource.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/scaleIOPersistentVolumeSource.libsonnet deleted file mode 100644 index a3a52807f06..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/scaleIOPersistentVolumeSource.libsonnet +++ /dev/null @@ -1,31 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='scaleIOPersistentVolumeSource', url='', help='"ScaleIOPersistentVolumeSource represents a persistent ScaleIO volume"'), - '#secretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), - secretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { secretRef+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { secretRef+: { namespace: namespace } }, - }, - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". 
Default is \\"xfs\\', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { fsType: fsType }, - '#withGateway':: d.fn(help='"The host address of the ScaleIO API Gateway."', args=[d.arg(name='gateway', type=d.T.string)]), - withGateway(gateway): { gateway: gateway }, - '#withProtectionDomain':: d.fn(help='"The name of the ScaleIO Protection Domain for the configured storage."', args=[d.arg(name='protectionDomain', type=d.T.string)]), - withProtectionDomain(protectionDomain): { protectionDomain: protectionDomain }, - '#withReadOnly':: d.fn(help='"Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { readOnly: readOnly }, - '#withSslEnabled':: d.fn(help='"Flag to enable/disable SSL communication with Gateway, default false"', args=[d.arg(name='sslEnabled', type=d.T.boolean)]), - withSslEnabled(sslEnabled): { sslEnabled: sslEnabled }, - '#withStorageMode':: d.fn(help='"Indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. Default is ThinProvisioned."', args=[d.arg(name='storageMode', type=d.T.string)]), - withStorageMode(storageMode): { storageMode: storageMode }, - '#withStoragePool':: d.fn(help='"The ScaleIO Storage Pool associated with the protection domain."', args=[d.arg(name='storagePool', type=d.T.string)]), - withStoragePool(storagePool): { storagePool: storagePool }, - '#withSystem':: d.fn(help='"The name of the storage system as configured in ScaleIO."', args=[d.arg(name='system', type=d.T.string)]), - withSystem(system): { system: system }, - '#withVolumeName':: d.fn(help='"The name of a volume already created in the ScaleIO system that is associated with this volume source."', args=[d.arg(name='volumeName', type=d.T.string)]), - withVolumeName(volumeName): { volumeName: volumeName }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/scaleIOVolumeSource.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/scaleIOVolumeSource.libsonnet deleted file mode 100644 index 376c45a382d..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/scaleIOVolumeSource.libsonnet +++ /dev/null @@ -1,29 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='scaleIOVolumeSource', url='', help='"ScaleIOVolumeSource represents a persistent ScaleIO volume"'), - '#secretRef':: d.obj(help='"LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace."'), - secretRef: { - '#withName':: d.fn(help='"Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { secretRef+: { name: name } }, - }, - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". 
Default is \\"xfs\\"."', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { fsType: fsType }, - '#withGateway':: d.fn(help='"The host address of the ScaleIO API Gateway."', args=[d.arg(name='gateway', type=d.T.string)]), - withGateway(gateway): { gateway: gateway }, - '#withProtectionDomain':: d.fn(help='"The name of the ScaleIO Protection Domain for the configured storage."', args=[d.arg(name='protectionDomain', type=d.T.string)]), - withProtectionDomain(protectionDomain): { protectionDomain: protectionDomain }, - '#withReadOnly':: d.fn(help='"Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { readOnly: readOnly }, - '#withSslEnabled':: d.fn(help='"Flag to enable/disable SSL communication with Gateway, default false"', args=[d.arg(name='sslEnabled', type=d.T.boolean)]), - withSslEnabled(sslEnabled): { sslEnabled: sslEnabled }, - '#withStorageMode':: d.fn(help='"Indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. Default is ThinProvisioned."', args=[d.arg(name='storageMode', type=d.T.string)]), - withStorageMode(storageMode): { storageMode: storageMode }, - '#withStoragePool':: d.fn(help='"The ScaleIO Storage Pool associated with the protection domain."', args=[d.arg(name='storagePool', type=d.T.string)]), - withStoragePool(storagePool): { storagePool: storagePool }, - '#withSystem':: d.fn(help='"The name of the storage system as configured in ScaleIO."', args=[d.arg(name='system', type=d.T.string)]), - withSystem(system): { system: system }, - '#withVolumeName':: d.fn(help='"The name of a volume already created in the ScaleIO system that is associated with this volume source."', args=[d.arg(name='volumeName', type=d.T.string)]), - withVolumeName(volumeName): { volumeName: volumeName }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/secretProjection.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/secretProjection.libsonnet deleted file mode 100644 index 3750b1ee35f..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/secretProjection.libsonnet +++ /dev/null @@ -1,14 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='secretProjection', url='', help="\"Adapts a secret into a projected volume.\\n\\nThe contents of the target Secret's Data field will be presented in a projected volume as files using the keys in the Data field as the file names. Note that this is identical to a secret volume source without the default mode.\""), - '#withItems':: d.fn(help="\"If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' 
path or start with '..'.\"", args=[d.arg(name='items', type=d.T.array)]), - withItems(items): { items: if std.isArray(v=items) then items else [items] }, - '#withItemsMixin':: d.fn(help="\"If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='items', type=d.T.array)]), - withItemsMixin(items): { items+: if std.isArray(v=items) then items else [items] }, - '#withName':: d.fn(help='"Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { name: name }, - '#withOptional':: d.fn(help='"Specify whether the Secret or its key must be defined"', args=[d.arg(name='optional', type=d.T.boolean)]), - withOptional(optional): { optional: optional }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/secretVolumeSource.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/secretVolumeSource.libsonnet deleted file mode 100644 index 1097514d560..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/secretVolumeSource.libsonnet +++ /dev/null @@ -1,16 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='secretVolumeSource', url='', help="\"Adapts a Secret into a volume.\\n\\nThe contents of the target Secret's Data field will be presented in a volume as files using the keys in the Data field as the file names. Secret volumes support ownership management and SELinux relabeling.\""), - '#withDefaultMode':: d.fn(help='"Optional: mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set."', args=[d.arg(name='defaultMode', type=d.T.integer)]), - withDefaultMode(defaultMode): { defaultMode: defaultMode }, - '#withItems':: d.fn(help="\"If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' 
path or start with '..'.\"", args=[d.arg(name='items', type=d.T.array)]), - withItems(items): { items: if std.isArray(v=items) then items else [items] }, - '#withItemsMixin':: d.fn(help="\"If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='items', type=d.T.array)]), - withItemsMixin(items): { items+: if std.isArray(v=items) then items else [items] }, - '#withOptional':: d.fn(help='"Specify whether the Secret or its keys must be defined"', args=[d.arg(name='optional', type=d.T.boolean)]), - withOptional(optional): { optional: optional }, - '#withSecretName':: d.fn(help="\"Name of the secret in the pod's namespace to use. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret\"", args=[d.arg(name='secretName', type=d.T.string)]), - withSecretName(secretName): { secretName: secretName }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/topologySpreadConstraint.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/topologySpreadConstraint.libsonnet deleted file mode 100644 index 0b27df18641..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/topologySpreadConstraint.libsonnet +++ /dev/null @@ -1,23 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='topologySpreadConstraint', url='', help='"TopologySpreadConstraint specifies how to spread matching pods among the given topology."'), - '#labelSelector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), - labelSelector: { - '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), - withMatchExpressions(matchExpressions): { labelSelector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } }, - '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), - withMatchExpressionsMixin(matchExpressions): { labelSelector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } }, - '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". 
The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), - withMatchLabels(matchLabels): { labelSelector+: { matchLabels: matchLabels } }, - '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), - withMatchLabelsMixin(matchLabels): { labelSelector+: { matchLabels+: matchLabels } }, - }, - '#withMaxSkew':: d.fn(help="\"MaxSkew describes the degree to which pods may be unevenly distributed. When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference between the number of matching pods in the target topology and the global minimum. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 1/1/0: | zone1 | zone2 | zone3 | | P | P | | - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 1/1/1; scheduling it onto zone1(zone2) would make the ActualSkew(2-0) on zone1(zone2) violate MaxSkew(1). - if MaxSkew is 2, incoming pod can be scheduled onto any zone. When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence to topologies that satisfy it. It's a required field. Default value is 1 and 0 is not allowed.\"", args=[d.arg(name='maxSkew', type=d.T.integer)]), - withMaxSkew(maxSkew): { maxSkew: maxSkew }, - '#withTopologyKey':: d.fn(help="\"TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each \u003ckey, value\u003e as a \\\"bucket\\\", and try to put balanced number of pods into each bucket. It's a required field.\"", args=[d.arg(name='topologyKey', type=d.T.string)]), - withTopologyKey(topologyKey): { topologyKey: topologyKey }, - '#withWhenUnsatisfiable':: d.fn(help="\"WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy the spread constraint. - DoNotSchedule (default) tells the scheduler not to schedule it. - ScheduleAnyway tells the scheduler to schedule the pod in any location,\\n but giving higher precedence to topologies that would help reduce the\\n skew.\\nA constraint is considered \\\"Unsatisfiable\\\" for an incoming pod if and only if every possible node assignment for that pod would violate \\\"MaxSkew\\\" on some topology. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 3/1/1: | zone1 | zone2 | zone3 | | P P P | P | P | If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler won't make it *more* imbalanced. 
It's a required field.\"", args=[d.arg(name='whenUnsatisfiable', type=d.T.string)]), - withWhenUnsatisfiable(whenUnsatisfiable): { whenUnsatisfiable: whenUnsatisfiable }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/volumeProjection.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/volumeProjection.libsonnet deleted file mode 100644 index 43fafb15bf7..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/volumeProjection.libsonnet +++ /dev/null @@ -1,44 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='volumeProjection', url='', help='"Projection that may be projected along with other supported volume types"'), - '#configMap':: d.obj(help="\"Adapts a ConfigMap into a projected volume.\\n\\nThe contents of the target ConfigMap's Data field will be presented in a projected volume as files using the keys in the Data field as the file names, unless the items element is populated with specific mappings of keys to paths. Note that this is identical to a configmap volume source without the default mode.\""), - configMap: { - '#withItems':: d.fn(help="\"If unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.\"", args=[d.arg(name='items', type=d.T.array)]), - withItems(items): { configMap+: { items: if std.isArray(v=items) then items else [items] } }, - '#withItemsMixin':: d.fn(help="\"If unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='items', type=d.T.array)]), - withItemsMixin(items): { configMap+: { items+: if std.isArray(v=items) then items else [items] } }, - '#withName':: d.fn(help='"Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { configMap+: { name: name } }, - '#withOptional':: d.fn(help='"Specify whether the ConfigMap or its keys must be defined"', args=[d.arg(name='optional', type=d.T.boolean)]), - withOptional(optional): { configMap+: { optional: optional } }, - }, - '#downwardAPI':: d.obj(help='"Represents downward API info for projecting into a projected volume. 
Note that this is identical to a downwardAPI volume source without the default mode."'), - downwardAPI: { - '#withItems':: d.fn(help='"Items is a list of DownwardAPIVolume file"', args=[d.arg(name='items', type=d.T.array)]), - withItems(items): { downwardAPI+: { items: if std.isArray(v=items) then items else [items] } }, - '#withItemsMixin':: d.fn(help='"Items is a list of DownwardAPIVolume file"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='items', type=d.T.array)]), - withItemsMixin(items): { downwardAPI+: { items+: if std.isArray(v=items) then items else [items] } }, - }, - '#secret':: d.obj(help="\"Adapts a secret into a projected volume.\\n\\nThe contents of the target Secret's Data field will be presented in a projected volume as files using the keys in the Data field as the file names. Note that this is identical to a secret volume source without the default mode.\""), - secret: { - '#withItems':: d.fn(help="\"If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.\"", args=[d.arg(name='items', type=d.T.array)]), - withItems(items): { secret+: { items: if std.isArray(v=items) then items else [items] } }, - '#withItemsMixin':: d.fn(help="\"If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='items', type=d.T.array)]), - withItemsMixin(items): { secret+: { items+: if std.isArray(v=items) then items else [items] } }, - '#withName':: d.fn(help='"Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { secret+: { name: name } }, - '#withOptional':: d.fn(help='"Specify whether the Secret or its key must be defined"', args=[d.arg(name='optional', type=d.T.boolean)]), - withOptional(optional): { secret+: { optional: optional } }, - }, - '#serviceAccountToken':: d.obj(help='"ServiceAccountTokenProjection represents a projected service account token volume. This projection can be used to insert a service account token into the pods runtime filesystem for use against APIs (Kubernetes API Server or otherwise)."'), - serviceAccountToken: { - '#withAudience':: d.fn(help='"Audience is the intended audience of the token. A recipient of a token must identify itself with an identifier specified in the audience of the token, and otherwise should reject the token. 
The audience defaults to the identifier of the apiserver."', args=[d.arg(name='audience', type=d.T.string)]), - withAudience(audience): { serviceAccountToken+: { audience: audience } }, - '#withExpirationSeconds':: d.fn(help='"ExpirationSeconds is the requested duration of validity of the service account token. As the token approaches expiration, the kubelet volume plugin will proactively rotate the service account token. The kubelet will start trying to rotate the token if the token is older than 80 percent of its time to live or if the token is older than 24 hours. Defaults to 1 hour and must be at least 10 minutes."', args=[d.arg(name='expirationSeconds', type=d.T.integer)]), - withExpirationSeconds(expirationSeconds): { serviceAccountToken+: { expirationSeconds: expirationSeconds } }, - '#withPath':: d.fn(help='"Path is the path relative to the mount point of the file to project the token into."', args=[d.arg(name='path', type=d.T.string)]), - withPath(path): { serviceAccountToken+: { path: path } }, - }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/vsphereVirtualDiskVolumeSource.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/vsphereVirtualDiskVolumeSource.libsonnet deleted file mode 100644 index 5e742debbfe..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/vsphereVirtualDiskVolumeSource.libsonnet +++ /dev/null @@ -1,14 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='vsphereVirtualDiskVolumeSource', url='', help='"Represents a vSphere volume resource."'), - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". 
Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { fsType: fsType }, - '#withStoragePolicyID':: d.fn(help='"Storage Policy Based Management (SPBM) profile ID associated with the StoragePolicyName."', args=[d.arg(name='storagePolicyID', type=d.T.string)]), - withStoragePolicyID(storagePolicyID): { storagePolicyID: storagePolicyID }, - '#withStoragePolicyName':: d.fn(help='"Storage Policy Based Management (SPBM) profile name."', args=[d.arg(name='storagePolicyName', type=d.T.string)]), - withStoragePolicyName(storagePolicyName): { storagePolicyName: storagePolicyName }, - '#withVolumePath':: d.fn(help='"Path that identifies vSphere volume vmdk"', args=[d.arg(name='volumePath', type=d.T.string)]), - withVolumePath(volumePath): { volumePath: volumePath }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/discovery/v1/endpointConditions.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/discovery/v1/endpointConditions.libsonnet deleted file mode 100644 index 9a99830684d..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/discovery/v1/endpointConditions.libsonnet +++ /dev/null @@ -1,12 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='endpointConditions', url='', help='"EndpointConditions represents the current condition of an endpoint."'), - '#withReady':: d.fn(help='"ready indicates that this endpoint is prepared to receive traffic, according to whatever system is managing the endpoint. A nil value indicates an unknown state. In most cases consumers should interpret this unknown state as ready. For compatibility reasons, ready should never be \\"true\\" for terminating endpoints."', args=[d.arg(name='ready', type=d.T.boolean)]), - withReady(ready): { ready: ready }, - '#withServing':: d.fn(help='"serving is identical to ready except that it is set regardless of the terminating state of endpoints. This condition should be set to true for a ready endpoint that is terminating. If nil, consumers should defer to the ready condition. This field can be enabled with the EndpointSliceTerminatingCondition feature gate."', args=[d.arg(name='serving', type=d.T.boolean)]), - withServing(serving): { serving: serving }, - '#withTerminating':: d.fn(help='"terminating indicates that this endpoint is terminating. A nil value indicates an unknown state. Consumers should interpret this unknown state to mean that the endpoint is not terminating. 
This field can be enabled with the EndpointSliceTerminatingCondition feature gate."', args=[d.arg(name='terminating', type=d.T.boolean)]), - withTerminating(terminating): { terminating: terminating }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/discovery/v1/endpointPort.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/discovery/v1/endpointPort.libsonnet deleted file mode 100644 index 28e60c00971..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/discovery/v1/endpointPort.libsonnet +++ /dev/null @@ -1,14 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='endpointPort', url='', help='"EndpointPort represents a Port used by an EndpointSlice"'), - '#withAppProtocol':: d.fn(help='"The application protocol for this port. This field follows standard Kubernetes label syntax. Un-prefixed names are reserved for IANA standard service names (as per RFC-6335 and http://www.iana.org/assignments/service-names). Non-standard protocols should use prefixed names such as mycompany.com/my-custom-protocol."', args=[d.arg(name='appProtocol', type=d.T.string)]), - withAppProtocol(appProtocol): { appProtocol: appProtocol }, - '#withName':: d.fn(help="\"The name of this port. All ports in an EndpointSlice must have a unique name. If the EndpointSlice is derived from a Kubernetes service, this corresponds to the Service.ports[].name. Name must either be an empty string or pass DNS_LABEL validation: * must be no more than 63 characters long. * must consist of lower case alphanumeric characters or '-'. * must start and end with an alphanumeric character. Default is empty string.\"", args=[d.arg(name='name', type=d.T.string)]), - withName(name): { name: name }, - '#withPort':: d.fn(help='"The port number of the endpoint. If this is not specified, ports are not restricted and must be interpreted in the context of the specific consumer."', args=[d.arg(name='port', type=d.T.integer)]), - withPort(port): { port: port }, - '#withProtocol':: d.fn(help='"The IP protocol for this port. Must be UDP, TCP, or SCTP. 
Default is TCP."', args=[d.arg(name='protocol', type=d.T.string)]), - withProtocol(protocol): { protocol: protocol }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/discovery/v1/forZone.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/discovery/v1/forZone.libsonnet deleted file mode 100644 index dadb81aed84..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/discovery/v1/forZone.libsonnet +++ /dev/null @@ -1,8 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='forZone', url='', help='"ForZone provides information about which zones should consume this endpoint."'), - '#withName':: d.fn(help='"name represents the name of the zone."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { name: name }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/discovery/v1beta1/endpoint.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/discovery/v1beta1/endpoint.libsonnet deleted file mode 100644 index 7f2ac440358..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/discovery/v1beta1/endpoint.libsonnet +++ /dev/null @@ -1,51 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='endpoint', url='', help='"Endpoint represents a single logical \\"backend\\" implementing a service."'), - '#conditions':: d.obj(help='"EndpointConditions represents the current condition of an endpoint."'), - conditions: { - '#withReady':: d.fn(help='"ready indicates that this endpoint is prepared to receive traffic, according to whatever system is managing the endpoint. A nil value indicates an unknown state. In most cases consumers should interpret this unknown state as ready. For compatibility reasons, ready should never be \\"true\\" for terminating endpoints."', args=[d.arg(name='ready', type=d.T.boolean)]), - withReady(ready): { conditions+: { ready: ready } }, - '#withServing':: d.fn(help='"serving is identical to ready except that it is set regardless of the terminating state of endpoints. This condition should be set to true for a ready endpoint that is terminating. If nil, consumers should defer to the ready condition. This field can be enabled with the EndpointSliceTerminatingCondition feature gate."', args=[d.arg(name='serving', type=d.T.boolean)]), - withServing(serving): { conditions+: { serving: serving } }, - '#withTerminating':: d.fn(help='"terminating indicates that this endpoint is terminating. A nil value indicates an unknown state. Consumers should interpret this unknown state to mean that the endpoint is not terminating. This field can be enabled with the EndpointSliceTerminatingCondition feature gate."', args=[d.arg(name='terminating', type=d.T.boolean)]), - withTerminating(terminating): { conditions+: { terminating: terminating } }, - }, - '#hints':: d.obj(help='"EndpointHints provides hints describing how an endpoint should be consumed."'), - hints: { - '#withForZones':: d.fn(help='"forZones indicates the zone(s) this endpoint should be consumed by to enable topology aware routing. 
May contain a maximum of 8 entries."', args=[d.arg(name='forZones', type=d.T.array)]), - withForZones(forZones): { hints+: { forZones: if std.isArray(v=forZones) then forZones else [forZones] } }, - '#withForZonesMixin':: d.fn(help='"forZones indicates the zone(s) this endpoint should be consumed by to enable topology aware routing. May contain a maximum of 8 entries."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='forZones', type=d.T.array)]), - withForZonesMixin(forZones): { hints+: { forZones+: if std.isArray(v=forZones) then forZones else [forZones] } }, - }, - '#targetRef':: d.obj(help='"ObjectReference contains enough information to let you inspect or modify the referred object."'), - targetRef: { - '#withApiVersion':: d.fn(help='"API version of the referent."', args=[d.arg(name='apiVersion', type=d.T.string)]), - withApiVersion(apiVersion): { targetRef+: { apiVersion: apiVersion } }, - '#withFieldPath':: d.fn(help='"If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. For example, if the object reference is to a container within a pod, this would take on a value like: \\"spec.containers{name}\\" (where \\"name\\" refers to the name of the container that triggered the event) or if no container name is specified \\"spec.containers[2]\\" (container with index 2 in this pod). This syntax is chosen only to have some well-defined way of referencing a part of an object."', args=[d.arg(name='fieldPath', type=d.T.string)]), - withFieldPath(fieldPath): { targetRef+: { fieldPath: fieldPath } }, - '#withKind':: d.fn(help='"Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds"', args=[d.arg(name='kind', type=d.T.string)]), - withKind(kind): { targetRef+: { kind: kind } }, - '#withName':: d.fn(help='"Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { targetRef+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/"', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { targetRef+: { namespace: namespace } }, - '#withResourceVersion':: d.fn(help='"Specific resourceVersion to which this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), - withResourceVersion(resourceVersion): { targetRef+: { resourceVersion: resourceVersion } }, - '#withUid':: d.fn(help='"UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids"', args=[d.arg(name='uid', type=d.T.string)]), - withUid(uid): { targetRef+: { uid: uid } }, - }, - '#withAddresses':: d.fn(help='"addresses of this endpoint. The contents of this field are interpreted according to the corresponding EndpointSlice addressType field. Consumers must handle different types of addresses in the context of their own capabilities. 
This must contain at least one address but no more than 100."', args=[d.arg(name='addresses', type=d.T.array)]), - withAddresses(addresses): { addresses: if std.isArray(v=addresses) then addresses else [addresses] }, - '#withAddressesMixin':: d.fn(help='"addresses of this endpoint. The contents of this field are interpreted according to the corresponding EndpointSlice addressType field. Consumers must handle different types of addresses in the context of their own capabilities. This must contain at least one address but no more than 100."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='addresses', type=d.T.array)]), - withAddressesMixin(addresses): { addresses+: if std.isArray(v=addresses) then addresses else [addresses] }, - '#withHostname':: d.fn(help='"hostname of this endpoint. This field may be used by consumers of endpoints to distinguish endpoints from each other (e.g. in DNS names). Multiple endpoints which use the same hostname should be considered fungible (e.g. multiple A values in DNS). Must be lowercase and pass DNS Label (RFC 1123) validation."', args=[d.arg(name='hostname', type=d.T.string)]), - withHostname(hostname): { hostname: hostname }, - '#withNodeName':: d.fn(help='"nodeName represents the name of the Node hosting this endpoint. This can be used to determine endpoints local to a Node. This field can be enabled with the EndpointSliceNodeName feature gate."', args=[d.arg(name='nodeName', type=d.T.string)]), - withNodeName(nodeName): { nodeName: nodeName }, - '#withTopology':: d.fn(help='"topology contains arbitrary topology information associated with the endpoint. These key/value pairs must conform with the label format. https://kubernetes.io/docs/concepts/overview/working-with-objects/labels Topology may include a maximum of 16 key/value pairs. This includes, but is not limited to the following well known keys: * kubernetes.io/hostname: the value indicates the hostname of the node\\n where the endpoint is located. This should match the corresponding\\n node label.\\n* topology.kubernetes.io/zone: the value indicates the zone where the\\n endpoint is located. This should match the corresponding node label.\\n* topology.kubernetes.io/region: the value indicates the region where the\\n endpoint is located. This should match the corresponding node label.\\nThis field is deprecated and will be removed in future api versions."', args=[d.arg(name='topology', type=d.T.object)]), - withTopology(topology): { topology: topology }, - '#withTopologyMixin':: d.fn(help='"topology contains arbitrary topology information associated with the endpoint. These key/value pairs must conform with the label format. https://kubernetes.io/docs/concepts/overview/working-with-objects/labels Topology may include a maximum of 16 key/value pairs. This includes, but is not limited to the following well known keys: * kubernetes.io/hostname: the value indicates the hostname of the node\\n where the endpoint is located. This should match the corresponding\\n node label.\\n* topology.kubernetes.io/zone: the value indicates the zone where the\\n endpoint is located. This should match the corresponding node label.\\n* topology.kubernetes.io/region: the value indicates the region where the\\n endpoint is located. 
This should match the corresponding node label.\\nThis field is deprecated and will be removed in future api versions."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='topology', type=d.T.object)]), - withTopologyMixin(topology): { topology+: topology }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/discovery/v1beta1/endpointConditions.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/discovery/v1beta1/endpointConditions.libsonnet deleted file mode 100644 index 9a99830684d..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/discovery/v1beta1/endpointConditions.libsonnet +++ /dev/null @@ -1,12 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='endpointConditions', url='', help='"EndpointConditions represents the current condition of an endpoint."'), - '#withReady':: d.fn(help='"ready indicates that this endpoint is prepared to receive traffic, according to whatever system is managing the endpoint. A nil value indicates an unknown state. In most cases consumers should interpret this unknown state as ready. For compatibility reasons, ready should never be \\"true\\" for terminating endpoints."', args=[d.arg(name='ready', type=d.T.boolean)]), - withReady(ready): { ready: ready }, - '#withServing':: d.fn(help='"serving is identical to ready except that it is set regardless of the terminating state of endpoints. This condition should be set to true for a ready endpoint that is terminating. If nil, consumers should defer to the ready condition. This field can be enabled with the EndpointSliceTerminatingCondition feature gate."', args=[d.arg(name='serving', type=d.T.boolean)]), - withServing(serving): { serving: serving }, - '#withTerminating':: d.fn(help='"terminating indicates that this endpoint is terminating. A nil value indicates an unknown state. Consumers should interpret this unknown state to mean that the endpoint is not terminating. This field can be enabled with the EndpointSliceTerminatingCondition feature gate."', args=[d.arg(name='terminating', type=d.T.boolean)]), - withTerminating(terminating): { terminating: terminating }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/discovery/v1beta1/endpointHints.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/discovery/v1beta1/endpointHints.libsonnet deleted file mode 100644 index 5a08f1eba3e..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/discovery/v1beta1/endpointHints.libsonnet +++ /dev/null @@ -1,10 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='endpointHints', url='', help='"EndpointHints provides hints describing how an endpoint should be consumed."'), - '#withForZones':: d.fn(help='"forZones indicates the zone(s) this endpoint should be consumed by to enable topology aware routing. May contain a maximum of 8 entries."', args=[d.arg(name='forZones', type=d.T.array)]), - withForZones(forZones): { forZones: if std.isArray(v=forZones) then forZones else [forZones] }, - '#withForZonesMixin':: d.fn(help='"forZones indicates the zone(s) this endpoint should be consumed by to enable topology aware routing. 
May contain a maximum of 8 entries."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='forZones', type=d.T.array)]), - withForZonesMixin(forZones): { forZones+: if std.isArray(v=forZones) then forZones else [forZones] }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/discovery/v1beta1/endpointPort.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/discovery/v1beta1/endpointPort.libsonnet deleted file mode 100644 index 28e60c00971..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/discovery/v1beta1/endpointPort.libsonnet +++ /dev/null @@ -1,14 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='endpointPort', url='', help='"EndpointPort represents a Port used by an EndpointSlice"'), - '#withAppProtocol':: d.fn(help='"The application protocol for this port. This field follows standard Kubernetes label syntax. Un-prefixed names are reserved for IANA standard service names (as per RFC-6335 and http://www.iana.org/assignments/service-names). Non-standard protocols should use prefixed names such as mycompany.com/my-custom-protocol."', args=[d.arg(name='appProtocol', type=d.T.string)]), - withAppProtocol(appProtocol): { appProtocol: appProtocol }, - '#withName':: d.fn(help="\"The name of this port. All ports in an EndpointSlice must have a unique name. If the EndpointSlice is derived from a Kubernetes service, this corresponds to the Service.ports[].name. Name must either be an empty string or pass DNS_LABEL validation: * must be no more than 63 characters long. * must consist of lower case alphanumeric characters or '-'. * must start and end with an alphanumeric character. Default is empty string.\"", args=[d.arg(name='name', type=d.T.string)]), - withName(name): { name: name }, - '#withPort':: d.fn(help='"The port number of the endpoint. If this is not specified, ports are not restricted and must be interpreted in the context of the specific consumer."', args=[d.arg(name='port', type=d.T.integer)]), - withPort(port): { port: port }, - '#withProtocol':: d.fn(help='"The IP protocol for this port. Must be UDP, TCP, or SCTP. Default is TCP."', args=[d.arg(name='protocol', type=d.T.string)]), - withProtocol(protocol): { protocol: protocol }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/discovery/v1beta1/endpointSlice.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/discovery/v1beta1/endpointSlice.libsonnet deleted file mode 100644 index 5e5020c85ef..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/discovery/v1beta1/endpointSlice.libsonnet +++ /dev/null @@ -1,66 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='endpointSlice', url='', help='"EndpointSlice represents a subset of the endpoints that implement a service. 
For a given service there may be multiple EndpointSlice objects, selected by labels, which must be joined to produce the full set of endpoints."'), - '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), - metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), - withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), - withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, - '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), - withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, - '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), - withDeletionGracePeriodSeconds(deletionGracePeriodSeconds): { metadata+: { deletionGracePeriodSeconds: deletionGracePeriodSeconds } }, - '#withDeletionTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='deletionTimestamp', type=d.T.string)]), - withDeletionTimestamp(deletionTimestamp): { metadata+: { deletionTimestamp: deletionTimestamp } }, - '#withFinalizers':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. 
If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), - withGenerateName(generateName): { metadata+: { generateName: generateName } }, - '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), - withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. 
More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), - withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), - withLabelsMixin(labels): { metadata+: { labels+: labels } }, - '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { metadata+: { namespace: namespace } }, - '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. 
There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, - '#withOwnerReferencesMixin':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, - '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), - withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), - withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), - withUid(uid): { metadata+: { uid: uid } }, - }, - '#new':: d.fn(help='new returns an instance of EndpointSlice', args=[d.arg(name='name', type=d.T.string)]), - new(name): { - apiVersion: 'discovery.k8s.io/v1beta1', - kind: 'EndpointSlice', - } + self.metadata.withName(name=name), - '#withAddressType':: d.fn(help='"addressType specifies the type of address carried by this EndpointSlice. All addresses in this slice must be the same type. This field is immutable after creation. The following address types are currently supported: * IPv4: Represents an IPv4 Address. * IPv6: Represents an IPv6 Address. * FQDN: Represents a Fully Qualified Domain Name."', args=[d.arg(name='addressType', type=d.T.string)]), - withAddressType(addressType): { addressType: addressType }, - '#withEndpoints':: d.fn(help='"endpoints is a list of unique endpoints in this slice. 
Each slice may include a maximum of 1000 endpoints."', args=[d.arg(name='endpoints', type=d.T.array)]), - withEndpoints(endpoints): { endpoints: if std.isArray(v=endpoints) then endpoints else [endpoints] }, - '#withEndpointsMixin':: d.fn(help='"endpoints is a list of unique endpoints in this slice. Each slice may include a maximum of 1000 endpoints."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='endpoints', type=d.T.array)]), - withEndpointsMixin(endpoints): { endpoints+: if std.isArray(v=endpoints) then endpoints else [endpoints] }, - '#withPorts':: d.fn(help='"ports specifies the list of network ports exposed by each endpoint in this slice. Each port must have a unique name. When ports is empty, it indicates that there are no defined ports. When a port is defined with a nil port value, it indicates \\"all ports\\". Each slice may include a maximum of 100 ports."', args=[d.arg(name='ports', type=d.T.array)]), - withPorts(ports): { ports: if std.isArray(v=ports) then ports else [ports] }, - '#withPortsMixin':: d.fn(help='"ports specifies the list of network ports exposed by each endpoint in this slice. Each port must have a unique name. When ports is empty, it indicates that there are no defined ports. When a port is defined with a nil port value, it indicates \\"all ports\\". Each slice may include a maximum of 100 ports."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ports', type=d.T.array)]), - withPortsMixin(ports): { ports+: if std.isArray(v=ports) then ports else [ports] }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/discovery/v1beta1/forZone.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/discovery/v1beta1/forZone.libsonnet deleted file mode 100644 index dadb81aed84..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/discovery/v1beta1/forZone.libsonnet +++ /dev/null @@ -1,8 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='forZone', url='', help='"ForZone provides information about which zones should consume this endpoint."'), - '#withName':: d.fn(help='"name represents the name of the zone."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { name: name }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/discovery/v1beta1/main.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/discovery/v1beta1/main.libsonnet deleted file mode 100644 index ee824bf0c7c..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/discovery/v1beta1/main.libsonnet +++ /dev/null @@ -1,10 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='v1beta1', url='', help=''), - endpoint: (import 'endpoint.libsonnet'), - endpointConditions: (import 'endpointConditions.libsonnet'), - endpointHints: (import 'endpointHints.libsonnet'), - endpointPort: (import 'endpointPort.libsonnet'), - endpointSlice: (import 'endpointSlice.libsonnet'), - forZone: (import 'forZone.libsonnet'), -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/events/v1beta1/event.libsonnet 
b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/events/v1beta1/event.libsonnet deleted file mode 100644 index e94f5caacdc..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/events/v1beta1/event.libsonnet +++ /dev/null @@ -1,124 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='event', url='', help='"Event is a report of an event somewhere in the cluster. It generally denotes some state change in the system. Events have a limited retention time and triggers and messages may evolve with time. Event consumers should not rely on the timing of an event with a given Reason reflecting a consistent underlying trigger, or the continued existence of events with that Reason. Events should be treated as informative, best-effort, supplemental data."'), - '#deprecatedSource':: d.obj(help='"EventSource contains information for an event."'), - deprecatedSource: { - '#withComponent':: d.fn(help='"Component from which the event is generated."', args=[d.arg(name='component', type=d.T.string)]), - withComponent(component): { deprecatedSource+: { component: component } }, - '#withHost':: d.fn(help='"Node name on which the event is generated."', args=[d.arg(name='host', type=d.T.string)]), - withHost(host): { deprecatedSource+: { host: host } }, - }, - '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), - metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), - withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), - withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, - '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), - withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, - '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. 
Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), - withDeletionGracePeriodSeconds(deletionGracePeriodSeconds): { metadata+: { deletionGracePeriodSeconds: deletionGracePeriodSeconds } }, - '#withDeletionTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='deletionTimestamp', type=d.T.string)]), - withDeletionTimestamp(deletionTimestamp): { metadata+: { deletionTimestamp: deletionTimestamp } }, - '#withFinalizers':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. 
The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), - withGenerateName(generateName): { metadata+: { generateName: generateName } }, - '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), - withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), - withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), - withLabelsMixin(labels): { metadata+: { labels+: labels } }, - '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. 
More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { metadata+: { namespace: namespace } }, - '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, - '#withOwnerReferencesMixin':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, - '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), - withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), - withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. 
More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), - withUid(uid): { metadata+: { uid: uid } }, - }, - '#new':: d.fn(help='new returns an instance of Event', args=[d.arg(name='name', type=d.T.string)]), - new(name): { - apiVersion: 'events.k8s.io/v1beta1', - kind: 'Event', - } + self.metadata.withName(name=name), - '#regarding':: d.obj(help='"ObjectReference contains enough information to let you inspect or modify the referred object."'), - regarding: { - '#withApiVersion':: d.fn(help='"API version of the referent."', args=[d.arg(name='apiVersion', type=d.T.string)]), - withApiVersion(apiVersion): { regarding+: { apiVersion: apiVersion } }, - '#withFieldPath':: d.fn(help='"If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. For example, if the object reference is to a container within a pod, this would take on a value like: \\"spec.containers{name}\\" (where \\"name\\" refers to the name of the container that triggered the event) or if no container name is specified \\"spec.containers[2]\\" (container with index 2 in this pod). This syntax is chosen only to have some well-defined way of referencing a part of an object."', args=[d.arg(name='fieldPath', type=d.T.string)]), - withFieldPath(fieldPath): { regarding+: { fieldPath: fieldPath } }, - '#withKind':: d.fn(help='"Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds"', args=[d.arg(name='kind', type=d.T.string)]), - withKind(kind): { regarding+: { kind: kind } }, - '#withName':: d.fn(help='"Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { regarding+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/"', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { regarding+: { namespace: namespace } }, - '#withResourceVersion':: d.fn(help='"Specific resourceVersion to which this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), - withResourceVersion(resourceVersion): { regarding+: { resourceVersion: resourceVersion } }, - '#withUid':: d.fn(help='"UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids"', args=[d.arg(name='uid', type=d.T.string)]), - withUid(uid): { regarding+: { uid: uid } }, - }, - '#related':: d.obj(help='"ObjectReference contains enough information to let you inspect or modify the referred object."'), - related: { - '#withApiVersion':: d.fn(help='"API version of the referent."', args=[d.arg(name='apiVersion', type=d.T.string)]), - withApiVersion(apiVersion): { related+: { apiVersion: apiVersion } }, - '#withFieldPath':: d.fn(help='"If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. 
For example, if the object reference is to a container within a pod, this would take on a value like: \\"spec.containers{name}\\" (where \\"name\\" refers to the name of the container that triggered the event) or if no container name is specified \\"spec.containers[2]\\" (container with index 2 in this pod). This syntax is chosen only to have some well-defined way of referencing a part of an object."', args=[d.arg(name='fieldPath', type=d.T.string)]), - withFieldPath(fieldPath): { related+: { fieldPath: fieldPath } }, - '#withKind':: d.fn(help='"Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds"', args=[d.arg(name='kind', type=d.T.string)]), - withKind(kind): { related+: { kind: kind } }, - '#withName':: d.fn(help='"Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { related+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/"', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { related+: { namespace: namespace } }, - '#withResourceVersion':: d.fn(help='"Specific resourceVersion to which this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), - withResourceVersion(resourceVersion): { related+: { resourceVersion: resourceVersion } }, - '#withUid':: d.fn(help='"UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids"', args=[d.arg(name='uid', type=d.T.string)]), - withUid(uid): { related+: { uid: uid } }, - }, - '#series':: d.obj(help='"EventSeries contain information on series of events, i.e. thing that was/is happening continuously for some time."'), - series: { - '#withCount':: d.fn(help='"count is the number of occurrences in this series up to the last heartbeat time."', args=[d.arg(name='count', type=d.T.integer)]), - withCount(count): { series+: { count: count } }, - '#withLastObservedTime':: d.fn(help='"MicroTime is version of Time with microsecond level precision."', args=[d.arg(name='lastObservedTime', type=d.T.string)]), - withLastObservedTime(lastObservedTime): { series+: { lastObservedTime: lastObservedTime } }, - }, - '#withAction':: d.fn(help='"action is what action was taken/failed regarding to the regarding object. It is machine-readable. This field can have at most 128 characters."', args=[d.arg(name='action', type=d.T.string)]), - withAction(action): { action: action }, - '#withDeprecatedCount':: d.fn(help='"deprecatedCount is the deprecated field assuring backward compatibility with core.v1 Event type."', args=[d.arg(name='deprecatedCount', type=d.T.integer)]), - withDeprecatedCount(deprecatedCount): { deprecatedCount: deprecatedCount }, - '#withDeprecatedFirstTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. 
Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='deprecatedFirstTimestamp', type=d.T.string)]), - withDeprecatedFirstTimestamp(deprecatedFirstTimestamp): { deprecatedFirstTimestamp: deprecatedFirstTimestamp }, - '#withDeprecatedLastTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='deprecatedLastTimestamp', type=d.T.string)]), - withDeprecatedLastTimestamp(deprecatedLastTimestamp): { deprecatedLastTimestamp: deprecatedLastTimestamp }, - '#withEventTime':: d.fn(help='"MicroTime is version of Time with microsecond level precision."', args=[d.arg(name='eventTime', type=d.T.string)]), - withEventTime(eventTime): { eventTime: eventTime }, - '#withNote':: d.fn(help='"note is a human-readable description of the status of this operation. Maximal length of the note is 1kB, but libraries should be prepared to handle values up to 64kB."', args=[d.arg(name='note', type=d.T.string)]), - withNote(note): { note: note }, - '#withReason':: d.fn(help='"reason is why the action was taken. It is human-readable. This field can have at most 128 characters."', args=[d.arg(name='reason', type=d.T.string)]), - withReason(reason): { reason: reason }, - '#withReportingController':: d.fn(help='"reportingController is the name of the controller that emitted this Event, e.g. `kubernetes.io/kubelet`. This field cannot be empty for new Events."', args=[d.arg(name='reportingController', type=d.T.string)]), - withReportingController(reportingController): { reportingController: reportingController }, - '#withReportingInstance':: d.fn(help='"reportingInstance is the ID of the controller instance, e.g. `kubelet-xyzf`. This field cannot be empty for new Events and it can have at most 128 characters."', args=[d.arg(name='reportingInstance', type=d.T.string)]), - withReportingInstance(reportingInstance): { reportingInstance: reportingInstance }, - '#withType':: d.fn(help='"type is the type of this event (Normal, Warning), new types could be added in the future. It is machine-readable."', args=[d.arg(name='type', type=d.T.string)]), - withType(type): { type: type }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/events/v1beta1/eventSeries.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/events/v1beta1/eventSeries.libsonnet deleted file mode 100644 index 9cdc01a276f..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/events/v1beta1/eventSeries.libsonnet +++ /dev/null @@ -1,10 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='eventSeries', url='', help='"EventSeries contain information on series of events, i.e. 
thing that was/is happening continuously for some time."'), - '#withCount':: d.fn(help='"count is the number of occurrences in this series up to the last heartbeat time."', args=[d.arg(name='count', type=d.T.integer)]), - withCount(count): { count: count }, - '#withLastObservedTime':: d.fn(help='"MicroTime is version of Time with microsecond level precision."', args=[d.arg(name='lastObservedTime', type=d.T.string)]), - withLastObservedTime(lastObservedTime): { lastObservedTime: lastObservedTime }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/events/v1beta1/main.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/events/v1beta1/main.libsonnet deleted file mode 100644 index 34c50897a1c..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/events/v1beta1/main.libsonnet +++ /dev/null @@ -1,6 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='v1beta1', url='', help=''), - event: (import 'event.libsonnet'), - eventSeries: (import 'eventSeries.libsonnet'), -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/extensions/main.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/extensions/main.libsonnet deleted file mode 100644 index d67bd0cf44e..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/extensions/main.libsonnet +++ /dev/null @@ -1,5 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='extensions', url='', help=''), - v1beta1: (import 'v1beta1/main.libsonnet'), -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/extensions/v1beta1/httpIngressPath.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/extensions/v1beta1/httpIngressPath.libsonnet deleted file mode 100644 index 2801551ab04..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/extensions/v1beta1/httpIngressPath.libsonnet +++ /dev/null @@ -1,26 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='httpIngressPath', url='', help='"HTTPIngressPath associates a path with a backend. Incoming urls matching the path are forwarded to the backend."'), - '#backend':: d.obj(help='"IngressBackend describes all endpoints for a given service and port."'), - backend: { - '#resource':: d.obj(help='"TypedLocalObjectReference contains enough information to let you locate the typed referenced object inside the same namespace."'), - resource: { - '#withApiGroup':: d.fn(help='"APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. 
For any other third-party types, APIGroup is required."', args=[d.arg(name='apiGroup', type=d.T.string)]), - withApiGroup(apiGroup): { backend+: { resource+: { apiGroup: apiGroup } } }, - '#withKind':: d.fn(help='"Kind is the type of resource being referenced"', args=[d.arg(name='kind', type=d.T.string)]), - withKind(kind): { backend+: { resource+: { kind: kind } } }, - '#withName':: d.fn(help='"Name is the name of resource being referenced"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { backend+: { resource+: { name: name } } }, - }, - '#withServiceName':: d.fn(help='"Specifies the name of the referenced service."', args=[d.arg(name='serviceName', type=d.T.string)]), - withServiceName(serviceName): { backend+: { serviceName: serviceName } }, - '#withServicePort':: d.fn(help='"IntOrString is a type that can hold an int32 or a string. When used in JSON or YAML marshalling and unmarshalling, it produces or consumes the inner type. This allows you to have, for example, a JSON field that can accept a name or number."', args=[d.arg(name='servicePort', type=d.T.string)]), - withServicePort(servicePort): { backend+: { servicePort: servicePort } }, - }, - '#withPath':: d.fn(help="\"Path is matched against the path of an incoming request. Currently it can contain characters disallowed from the conventional \\\"path\\\" part of a URL as defined by RFC 3986. Paths must begin with a '/'. When unspecified, all paths from incoming requests are matched.\"", args=[d.arg(name='path', type=d.T.string)]), - withPath(path): { path: path }, - '#withPathType':: d.fn(help="\"PathType determines the interpretation of the Path matching. PathType can be one of the following values: * Exact: Matches the URL path exactly. * Prefix: Matches based on a URL path prefix split by '/'. Matching is\\n done on a path element by element basis. A path element refers is the\\n list of labels in the path split by the '/' separator. A request is a\\n match for path p if every p is an element-wise prefix of p of the\\n request path. Note that if the last element of the path is a substring\\n of the last element in request path, it is not a match (e.g. /foo/bar\\n matches /foo/bar/baz, but does not match /foo/barbaz).\\n* ImplementationSpecific: Interpretation of the Path matching is up to\\n the IngressClass. Implementations can treat this as a separate PathType\\n or treat it identically to Prefix or Exact path types.\\nImplementations are required to support all path types. Defaults to ImplementationSpecific.\"", args=[d.arg(name='pathType', type=d.T.string)]), - withPathType(pathType): { pathType: pathType }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/extensions/v1beta1/ingress.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/extensions/v1beta1/ingress.libsonnet deleted file mode 100644 index 572a5508f32..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/extensions/v1beta1/ingress.libsonnet +++ /dev/null @@ -1,85 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='ingress', url='', help='"Ingress is a collection of rules that allow inbound connections to reach the endpoints defined by a backend. An Ingress can be configured to give services externally-reachable urls, load balance traffic, terminate SSL, offer name based virtual hosting etc. 
DEPRECATED - This group version of Ingress is deprecated by networking.k8s.io/v1beta1 Ingress. See the release notes for more information."'), - '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), - metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), - withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), - withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, - '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), - withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, - '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), - withDeletionGracePeriodSeconds(deletionGracePeriodSeconds): { metadata+: { deletionGracePeriodSeconds: deletionGracePeriodSeconds } }, - '#withDeletionTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='deletionTimestamp', type=d.T.string)]), - withDeletionTimestamp(deletionTimestamp): { metadata+: { deletionTimestamp: deletionTimestamp } }, - '#withFinalizers':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. 
If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), - withGenerateName(generateName): { metadata+: { generateName: generateName } }, - '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), - withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. 
More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), - withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), - withLabelsMixin(labels): { metadata+: { labels+: labels } }, - '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { metadata+: { namespace: namespace } }, - '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. 
There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, - '#withOwnerReferencesMixin':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, - '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), - withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), - withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), - withUid(uid): { metadata+: { uid: uid } }, - }, - '#new':: d.fn(help='new returns an instance of Ingress', args=[d.arg(name='name', type=d.T.string)]), - new(name): { - apiVersion: 'extensions/v1beta1', - kind: 'Ingress', - } + self.metadata.withName(name=name), - '#spec':: d.obj(help='"IngressSpec describes the Ingress the user wishes to exist."'), - spec: { - '#backend':: d.obj(help='"IngressBackend describes all endpoints for a given service and port."'), - backend: { - '#resource':: d.obj(help='"TypedLocalObjectReference contains enough information to let you locate the typed referenced object inside the same namespace."'), - resource: { - '#withApiGroup':: d.fn(help='"APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. 
For any other third-party types, APIGroup is required."', args=[d.arg(name='apiGroup', type=d.T.string)]), - withApiGroup(apiGroup): { spec+: { backend+: { resource+: { apiGroup: apiGroup } } } }, - '#withKind':: d.fn(help='"Kind is the type of resource being referenced"', args=[d.arg(name='kind', type=d.T.string)]), - withKind(kind): { spec+: { backend+: { resource+: { kind: kind } } } }, - '#withName':: d.fn(help='"Name is the name of resource being referenced"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { spec+: { backend+: { resource+: { name: name } } } }, - }, - '#withServiceName':: d.fn(help='"Specifies the name of the referenced service."', args=[d.arg(name='serviceName', type=d.T.string)]), - withServiceName(serviceName): { spec+: { backend+: { serviceName: serviceName } } }, - '#withServicePort':: d.fn(help='"IntOrString is a type that can hold an int32 or a string. When used in JSON or YAML marshalling and unmarshalling, it produces or consumes the inner type. This allows you to have, for example, a JSON field that can accept a name or number."', args=[d.arg(name='servicePort', type=d.T.string)]), - withServicePort(servicePort): { spec+: { backend+: { servicePort: servicePort } } }, - }, - '#withIngressClassName':: d.fn(help='"IngressClassName is the name of the IngressClass cluster resource. The associated IngressClass defines which controller will implement the resource. This replaces the deprecated `kubernetes.io/ingress.class` annotation. For backwards compatibility, when that annotation is set, it must be given precedence over this field. The controller may emit a warning if the field and annotation have different values. Implementations of this API should ignore Ingresses without a class specified. An IngressClass resource may be marked as default, which can be used to set a default value for this field. For more information, refer to the IngressClass documentation."', args=[d.arg(name='ingressClassName', type=d.T.string)]), - withIngressClassName(ingressClassName): { spec+: { ingressClassName: ingressClassName } }, - '#withRules':: d.fn(help='"A list of host rules used to configure the Ingress. If unspecified, or no rule matches, all traffic is sent to the default backend."', args=[d.arg(name='rules', type=d.T.array)]), - withRules(rules): { spec+: { rules: if std.isArray(v=rules) then rules else [rules] } }, - '#withRulesMixin':: d.fn(help='"A list of host rules used to configure the Ingress. If unspecified, or no rule matches, all traffic is sent to the default backend."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='rules', type=d.T.array)]), - withRulesMixin(rules): { spec+: { rules+: if std.isArray(v=rules) then rules else [rules] } }, - '#withTls':: d.fn(help='"TLS configuration. Currently the Ingress only supports a single TLS port, 443. If multiple members of this list specify different hosts, they will be multiplexed on the same port according to the hostname specified through the SNI TLS extension, if the ingress controller fulfilling the ingress supports SNI."', args=[d.arg(name='tls', type=d.T.array)]), - withTls(tls): { spec+: { tls: if std.isArray(v=tls) then tls else [tls] } }, - '#withTlsMixin':: d.fn(help='"TLS configuration. Currently the Ingress only supports a single TLS port, 443. 
If multiple members of this list specify different hosts, they will be multiplexed on the same port according to the hostname specified through the SNI TLS extension, if the ingress controller fulfilling the ingress supports SNI."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='tls', type=d.T.array)]), - withTlsMixin(tls): { spec+: { tls+: if std.isArray(v=tls) then tls else [tls] } }, - }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/extensions/v1beta1/ingressBackend.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/extensions/v1beta1/ingressBackend.libsonnet deleted file mode 100644 index ee01f37e01d..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/extensions/v1beta1/ingressBackend.libsonnet +++ /dev/null @@ -1,19 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='ingressBackend', url='', help='"IngressBackend describes all endpoints for a given service and port."'), - '#resource':: d.obj(help='"TypedLocalObjectReference contains enough information to let you locate the typed referenced object inside the same namespace."'), - resource: { - '#withApiGroup':: d.fn(help='"APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required."', args=[d.arg(name='apiGroup', type=d.T.string)]), - withApiGroup(apiGroup): { resource+: { apiGroup: apiGroup } }, - '#withKind':: d.fn(help='"Kind is the type of resource being referenced"', args=[d.arg(name='kind', type=d.T.string)]), - withKind(kind): { resource+: { kind: kind } }, - '#withName':: d.fn(help='"Name is the name of resource being referenced"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { resource+: { name: name } }, - }, - '#withServiceName':: d.fn(help='"Specifies the name of the referenced service."', args=[d.arg(name='serviceName', type=d.T.string)]), - withServiceName(serviceName): { serviceName: serviceName }, - '#withServicePort':: d.fn(help='"IntOrString is a type that can hold an int32 or a string. When used in JSON or YAML marshalling and unmarshalling, it produces or consumes the inner type. This allows you to have, for example, a JSON field that can accept a name or number."', args=[d.arg(name='servicePort', type=d.T.string)]), - withServicePort(servicePort): { servicePort: servicePort }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/extensions/v1beta1/ingressRule.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/extensions/v1beta1/ingressRule.libsonnet deleted file mode 100644 index ebba87ea6bf..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/extensions/v1beta1/ingressRule.libsonnet +++ /dev/null @@ -1,15 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='ingressRule', url='', help='"IngressRule represents the rules mapping the paths under a specified host to the related backend services. 
Incoming requests are first evaluated for a host match, then routed to the backend associated with the matching IngressRuleValue."'), - '#http':: d.obj(help="\"HTTPIngressRuleValue is a list of http selectors pointing to backends. In the example: http://\u003chost\u003e/\u003cpath\u003e?\u003csearchpart\u003e -\u003e backend where where parts of the url correspond to RFC 3986, this resource will be used to match against everything after the last '/' and before the first '?' or '#'.\""), - http: { - '#withPaths':: d.fn(help='"A collection of paths that map requests to backends."', args=[d.arg(name='paths', type=d.T.array)]), - withPaths(paths): { http+: { paths: if std.isArray(v=paths) then paths else [paths] } }, - '#withPathsMixin':: d.fn(help='"A collection of paths that map requests to backends."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='paths', type=d.T.array)]), - withPathsMixin(paths): { http+: { paths+: if std.isArray(v=paths) then paths else [paths] } }, - }, - '#withHost':: d.fn(help="\"Host is the fully qualified domain name of a network host, as defined by RFC 3986. Note the following deviations from the \\\"host\\\" part of the URI as defined in RFC 3986: 1. IPs are not allowed. Currently an IngressRuleValue can only apply to\\n the IP in the Spec of the parent Ingress.\\n2. The `:` delimiter is not respected because ports are not allowed.\\n\\t Currently the port of an Ingress is implicitly :80 for http and\\n\\t :443 for https.\\nBoth these may change in the future. Incoming requests are matched against the host before the IngressRuleValue. If the host is unspecified, the Ingress routes all traffic based on the specified IngressRuleValue.\\n\\nHost can be \\\"precise\\\" which is a domain name without the terminating dot of a network host (e.g. \\\"foo.bar.com\\\") or \\\"wildcard\\\", which is a domain name prefixed with a single wildcard label (e.g. \\\"*.foo.com\\\"). The wildcard character '*' must appear by itself as the first DNS label and matches only a single label. You cannot have a wildcard label by itself (e.g. Host == \\\"*\\\"). Requests will be matched against the Host field in the following way: 1. If Host is precise, the request matches this rule if the http host header is equal to Host. 2. 
If Host is a wildcard, then the request matches this rule if the http host header is to equal to the suffix (removing the first label) of the wildcard rule.\"", args=[d.arg(name='host', type=d.T.string)]), - withHost(host): { host: host }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/extensions/v1beta1/ingressSpec.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/extensions/v1beta1/ingressSpec.libsonnet deleted file mode 100644 index 35156d74036..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/extensions/v1beta1/ingressSpec.libsonnet +++ /dev/null @@ -1,32 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='ingressSpec', url='', help='"IngressSpec describes the Ingress the user wishes to exist."'), - '#backend':: d.obj(help='"IngressBackend describes all endpoints for a given service and port."'), - backend: { - '#resource':: d.obj(help='"TypedLocalObjectReference contains enough information to let you locate the typed referenced object inside the same namespace."'), - resource: { - '#withApiGroup':: d.fn(help='"APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required."', args=[d.arg(name='apiGroup', type=d.T.string)]), - withApiGroup(apiGroup): { backend+: { resource+: { apiGroup: apiGroup } } }, - '#withKind':: d.fn(help='"Kind is the type of resource being referenced"', args=[d.arg(name='kind', type=d.T.string)]), - withKind(kind): { backend+: { resource+: { kind: kind } } }, - '#withName':: d.fn(help='"Name is the name of resource being referenced"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { backend+: { resource+: { name: name } } }, - }, - '#withServiceName':: d.fn(help='"Specifies the name of the referenced service."', args=[d.arg(name='serviceName', type=d.T.string)]), - withServiceName(serviceName): { backend+: { serviceName: serviceName } }, - '#withServicePort':: d.fn(help='"IntOrString is a type that can hold an int32 or a string. When used in JSON or YAML marshalling and unmarshalling, it produces or consumes the inner type. This allows you to have, for example, a JSON field that can accept a name or number."', args=[d.arg(name='servicePort', type=d.T.string)]), - withServicePort(servicePort): { backend+: { servicePort: servicePort } }, - }, - '#withIngressClassName':: d.fn(help='"IngressClassName is the name of the IngressClass cluster resource. The associated IngressClass defines which controller will implement the resource. This replaces the deprecated `kubernetes.io/ingress.class` annotation. For backwards compatibility, when that annotation is set, it must be given precedence over this field. The controller may emit a warning if the field and annotation have different values. Implementations of this API should ignore Ingresses without a class specified. An IngressClass resource may be marked as default, which can be used to set a default value for this field. For more information, refer to the IngressClass documentation."', args=[d.arg(name='ingressClassName', type=d.T.string)]), - withIngressClassName(ingressClassName): { ingressClassName: ingressClassName }, - '#withRules':: d.fn(help='"A list of host rules used to configure the Ingress. 
If unspecified, or no rule matches, all traffic is sent to the default backend."', args=[d.arg(name='rules', type=d.T.array)]), - withRules(rules): { rules: if std.isArray(v=rules) then rules else [rules] }, - '#withRulesMixin':: d.fn(help='"A list of host rules used to configure the Ingress. If unspecified, or no rule matches, all traffic is sent to the default backend."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='rules', type=d.T.array)]), - withRulesMixin(rules): { rules+: if std.isArray(v=rules) then rules else [rules] }, - '#withTls':: d.fn(help='"TLS configuration. Currently the Ingress only supports a single TLS port, 443. If multiple members of this list specify different hosts, they will be multiplexed on the same port according to the hostname specified through the SNI TLS extension, if the ingress controller fulfilling the ingress supports SNI."', args=[d.arg(name='tls', type=d.T.array)]), - withTls(tls): { tls: if std.isArray(v=tls) then tls else [tls] }, - '#withTlsMixin':: d.fn(help='"TLS configuration. Currently the Ingress only supports a single TLS port, 443. If multiple members of this list specify different hosts, they will be multiplexed on the same port according to the hostname specified through the SNI TLS extension, if the ingress controller fulfilling the ingress supports SNI."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='tls', type=d.T.array)]), - withTlsMixin(tls): { tls+: if std.isArray(v=tls) then tls else [tls] }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/extensions/v1beta1/ingressStatus.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/extensions/v1beta1/ingressStatus.libsonnet deleted file mode 100644 index 7bae060053d..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/extensions/v1beta1/ingressStatus.libsonnet +++ /dev/null @@ -1,13 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='ingressStatus', url='', help='"IngressStatus describe the current state of the Ingress."'), - '#loadBalancer':: d.obj(help='"LoadBalancerStatus represents the status of a load-balancer."'), - loadBalancer: { - '#withIngress':: d.fn(help='"Ingress is a list containing ingress points for the load-balancer. Traffic intended for the service should be sent to these ingress points."', args=[d.arg(name='ingress', type=d.T.array)]), - withIngress(ingress): { loadBalancer+: { ingress: if std.isArray(v=ingress) then ingress else [ingress] } }, - '#withIngressMixin':: d.fn(help='"Ingress is a list containing ingress points for the load-balancer. 
Traffic intended for the service should be sent to these ingress points."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ingress', type=d.T.array)]), - withIngressMixin(ingress): { loadBalancer+: { ingress+: if std.isArray(v=ingress) then ingress else [ingress] } }, - }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/extensions/v1beta1/ingressTLS.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/extensions/v1beta1/ingressTLS.libsonnet deleted file mode 100644 index 21adb5950ec..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/extensions/v1beta1/ingressTLS.libsonnet +++ /dev/null @@ -1,12 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='ingressTLS', url='', help='"IngressTLS describes the transport layer security associated with an Ingress."'), - '#withHosts':: d.fn(help='"Hosts are a list of hosts included in the TLS certificate. The values in this list must match the name/s used in the tlsSecret. Defaults to the wildcard host setting for the loadbalancer controller fulfilling this Ingress, if left unspecified."', args=[d.arg(name='hosts', type=d.T.array)]), - withHosts(hosts): { hosts: if std.isArray(v=hosts) then hosts else [hosts] }, - '#withHostsMixin':: d.fn(help='"Hosts are a list of hosts included in the TLS certificate. The values in this list must match the name/s used in the tlsSecret. Defaults to the wildcard host setting for the loadbalancer controller fulfilling this Ingress, if left unspecified."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='hosts', type=d.T.array)]), - withHostsMixin(hosts): { hosts+: if std.isArray(v=hosts) then hosts else [hosts] }, - '#withSecretName':: d.fn(help='"SecretName is the name of the secret used to terminate SSL traffic on 443. Field is left optional to allow SSL routing based on SNI hostname alone. 
If the SNI host in a listener conflicts with the \\"Host\\" header field used by an IngressRule, the SNI host is used for termination and value of the Host header is used for routing."', args=[d.arg(name='secretName', type=d.T.string)]), - withSecretName(secretName): { secretName: secretName }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/extensions/v1beta1/main.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/extensions/v1beta1/main.libsonnet deleted file mode 100644 index c12f3f9af7c..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/extensions/v1beta1/main.libsonnet +++ /dev/null @@ -1,12 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='v1beta1', url='', help=''), - httpIngressPath: (import 'httpIngressPath.libsonnet'), - httpIngressRuleValue: (import 'httpIngressRuleValue.libsonnet'), - ingress: (import 'ingress.libsonnet'), - ingressBackend: (import 'ingressBackend.libsonnet'), - ingressRule: (import 'ingressRule.libsonnet'), - ingressSpec: (import 'ingressSpec.libsonnet'), - ingressStatus: (import 'ingressStatus.libsonnet'), - ingressTLS: (import 'ingressTLS.libsonnet'), -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/flowcontrol/v1beta1/priorityLevelConfigurationSpec.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/flowcontrol/v1beta1/priorityLevelConfigurationSpec.libsonnet deleted file mode 100644 index 7b00ffa7b5d..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/flowcontrol/v1beta1/priorityLevelConfigurationSpec.libsonnet +++ /dev/null @@ -1,27 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='priorityLevelConfigurationSpec', url='', help='"PriorityLevelConfigurationSpec specifies the configuration of a priority level."'), - '#limited':: d.obj(help='"LimitedPriorityLevelConfiguration specifies how to handle requests that are subject to limits. It addresses two issues:\\n * How are requests for this priority level limited?\\n * What should be done with requests that exceed the limit?"'), - limited: { - '#limitResponse':: d.obj(help='"LimitResponse defines how to handle requests that can not be executed right now."'), - limitResponse: { - '#queuing':: d.obj(help='"QueuingConfiguration holds the configuration parameters for queuing"'), - queuing: { - '#withHandSize':: d.fn(help="\"`handSize` is a small positive number that configures the shuffle sharding of requests into queues. When enqueuing a request at this priority level the request's flow identifier (a string pair) is hashed and the hash value is used to shuffle the list of queues and deal a hand of the size specified here. The request is put into one of the shortest queues in that hand. `handSize` must be no larger than `queues`, and should be significantly smaller (so that a few heavy flows do not saturate most of the queues). See the user-facing documentation for more extensive guidance on setting this field. 
This field has a default value of 8.\"", args=[d.arg(name='handSize', type=d.T.integer)]), - withHandSize(handSize): { limited+: { limitResponse+: { queuing+: { handSize: handSize } } } }, - '#withQueueLengthLimit':: d.fn(help='"`queueLengthLimit` is the maximum number of requests allowed to be waiting in a given queue of this priority level at a time; excess requests are rejected. This value must be positive. If not specified, it will be defaulted to 50."', args=[d.arg(name='queueLengthLimit', type=d.T.integer)]), - withQueueLengthLimit(queueLengthLimit): { limited+: { limitResponse+: { queuing+: { queueLengthLimit: queueLengthLimit } } } }, - '#withQueues':: d.fn(help='"`queues` is the number of queues for this priority level. The queues exist independently at each apiserver. The value must be positive. Setting it to 1 effectively precludes shufflesharding and thus makes the distinguisher method of associated flow schemas irrelevant. This field has a default value of 64."', args=[d.arg(name='queues', type=d.T.integer)]), - withQueues(queues): { limited+: { limitResponse+: { queuing+: { queues: queues } } } }, - }, - '#withType':: d.fn(help='"`type` is \\"Queue\\" or \\"Reject\\". \\"Queue\\" means that requests that can not be executed upon arrival are held in a queue until they can be executed or a queuing limit is reached. \\"Reject\\" means that requests that can not be executed upon arrival are rejected. Required."', args=[d.arg(name='type', type=d.T.string)]), - withType(type): { limited+: { limitResponse+: { type: type } } }, - }, - '#withAssuredConcurrencyShares':: d.fn(help="\"`assuredConcurrencyShares` (ACS) configures the execution limit, which is a limit on the number of requests of this priority level that may be exeucting at a given time. ACS must be a positive number. The server's concurrency limit (SCL) is divided among the concurrency-controlled priority levels in proportion to their assured concurrency shares. This produces the assured concurrency value (ACV) --- the number of requests that may be executing at a time --- for each such priority level:\\n\\n ACV(l) = ceil( SCL * ACS(l) / ( sum[priority levels k] ACS(k) ) )\\n\\nbigger numbers of ACS mean more reserved concurrent requests (at the expense of every other PL). This field has a default value of 30.\"", args=[d.arg(name='assuredConcurrencyShares', type=d.T.integer)]), - withAssuredConcurrencyShares(assuredConcurrencyShares): { limited+: { assuredConcurrencyShares: assuredConcurrencyShares } }, - }, - '#withType':: d.fn(help="\"`type` indicates whether this priority level is subject to limitation on request execution. A value of `\\\"Exempt\\\"` means that requests of this priority level are not subject to a limit (and thus are never queued) and do not detract from the capacity made available to other priority levels. A value of `\\\"Limited\\\"` means that (a) requests of this priority level _are_ subject to limits and (b) some of the server's limited capacity is made available exclusively to this priority level. 
Required.\"", args=[d.arg(name='type', type=d.T.string)]), - withType(type): { type: type }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/meta/v1/watchEvent.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/meta/v1/watchEvent.libsonnet deleted file mode 100644 index cba2b6f4a0f..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/meta/v1/watchEvent.libsonnet +++ /dev/null @@ -1,17 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='watchEvent', url='', help='"Event represents a single event to a watched resource."'), - '#new':: d.fn(help='new returns an instance of WatchEvent', args=[d.arg(name='name', type=d.T.string)]), - new(name): { - apiVersion: 'storage.k8s.io/v1beta1', - kind: 'WatchEvent', - } + self.metadata.withName(name=name), - '#withObject':: d.fn(help="\"RawExtension is used to hold extensions in external versions.\\n\\nTo use this, make a field which has RawExtension as its type in your external, versioned struct, and Object in your internal struct. You also need to register your various plugin types.\\n\\n// Internal package: type MyAPIObject struct {\\n\\truntime.TypeMeta `json:\\\",inline\\\"`\\n\\tMyPlugin runtime.Object `json:\\\"myPlugin\\\"`\\n} type PluginA struct {\\n\\tAOption string `json:\\\"aOption\\\"`\\n}\\n\\n// External package: type MyAPIObject struct {\\n\\truntime.TypeMeta `json:\\\",inline\\\"`\\n\\tMyPlugin runtime.RawExtension `json:\\\"myPlugin\\\"`\\n} type PluginA struct {\\n\\tAOption string `json:\\\"aOption\\\"`\\n}\\n\\n// On the wire, the JSON will look something like this: {\\n\\t\\\"kind\\\":\\\"MyAPIObject\\\",\\n\\t\\\"apiVersion\\\":\\\"v1\\\",\\n\\t\\\"myPlugin\\\": {\\n\\t\\t\\\"kind\\\":\\\"PluginA\\\",\\n\\t\\t\\\"aOption\\\":\\\"foo\\\",\\n\\t},\\n}\\n\\nSo what happens? Decode first uses json or yaml to unmarshal the serialized data into your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked. The next step is to copy (using pkg/conversion) into the internal struct. The runtime package's DefaultScheme has conversion functions installed which will unpack the JSON stored in RawExtension, turning it into the correct object type, and storing it in the Object. (TODO: In the case where the object is of an unknown type, a runtime.Unknown object will be created and stored.)\"", args=[d.arg(name='object', type=d.T.object)]), - withObject(object): { object: object }, - '#withObjectMixin':: d.fn(help="\"RawExtension is used to hold extensions in external versions.\\n\\nTo use this, make a field which has RawExtension as its type in your external, versioned struct, and Object in your internal struct. 
You also need to register your various plugin types.\\n\\n// Internal package: type MyAPIObject struct {\\n\\truntime.TypeMeta `json:\\\",inline\\\"`\\n\\tMyPlugin runtime.Object `json:\\\"myPlugin\\\"`\\n} type PluginA struct {\\n\\tAOption string `json:\\\"aOption\\\"`\\n}\\n\\n// External package: type MyAPIObject struct {\\n\\truntime.TypeMeta `json:\\\",inline\\\"`\\n\\tMyPlugin runtime.RawExtension `json:\\\"myPlugin\\\"`\\n} type PluginA struct {\\n\\tAOption string `json:\\\"aOption\\\"`\\n}\\n\\n// On the wire, the JSON will look something like this: {\\n\\t\\\"kind\\\":\\\"MyAPIObject\\\",\\n\\t\\\"apiVersion\\\":\\\"v1\\\",\\n\\t\\\"myPlugin\\\": {\\n\\t\\t\\\"kind\\\":\\\"PluginA\\\",\\n\\t\\t\\\"aOption\\\":\\\"foo\\\",\\n\\t},\\n}\\n\\nSo what happens? Decode first uses json or yaml to unmarshal the serialized data into your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked. The next step is to copy (using pkg/conversion) into the internal struct. The runtime package's DefaultScheme has conversion functions installed which will unpack the JSON stored in RawExtension, turning it into the correct object type, and storing it in the Object. (TODO: In the case where the object is of an unknown type, a runtime.Unknown object will be created and stored.)\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='object', type=d.T.object)]), - withObjectMixin(object): { object+: object }, - '#withType':: d.fn(help='', args=[d.arg(name='type', type=d.T.string)]), - withType(type): { type: type }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1/httpIngressRuleValue.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1/httpIngressRuleValue.libsonnet deleted file mode 100644 index a54d695b4fc..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1/httpIngressRuleValue.libsonnet +++ /dev/null @@ -1,10 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='httpIngressRuleValue', url='', help="\"HTTPIngressRuleValue is a list of http selectors pointing to backends. In the example: http://\u003chost\u003e/\u003cpath\u003e?\u003csearchpart\u003e -\u003e backend where where parts of the url correspond to RFC 3986, this resource will be used to match against everything after the last '/' and before the first '?' 
or '#'.\""), - '#withPaths':: d.fn(help='"A collection of paths that map requests to backends."', args=[d.arg(name='paths', type=d.T.array)]), - withPaths(paths): { paths: if std.isArray(v=paths) then paths else [paths] }, - '#withPathsMixin':: d.fn(help='"A collection of paths that map requests to backends."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='paths', type=d.T.array)]), - withPathsMixin(paths): { paths+: if std.isArray(v=paths) then paths else [paths] }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1/ingressClassParametersReference.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1/ingressClassParametersReference.libsonnet deleted file mode 100644 index b1ed8b58500..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1/ingressClassParametersReference.libsonnet +++ /dev/null @@ -1,16 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='ingressClassParametersReference', url='', help='"IngressClassParametersReference identifies an API object. This can be used to specify a cluster or namespace-scoped resource."'), - '#withApiGroup':: d.fn(help='"APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required."', args=[d.arg(name='apiGroup', type=d.T.string)]), - withApiGroup(apiGroup): { apiGroup: apiGroup }, - '#withKind':: d.fn(help='"Kind is the type of resource being referenced."', args=[d.arg(name='kind', type=d.T.string)]), - withKind(kind): { kind: kind }, - '#withName':: d.fn(help='"Name is the name of resource being referenced."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { name: name }, - '#withNamespace':: d.fn(help='"Namespace is the namespace of the resource being referenced. This field is required when scope is set to \\"Namespace\\" and must be unset when scope is set to \\"Cluster\\"."', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { namespace: namespace }, - '#withScope':: d.fn(help='"Scope represents if this refers to a cluster or namespace scoped resource. This may be set to \\"Cluster\\" (default) or \\"Namespace\\". Field can be enabled with IngressClassNamespacedParams feature gate."', args=[d.arg(name='scope', type=d.T.string)]), - withScope(scope): { scope: scope }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1/ingressClassSpec.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1/ingressClassSpec.libsonnet deleted file mode 100644 index ebbc86576cb..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1/ingressClassSpec.libsonnet +++ /dev/null @@ -1,21 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='ingressClassSpec', url='', help='"IngressClassSpec provides information about the class of an Ingress."'), - '#parameters':: d.obj(help='"IngressClassParametersReference identifies an API object. 
This can be used to specify a cluster or namespace-scoped resource."'), - parameters: { - '#withApiGroup':: d.fn(help='"APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required."', args=[d.arg(name='apiGroup', type=d.T.string)]), - withApiGroup(apiGroup): { parameters+: { apiGroup: apiGroup } }, - '#withKind':: d.fn(help='"Kind is the type of resource being referenced."', args=[d.arg(name='kind', type=d.T.string)]), - withKind(kind): { parameters+: { kind: kind } }, - '#withName':: d.fn(help='"Name is the name of resource being referenced."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { parameters+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace is the namespace of the resource being referenced. This field is required when scope is set to \\"Namespace\\" and must be unset when scope is set to \\"Cluster\\"."', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { parameters+: { namespace: namespace } }, - '#withScope':: d.fn(help='"Scope represents if this refers to a cluster or namespace scoped resource. This may be set to \\"Cluster\\" (default) or \\"Namespace\\". Field can be enabled with IngressClassNamespacedParams feature gate."', args=[d.arg(name='scope', type=d.T.string)]), - withScope(scope): { parameters+: { scope: scope } }, - }, - '#withController':: d.fn(help='"Controller refers to the name of the controller that should handle this class. This allows for different \\"flavors\\" that are controlled by the same controller. For example, you may have different Parameters for the same implementing controller. This should be specified as a domain-prefixed path no more than 250 characters in length, e.g. \\"acme.io/ingress-controller\\". This field is immutable."', args=[d.arg(name='controller', type=d.T.string)]), - withController(controller): { controller: controller }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1/ingressRule.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1/ingressRule.libsonnet deleted file mode 100644 index ebba87ea6bf..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1/ingressRule.libsonnet +++ /dev/null @@ -1,15 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='ingressRule', url='', help='"IngressRule represents the rules mapping the paths under a specified host to the related backend services. Incoming requests are first evaluated for a host match, then routed to the backend associated with the matching IngressRuleValue."'), - '#http':: d.obj(help="\"HTTPIngressRuleValue is a list of http selectors pointing to backends. In the example: http://\u003chost\u003e/\u003cpath\u003e?\u003csearchpart\u003e -\u003e backend where where parts of the url correspond to RFC 3986, this resource will be used to match against everything after the last '/' and before the first '?' 
or '#'.\""), - http: { - '#withPaths':: d.fn(help='"A collection of paths that map requests to backends."', args=[d.arg(name='paths', type=d.T.array)]), - withPaths(paths): { http+: { paths: if std.isArray(v=paths) then paths else [paths] } }, - '#withPathsMixin':: d.fn(help='"A collection of paths that map requests to backends."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='paths', type=d.T.array)]), - withPathsMixin(paths): { http+: { paths+: if std.isArray(v=paths) then paths else [paths] } }, - }, - '#withHost':: d.fn(help="\"Host is the fully qualified domain name of a network host, as defined by RFC 3986. Note the following deviations from the \\\"host\\\" part of the URI as defined in RFC 3986: 1. IPs are not allowed. Currently an IngressRuleValue can only apply to\\n the IP in the Spec of the parent Ingress.\\n2. The `:` delimiter is not respected because ports are not allowed.\\n\\t Currently the port of an Ingress is implicitly :80 for http and\\n\\t :443 for https.\\nBoth these may change in the future. Incoming requests are matched against the host before the IngressRuleValue. If the host is unspecified, the Ingress routes all traffic based on the specified IngressRuleValue.\\n\\nHost can be \\\"precise\\\" which is a domain name without the terminating dot of a network host (e.g. \\\"foo.bar.com\\\") or \\\"wildcard\\\", which is a domain name prefixed with a single wildcard label (e.g. \\\"*.foo.com\\\"). The wildcard character '*' must appear by itself as the first DNS label and matches only a single label. You cannot have a wildcard label by itself (e.g. Host == \\\"*\\\"). Requests will be matched against the Host field in the following way: 1. If Host is precise, the request matches this rule if the http host header is equal to Host. 2. If Host is a wildcard, then the request matches this rule if the http host header is to equal to the suffix (removing the first label) of the wildcard rule.\"", args=[d.arg(name='host', type=d.T.string)]), - withHost(host): { host: host }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1/ingressStatus.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1/ingressStatus.libsonnet deleted file mode 100644 index 7bae060053d..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1/ingressStatus.libsonnet +++ /dev/null @@ -1,13 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='ingressStatus', url='', help='"IngressStatus describe the current state of the Ingress."'), - '#loadBalancer':: d.obj(help='"LoadBalancerStatus represents the status of a load-balancer."'), - loadBalancer: { - '#withIngress':: d.fn(help='"Ingress is a list containing ingress points for the load-balancer. Traffic intended for the service should be sent to these ingress points."', args=[d.arg(name='ingress', type=d.T.array)]), - withIngress(ingress): { loadBalancer+: { ingress: if std.isArray(v=ingress) then ingress else [ingress] } }, - '#withIngressMixin':: d.fn(help='"Ingress is a list containing ingress points for the load-balancer. 
Traffic intended for the service should be sent to these ingress points."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ingress', type=d.T.array)]), - withIngressMixin(ingress): { loadBalancer+: { ingress+: if std.isArray(v=ingress) then ingress else [ingress] } }, - }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1/ingressTLS.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1/ingressTLS.libsonnet deleted file mode 100644 index 95d59bcef55..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1/ingressTLS.libsonnet +++ /dev/null @@ -1,12 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='ingressTLS', url='', help='"IngressTLS describes the transport layer security associated with an Ingress."'), - '#withHosts':: d.fn(help='"Hosts are a list of hosts included in the TLS certificate. The values in this list must match the name/s used in the tlsSecret. Defaults to the wildcard host setting for the loadbalancer controller fulfilling this Ingress, if left unspecified."', args=[d.arg(name='hosts', type=d.T.array)]), - withHosts(hosts): { hosts: if std.isArray(v=hosts) then hosts else [hosts] }, - '#withHostsMixin':: d.fn(help='"Hosts are a list of hosts included in the TLS certificate. The values in this list must match the name/s used in the tlsSecret. Defaults to the wildcard host setting for the loadbalancer controller fulfilling this Ingress, if left unspecified."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='hosts', type=d.T.array)]), - withHostsMixin(hosts): { hosts+: if std.isArray(v=hosts) then hosts else [hosts] }, - '#withSecretName':: d.fn(help='"SecretName is the name of the secret used to terminate TLS traffic on port 443. Field is left optional to allow TLS routing based on SNI hostname alone. If the SNI host in a listener conflicts with the \\"Host\\" header field used by an IngressRule, the SNI host is used for termination and value of the Host header is used for routing."', args=[d.arg(name='secretName', type=d.T.string)]), - withSecretName(secretName): { secretName: secretName }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1/ipBlock.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1/ipBlock.libsonnet deleted file mode 100644 index dc95536195d..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1/ipBlock.libsonnet +++ /dev/null @@ -1,12 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='ipBlock', url='', help="\"IPBlock describes a particular CIDR (Ex. \\\"192.168.1.1/24\\\",\\\"2001:db9::/64\\\") that is allowed to the pods matched by a NetworkPolicySpec's podSelector. 
The except entry describes CIDRs that should not be included within this rule.\""), - '#withCidr':: d.fn(help='"CIDR is a string representing the IP Block Valid examples are \\"192.168.1.1/24\\" or \\"2001:db9::/64\\', args=[d.arg(name='cidr', type=d.T.string)]), - withCidr(cidr): { cidr: cidr }, - '#withExcept':: d.fn(help='"Except is a slice of CIDRs that should not be included within an IP Block Valid examples are \\"192.168.1.1/24\\" or \\"2001:db9::/64\\" Except values will be rejected if they are outside the CIDR range"', args=[d.arg(name='except', type=d.T.array)]), - withExcept(except): { except: if std.isArray(v=except) then except else [except] }, - '#withExceptMixin':: d.fn(help='"Except is a slice of CIDRs that should not be included within an IP Block Valid examples are \\"192.168.1.1/24\\" or \\"2001:db9::/64\\" Except values will be rejected if they are outside the CIDR range"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='except', type=d.T.array)]), - withExceptMixin(except): { except+: if std.isArray(v=except) then except else [except] }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1/networkPolicyEgressRule.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1/networkPolicyEgressRule.libsonnet deleted file mode 100644 index 155450ad6c3..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1/networkPolicyEgressRule.libsonnet +++ /dev/null @@ -1,14 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='networkPolicyEgressRule', url='', help="\"NetworkPolicyEgressRule describes a particular set of traffic that is allowed out of pods matched by a NetworkPolicySpec's podSelector. The traffic must match both ports and to. This type is beta-level in 1.8\""), - '#withPorts':: d.fn(help='"List of destination ports for outgoing traffic. Each item in this list is combined using a logical OR. If this field is empty or missing, this rule matches all ports (traffic not restricted by port). If this field is present and contains at least one item, then this rule allows traffic only if the traffic matches at least one port in the list."', args=[d.arg(name='ports', type=d.T.array)]), - withPorts(ports): { ports: if std.isArray(v=ports) then ports else [ports] }, - '#withPortsMixin':: d.fn(help='"List of destination ports for outgoing traffic. Each item in this list is combined using a logical OR. If this field is empty or missing, this rule matches all ports (traffic not restricted by port). If this field is present and contains at least one item, then this rule allows traffic only if the traffic matches at least one port in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ports', type=d.T.array)]), - withPortsMixin(ports): { ports+: if std.isArray(v=ports) then ports else [ports] }, - '#withTo':: d.fn(help='"List of destinations for outgoing traffic of pods selected for this rule. Items in this list are combined using a logical OR operation. If this field is empty or missing, this rule matches all destinations (traffic not restricted by destination). 
If this field is present and contains at least one item, this rule allows traffic only if the traffic matches at least one item in the to list."', args=[d.arg(name='to', type=d.T.array)]), - withTo(to): { to: if std.isArray(v=to) then to else [to] }, - '#withToMixin':: d.fn(help='"List of destinations for outgoing traffic of pods selected for this rule. Items in this list are combined using a logical OR operation. If this field is empty or missing, this rule matches all destinations (traffic not restricted by destination). If this field is present and contains at least one item, this rule allows traffic only if the traffic matches at least one item in the to list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='to', type=d.T.array)]), - withToMixin(to): { to+: if std.isArray(v=to) then to else [to] }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1/networkPolicyIngressRule.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1/networkPolicyIngressRule.libsonnet deleted file mode 100644 index 9d244ee6819..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1/networkPolicyIngressRule.libsonnet +++ /dev/null @@ -1,14 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='networkPolicyIngressRule', url='', help="\"NetworkPolicyIngressRule describes a particular set of traffic that is allowed to the pods matched by a NetworkPolicySpec's podSelector. The traffic must match both ports and from.\""), - '#withFrom':: d.fn(help='"List of sources which should be able to access the pods selected for this rule. Items in this list are combined using a logical OR operation. If this field is empty or missing, this rule matches all sources (traffic not restricted by source). If this field is present and contains at least one item, this rule allows traffic only if the traffic matches at least one item in the from list."', args=[d.arg(name='from', type=d.T.array)]), - withFrom(from): { from: if std.isArray(v=from) then from else [from] }, - '#withFromMixin':: d.fn(help='"List of sources which should be able to access the pods selected for this rule. Items in this list are combined using a logical OR operation. If this field is empty or missing, this rule matches all sources (traffic not restricted by source). If this field is present and contains at least one item, this rule allows traffic only if the traffic matches at least one item in the from list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='from', type=d.T.array)]), - withFromMixin(from): { from+: if std.isArray(v=from) then from else [from] }, - '#withPorts':: d.fn(help='"List of ports which should be made accessible on the pods selected for this rule. Each item in this list is combined using a logical OR. If this field is empty or missing, this rule matches all ports (traffic not restricted by port). If this field is present and contains at least one item, then this rule allows traffic only if the traffic matches at least one port in the list."', args=[d.arg(name='ports', type=d.T.array)]), - withPorts(ports): { ports: if std.isArray(v=ports) then ports else [ports] }, - '#withPortsMixin':: d.fn(help='"List of ports which should be made accessible on the pods selected for this rule. 
Each item in this list is combined using a logical OR. If this field is empty or missing, this rule matches all ports (traffic not restricted by port). If this field is present and contains at least one item, then this rule allows traffic only if the traffic matches at least one port in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ports', type=d.T.array)]), - withPortsMixin(ports): { ports+: if std.isArray(v=ports) then ports else [ports] }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1/networkPolicyPort.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1/networkPolicyPort.libsonnet deleted file mode 100644 index 4a23de3ac0c..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1/networkPolicyPort.libsonnet +++ /dev/null @@ -1,12 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='networkPolicyPort', url='', help='"NetworkPolicyPort describes a port to allow traffic on"'), - '#withEndPort':: d.fn(help='"If set, indicates that the range of ports from port to endPort, inclusive, should be allowed by the policy. This field cannot be defined if the port field is not defined or if the port field is defined as a named (string) port. The endPort must be equal or greater than port. This feature is in Alpha state and should be enabled using the Feature Gate \\"NetworkPolicyEndPort\\"."', args=[d.arg(name='endPort', type=d.T.integer)]), - withEndPort(endPort): { endPort: endPort }, - '#withPort':: d.fn(help='"IntOrString is a type that can hold an int32 or a string. When used in JSON or YAML marshalling and unmarshalling, it produces or consumes the inner type. This allows you to have, for example, a JSON field that can accept a name or number."', args=[d.arg(name='port', type=d.T.string)]), - withPort(port): { port: port }, - '#withProtocol':: d.fn(help='"The protocol (TCP, UDP, or SCTP) which traffic must match. If not specified, this field defaults to TCP."', args=[d.arg(name='protocol', type=d.T.string)]), - withProtocol(protocol): { protocol: protocol }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1/networkPolicySpec.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1/networkPolicySpec.libsonnet deleted file mode 100644 index cdfadf08597..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1/networkPolicySpec.libsonnet +++ /dev/null @@ -1,29 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='networkPolicySpec', url='', help='"NetworkPolicySpec provides the specification of a NetworkPolicy"'), - '#podSelector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), - podSelector: { - '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. 
The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), - withMatchExpressions(matchExpressions): { podSelector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } }, - '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), - withMatchExpressionsMixin(matchExpressions): { podSelector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } }, - '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), - withMatchLabels(matchLabels): { podSelector+: { matchLabels: matchLabels } }, - '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), - withMatchLabelsMixin(matchLabels): { podSelector+: { matchLabels+: matchLabels } }, - }, - '#withEgress':: d.fn(help='"List of egress rules to be applied to the selected pods. Outgoing traffic is allowed if there are no NetworkPolicies selecting the pod (and cluster policy otherwise allows the traffic), OR if the traffic matches at least one egress rule across all of the NetworkPolicy objects whose podSelector matches the pod. If this field is empty then this NetworkPolicy limits all outgoing traffic (and serves solely to ensure that the pods it selects are isolated by default). This field is beta-level in 1.8"', args=[d.arg(name='egress', type=d.T.array)]), - withEgress(egress): { egress: if std.isArray(v=egress) then egress else [egress] }, - '#withEgressMixin':: d.fn(help='"List of egress rules to be applied to the selected pods. Outgoing traffic is allowed if there are no NetworkPolicies selecting the pod (and cluster policy otherwise allows the traffic), OR if the traffic matches at least one egress rule across all of the NetworkPolicy objects whose podSelector matches the pod. If this field is empty then this NetworkPolicy limits all outgoing traffic (and serves solely to ensure that the pods it selects are isolated by default). This field is beta-level in 1.8"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='egress', type=d.T.array)]), - withEgressMixin(egress): { egress+: if std.isArray(v=egress) then egress else [egress] }, - '#withIngress':: d.fn(help="\"List of ingress rules to be applied to the selected pods. Traffic is allowed to a pod if there are no NetworkPolicies selecting the pod (and cluster policy otherwise allows the traffic), OR if the traffic source is the pod's local node, OR if the traffic matches at least one ingress rule across all of the NetworkPolicy objects whose podSelector matches the pod. 
If this field is empty then this NetworkPolicy does not allow any traffic (and serves solely to ensure that the pods it selects are isolated by default)\"", args=[d.arg(name='ingress', type=d.T.array)]), - withIngress(ingress): { ingress: if std.isArray(v=ingress) then ingress else [ingress] }, - '#withIngressMixin':: d.fn(help="\"List of ingress rules to be applied to the selected pods. Traffic is allowed to a pod if there are no NetworkPolicies selecting the pod (and cluster policy otherwise allows the traffic), OR if the traffic source is the pod's local node, OR if the traffic matches at least one ingress rule across all of the NetworkPolicy objects whose podSelector matches the pod. If this field is empty then this NetworkPolicy does not allow any traffic (and serves solely to ensure that the pods it selects are isolated by default)\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='ingress', type=d.T.array)]), - withIngressMixin(ingress): { ingress+: if std.isArray(v=ingress) then ingress else [ingress] }, - '#withPolicyTypes':: d.fn(help='"List of rule types that the NetworkPolicy relates to. Valid options are [\\"Ingress\\"], [\\"Egress\\"], or [\\"Ingress\\", \\"Egress\\"]. If this field is not specified, it will default based on the existence of Ingress or Egress rules; policies that contain an Egress section are assumed to affect Egress, and all policies (whether or not they contain an Ingress section) are assumed to affect Ingress. If you want to write an egress-only policy, you must explicitly specify policyTypes [ \\"Egress\\" ]. Likewise, if you want to write a policy that specifies that no egress is allowed, you must specify a policyTypes value that include \\"Egress\\" (since such a policy would not include an Egress section and would otherwise default to just [ \\"Ingress\\" ]). This field is beta-level in 1.8"', args=[d.arg(name='policyTypes', type=d.T.array)]), - withPolicyTypes(policyTypes): { policyTypes: if std.isArray(v=policyTypes) then policyTypes else [policyTypes] }, - '#withPolicyTypesMixin':: d.fn(help='"List of rule types that the NetworkPolicy relates to. Valid options are [\\"Ingress\\"], [\\"Egress\\"], or [\\"Ingress\\", \\"Egress\\"]. If this field is not specified, it will default based on the existence of Ingress or Egress rules; policies that contain an Egress section are assumed to affect Egress, and all policies (whether or not they contain an Ingress section) are assumed to affect Ingress. If you want to write an egress-only policy, you must explicitly specify policyTypes [ \\"Egress\\" ]. Likewise, if you want to write a policy that specifies that no egress is allowed, you must specify a policyTypes value that include \\"Egress\\" (since such a policy would not include an Egress section and would otherwise default to just [ \\"Ingress\\" ]). 
This field is beta-level in 1.8"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='policyTypes', type=d.T.array)]), - withPolicyTypesMixin(policyTypes): { policyTypes+: if std.isArray(v=policyTypes) then policyTypes else [policyTypes] }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1beta1/httpIngressPath.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1beta1/httpIngressPath.libsonnet deleted file mode 100644 index 2801551ab04..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1beta1/httpIngressPath.libsonnet +++ /dev/null @@ -1,26 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='httpIngressPath', url='', help='"HTTPIngressPath associates a path with a backend. Incoming urls matching the path are forwarded to the backend."'), - '#backend':: d.obj(help='"IngressBackend describes all endpoints for a given service and port."'), - backend: { - '#resource':: d.obj(help='"TypedLocalObjectReference contains enough information to let you locate the typed referenced object inside the same namespace."'), - resource: { - '#withApiGroup':: d.fn(help='"APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required."', args=[d.arg(name='apiGroup', type=d.T.string)]), - withApiGroup(apiGroup): { backend+: { resource+: { apiGroup: apiGroup } } }, - '#withKind':: d.fn(help='"Kind is the type of resource being referenced"', args=[d.arg(name='kind', type=d.T.string)]), - withKind(kind): { backend+: { resource+: { kind: kind } } }, - '#withName':: d.fn(help='"Name is the name of resource being referenced"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { backend+: { resource+: { name: name } } }, - }, - '#withServiceName':: d.fn(help='"Specifies the name of the referenced service."', args=[d.arg(name='serviceName', type=d.T.string)]), - withServiceName(serviceName): { backend+: { serviceName: serviceName } }, - '#withServicePort':: d.fn(help='"IntOrString is a type that can hold an int32 or a string. When used in JSON or YAML marshalling and unmarshalling, it produces or consumes the inner type. This allows you to have, for example, a JSON field that can accept a name or number."', args=[d.arg(name='servicePort', type=d.T.string)]), - withServicePort(servicePort): { backend+: { servicePort: servicePort } }, - }, - '#withPath':: d.fn(help="\"Path is matched against the path of an incoming request. Currently it can contain characters disallowed from the conventional \\\"path\\\" part of a URL as defined by RFC 3986. Paths must begin with a '/'. When unspecified, all paths from incoming requests are matched.\"", args=[d.arg(name='path', type=d.T.string)]), - withPath(path): { path: path }, - '#withPathType':: d.fn(help="\"PathType determines the interpretation of the Path matching. PathType can be one of the following values: * Exact: Matches the URL path exactly. * Prefix: Matches based on a URL path prefix split by '/'. Matching is\\n done on a path element by element basis. A path element refers is the\\n list of labels in the path split by the '/' separator. A request is a\\n match for path p if every p is an element-wise prefix of p of the\\n request path. 
Note that if the last element of the path is a substring\\n of the last element in request path, it is not a match (e.g. /foo/bar\\n matches /foo/bar/baz, but does not match /foo/barbaz).\\n* ImplementationSpecific: Interpretation of the Path matching is up to\\n the IngressClass. Implementations can treat this as a separate PathType\\n or treat it identically to Prefix or Exact path types.\\nImplementations are required to support all path types. Defaults to ImplementationSpecific.\"", args=[d.arg(name='pathType', type=d.T.string)]), - withPathType(pathType): { pathType: pathType }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1beta1/httpIngressRuleValue.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1beta1/httpIngressRuleValue.libsonnet deleted file mode 100644 index a54d695b4fc..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1beta1/httpIngressRuleValue.libsonnet +++ /dev/null @@ -1,10 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='httpIngressRuleValue', url='', help="\"HTTPIngressRuleValue is a list of http selectors pointing to backends. In the example: http://\u003chost\u003e/\u003cpath\u003e?\u003csearchpart\u003e -\u003e backend where where parts of the url correspond to RFC 3986, this resource will be used to match against everything after the last '/' and before the first '?' or '#'.\""), - '#withPaths':: d.fn(help='"A collection of paths that map requests to backends."', args=[d.arg(name='paths', type=d.T.array)]), - withPaths(paths): { paths: if std.isArray(v=paths) then paths else [paths] }, - '#withPathsMixin':: d.fn(help='"A collection of paths that map requests to backends."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='paths', type=d.T.array)]), - withPathsMixin(paths): { paths+: if std.isArray(v=paths) then paths else [paths] }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1beta1/ingress.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1beta1/ingress.libsonnet deleted file mode 100644 index bd621b40231..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1beta1/ingress.libsonnet +++ /dev/null @@ -1,85 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='ingress', url='', help='"Ingress is a collection of rules that allow inbound connections to reach the endpoints defined by a backend. An Ingress can be configured to give services externally-reachable urls, load balance traffic, terminate SSL, offer name based virtual hosting etc."'), - '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), - metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. 
More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), - withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), - withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, - '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), - withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, - '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), - withDeletionGracePeriodSeconds(deletionGracePeriodSeconds): { metadata+: { deletionGracePeriodSeconds: deletionGracePeriodSeconds } }, - '#withDeletionTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='deletionTimestamp', type=d.T.string)]), - withDeletionTimestamp(deletionTimestamp): { metadata+: { deletionTimestamp: deletionTimestamp } }, - '#withFinalizers':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. 
Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), - withGenerateName(generateName): { metadata+: { generateName: generateName } }, - '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), - withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), - withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), - withLabelsMixin(labels): { metadata+: { labels+: labels } }, - '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. 
A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { metadata+: { namespace: namespace } }, - '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, - '#withOwnerReferencesMixin':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. 
There cannot be more than one managing controller."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, - '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), - withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), - withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), - withUid(uid): { metadata+: { uid: uid } }, - }, - '#new':: d.fn(help='new returns an instance of Ingress', args=[d.arg(name='name', type=d.T.string)]), - new(name): { - apiVersion: 'networking.k8s.io/v1beta1', - kind: 'Ingress', - } + self.metadata.withName(name=name), - '#spec':: d.obj(help='"IngressSpec describes the Ingress the user wishes to exist."'), - spec: { - '#backend':: d.obj(help='"IngressBackend describes all endpoints for a given service and port."'), - backend: { - '#resource':: d.obj(help='"TypedLocalObjectReference contains enough information to let you locate the typed referenced object inside the same namespace."'), - resource: { - '#withApiGroup':: d.fn(help='"APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. 
For any other third-party types, APIGroup is required."', args=[d.arg(name='apiGroup', type=d.T.string)]), - withApiGroup(apiGroup): { spec+: { backend+: { resource+: { apiGroup: apiGroup } } } }, - '#withKind':: d.fn(help='"Kind is the type of resource being referenced"', args=[d.arg(name='kind', type=d.T.string)]), - withKind(kind): { spec+: { backend+: { resource+: { kind: kind } } } }, - '#withName':: d.fn(help='"Name is the name of resource being referenced"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { spec+: { backend+: { resource+: { name: name } } } }, - }, - '#withServiceName':: d.fn(help='"Specifies the name of the referenced service."', args=[d.arg(name='serviceName', type=d.T.string)]), - withServiceName(serviceName): { spec+: { backend+: { serviceName: serviceName } } }, - '#withServicePort':: d.fn(help='"IntOrString is a type that can hold an int32 or a string. When used in JSON or YAML marshalling and unmarshalling, it produces or consumes the inner type. This allows you to have, for example, a JSON field that can accept a name or number."', args=[d.arg(name='servicePort', type=d.T.string)]), - withServicePort(servicePort): { spec+: { backend+: { servicePort: servicePort } } }, - }, - '#withIngressClassName':: d.fn(help='"IngressClassName is the name of the IngressClass cluster resource. The associated IngressClass defines which controller will implement the resource. This replaces the deprecated `kubernetes.io/ingress.class` annotation. For backwards compatibility, when that annotation is set, it must be given precedence over this field. The controller may emit a warning if the field and annotation have different values. Implementations of this API should ignore Ingresses without a class specified. An IngressClass resource may be marked as default, which can be used to set a default value for this field. For more information, refer to the IngressClass documentation."', args=[d.arg(name='ingressClassName', type=d.T.string)]), - withIngressClassName(ingressClassName): { spec+: { ingressClassName: ingressClassName } }, - '#withRules':: d.fn(help='"A list of host rules used to configure the Ingress. If unspecified, or no rule matches, all traffic is sent to the default backend."', args=[d.arg(name='rules', type=d.T.array)]), - withRules(rules): { spec+: { rules: if std.isArray(v=rules) then rules else [rules] } }, - '#withRulesMixin':: d.fn(help='"A list of host rules used to configure the Ingress. If unspecified, or no rule matches, all traffic is sent to the default backend."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='rules', type=d.T.array)]), - withRulesMixin(rules): { spec+: { rules+: if std.isArray(v=rules) then rules else [rules] } }, - '#withTls':: d.fn(help='"TLS configuration. Currently the Ingress only supports a single TLS port, 443. If multiple members of this list specify different hosts, they will be multiplexed on the same port according to the hostname specified through the SNI TLS extension, if the ingress controller fulfilling the ingress supports SNI."', args=[d.arg(name='tls', type=d.T.array)]), - withTls(tls): { spec+: { tls: if std.isArray(v=tls) then tls else [tls] } }, - '#withTlsMixin':: d.fn(help='"TLS configuration. Currently the Ingress only supports a single TLS port, 443. 
If multiple members of this list specify different hosts, they will be multiplexed on the same port according to the hostname specified through the SNI TLS extension, if the ingress controller fulfilling the ingress supports SNI."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='tls', type=d.T.array)]), - withTlsMixin(tls): { spec+: { tls+: if std.isArray(v=tls) then tls else [tls] } }, - }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1beta1/ingressBackend.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1beta1/ingressBackend.libsonnet deleted file mode 100644 index ee01f37e01d..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1beta1/ingressBackend.libsonnet +++ /dev/null @@ -1,19 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='ingressBackend', url='', help='"IngressBackend describes all endpoints for a given service and port."'), - '#resource':: d.obj(help='"TypedLocalObjectReference contains enough information to let you locate the typed referenced object inside the same namespace."'), - resource: { - '#withApiGroup':: d.fn(help='"APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required."', args=[d.arg(name='apiGroup', type=d.T.string)]), - withApiGroup(apiGroup): { resource+: { apiGroup: apiGroup } }, - '#withKind':: d.fn(help='"Kind is the type of resource being referenced"', args=[d.arg(name='kind', type=d.T.string)]), - withKind(kind): { resource+: { kind: kind } }, - '#withName':: d.fn(help='"Name is the name of resource being referenced"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { resource+: { name: name } }, - }, - '#withServiceName':: d.fn(help='"Specifies the name of the referenced service."', args=[d.arg(name='serviceName', type=d.T.string)]), - withServiceName(serviceName): { serviceName: serviceName }, - '#withServicePort':: d.fn(help='"IntOrString is a type that can hold an int32 or a string. When used in JSON or YAML marshalling and unmarshalling, it produces or consumes the inner type. This allows you to have, for example, a JSON field that can accept a name or number."', args=[d.arg(name='servicePort', type=d.T.string)]), - withServicePort(servicePort): { servicePort: servicePort }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1beta1/ingressClass.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1beta1/ingressClass.libsonnet deleted file mode 100644 index 2ade9e9ee52..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1beta1/ingressClass.libsonnet +++ /dev/null @@ -1,74 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='ingressClass', url='', help='"IngressClass represents the class of the Ingress, referenced by the Ingress Spec. The `ingressclass.kubernetes.io/is-default-class` annotation can be used to indicate that an IngressClass should be considered default. 
When a single IngressClass resource has this annotation set to true, new Ingress resources without a class specified will be assigned this default class."'), - '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), - metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), - withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), - withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, - '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), - withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, - '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), - withDeletionGracePeriodSeconds(deletionGracePeriodSeconds): { metadata+: { deletionGracePeriodSeconds: deletionGracePeriodSeconds } }, - '#withDeletionTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='deletionTimestamp', type=d.T.string)]), - withDeletionTimestamp(deletionTimestamp): { metadata+: { deletionTimestamp: deletionTimestamp } }, - '#withFinalizers':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. 
If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), - withGenerateName(generateName): { metadata+: { generateName: generateName } }, - '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), - withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. 
More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), - withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), - withLabelsMixin(labels): { metadata+: { labels+: labels } }, - '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { metadata+: { namespace: namespace } }, - '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. 
There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, - '#withOwnerReferencesMixin':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, - '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), - withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), - withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), - withUid(uid): { metadata+: { uid: uid } }, - }, - '#new':: d.fn(help='new returns an instance of IngressClass', args=[d.arg(name='name', type=d.T.string)]), - new(name): { - apiVersion: 'networking.k8s.io/v1beta1', - kind: 'IngressClass', - } + self.metadata.withName(name=name), - '#spec':: d.obj(help='"IngressClassSpec provides information about the class of an Ingress."'), - spec: { - '#parameters':: d.obj(help='"IngressClassParametersReference identifies an API object. This can be used to specify a cluster or namespace-scoped resource."'), - parameters: { - '#withApiGroup':: d.fn(help='"APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. 
For any other third-party types, APIGroup is required."', args=[d.arg(name='apiGroup', type=d.T.string)]), - withApiGroup(apiGroup): { spec+: { parameters+: { apiGroup: apiGroup } } }, - '#withKind':: d.fn(help='"Kind is the type of resource being referenced."', args=[d.arg(name='kind', type=d.T.string)]), - withKind(kind): { spec+: { parameters+: { kind: kind } } }, - '#withName':: d.fn(help='"Name is the name of resource being referenced."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { spec+: { parameters+: { name: name } } }, - '#withNamespace':: d.fn(help='"Namespace is the namespace of the resource being referenced. This field is required when scope is set to \\"Namespace\\" and must be unset when scope is set to \\"Cluster\\"."', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { spec+: { parameters+: { namespace: namespace } } }, - '#withScope':: d.fn(help='"Scope represents if this refers to a cluster or namespace scoped resource. This may be set to \\"Cluster\\" (default) or \\"Namespace\\". Field can be enabled with IngressClassNamespacedParams feature gate."', args=[d.arg(name='scope', type=d.T.string)]), - withScope(scope): { spec+: { parameters+: { scope: scope } } }, - }, - '#withController':: d.fn(help='"Controller refers to the name of the controller that should handle this class. This allows for different \\"flavors\\" that are controlled by the same controller. For example, you may have different Parameters for the same implementing controller. This should be specified as a domain-prefixed path no more than 250 characters in length, e.g. \\"acme.io/ingress-controller\\". This field is immutable."', args=[d.arg(name='controller', type=d.T.string)]), - withController(controller): { spec+: { controller: controller } }, - }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1beta1/ingressClassParametersReference.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1beta1/ingressClassParametersReference.libsonnet deleted file mode 100644 index b1ed8b58500..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1beta1/ingressClassParametersReference.libsonnet +++ /dev/null @@ -1,16 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='ingressClassParametersReference', url='', help='"IngressClassParametersReference identifies an API object. This can be used to specify a cluster or namespace-scoped resource."'), - '#withApiGroup':: d.fn(help='"APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required."', args=[d.arg(name='apiGroup', type=d.T.string)]), - withApiGroup(apiGroup): { apiGroup: apiGroup }, - '#withKind':: d.fn(help='"Kind is the type of resource being referenced."', args=[d.arg(name='kind', type=d.T.string)]), - withKind(kind): { kind: kind }, - '#withName':: d.fn(help='"Name is the name of resource being referenced."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { name: name }, - '#withNamespace':: d.fn(help='"Namespace is the namespace of the resource being referenced. 
This field is required when scope is set to \\"Namespace\\" and must be unset when scope is set to \\"Cluster\\"."', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { namespace: namespace }, - '#withScope':: d.fn(help='"Scope represents if this refers to a cluster or namespace scoped resource. This may be set to \\"Cluster\\" (default) or \\"Namespace\\". Field can be enabled with IngressClassNamespacedParams feature gate."', args=[d.arg(name='scope', type=d.T.string)]), - withScope(scope): { scope: scope }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1beta1/ingressClassSpec.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1beta1/ingressClassSpec.libsonnet deleted file mode 100644 index ebbc86576cb..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1beta1/ingressClassSpec.libsonnet +++ /dev/null @@ -1,21 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='ingressClassSpec', url='', help='"IngressClassSpec provides information about the class of an Ingress."'), - '#parameters':: d.obj(help='"IngressClassParametersReference identifies an API object. This can be used to specify a cluster or namespace-scoped resource."'), - parameters: { - '#withApiGroup':: d.fn(help='"APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required."', args=[d.arg(name='apiGroup', type=d.T.string)]), - withApiGroup(apiGroup): { parameters+: { apiGroup: apiGroup } }, - '#withKind':: d.fn(help='"Kind is the type of resource being referenced."', args=[d.arg(name='kind', type=d.T.string)]), - withKind(kind): { parameters+: { kind: kind } }, - '#withName':: d.fn(help='"Name is the name of resource being referenced."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { parameters+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace is the namespace of the resource being referenced. This field is required when scope is set to \\"Namespace\\" and must be unset when scope is set to \\"Cluster\\"."', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { parameters+: { namespace: namespace } }, - '#withScope':: d.fn(help='"Scope represents if this refers to a cluster or namespace scoped resource. This may be set to \\"Cluster\\" (default) or \\"Namespace\\". Field can be enabled with IngressClassNamespacedParams feature gate."', args=[d.arg(name='scope', type=d.T.string)]), - withScope(scope): { parameters+: { scope: scope } }, - }, - '#withController':: d.fn(help='"Controller refers to the name of the controller that should handle this class. This allows for different \\"flavors\\" that are controlled by the same controller. For example, you may have different Parameters for the same implementing controller. This should be specified as a domain-prefixed path no more than 250 characters in length, e.g. \\"acme.io/ingress-controller\\". 
This field is immutable."', args=[d.arg(name='controller', type=d.T.string)]), - withController(controller): { controller: controller }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1beta1/ingressRule.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1beta1/ingressRule.libsonnet deleted file mode 100644 index ebba87ea6bf..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1beta1/ingressRule.libsonnet +++ /dev/null @@ -1,15 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='ingressRule', url='', help='"IngressRule represents the rules mapping the paths under a specified host to the related backend services. Incoming requests are first evaluated for a host match, then routed to the backend associated with the matching IngressRuleValue."'), - '#http':: d.obj(help="\"HTTPIngressRuleValue is a list of http selectors pointing to backends. In the example: http://\u003chost\u003e/\u003cpath\u003e?\u003csearchpart\u003e -\u003e backend where where parts of the url correspond to RFC 3986, this resource will be used to match against everything after the last '/' and before the first '?' or '#'.\""), - http: { - '#withPaths':: d.fn(help='"A collection of paths that map requests to backends."', args=[d.arg(name='paths', type=d.T.array)]), - withPaths(paths): { http+: { paths: if std.isArray(v=paths) then paths else [paths] } }, - '#withPathsMixin':: d.fn(help='"A collection of paths that map requests to backends."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='paths', type=d.T.array)]), - withPathsMixin(paths): { http+: { paths+: if std.isArray(v=paths) then paths else [paths] } }, - }, - '#withHost':: d.fn(help="\"Host is the fully qualified domain name of a network host, as defined by RFC 3986. Note the following deviations from the \\\"host\\\" part of the URI as defined in RFC 3986: 1. IPs are not allowed. Currently an IngressRuleValue can only apply to\\n the IP in the Spec of the parent Ingress.\\n2. The `:` delimiter is not respected because ports are not allowed.\\n\\t Currently the port of an Ingress is implicitly :80 for http and\\n\\t :443 for https.\\nBoth these may change in the future. Incoming requests are matched against the host before the IngressRuleValue. If the host is unspecified, the Ingress routes all traffic based on the specified IngressRuleValue.\\n\\nHost can be \\\"precise\\\" which is a domain name without the terminating dot of a network host (e.g. \\\"foo.bar.com\\\") or \\\"wildcard\\\", which is a domain name prefixed with a single wildcard label (e.g. \\\"*.foo.com\\\"). The wildcard character '*' must appear by itself as the first DNS label and matches only a single label. You cannot have a wildcard label by itself (e.g. Host == \\\"*\\\"). Requests will be matched against the Host field in the following way: 1. If Host is precise, the request matches this rule if the http host header is equal to Host. 2. 
If Host is a wildcard, then the request matches this rule if the http host header is to equal to the suffix (removing the first label) of the wildcard rule.\"", args=[d.arg(name='host', type=d.T.string)]), - withHost(host): { host: host }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1beta1/ingressSpec.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1beta1/ingressSpec.libsonnet deleted file mode 100644 index 35156d74036..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1beta1/ingressSpec.libsonnet +++ /dev/null @@ -1,32 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='ingressSpec', url='', help='"IngressSpec describes the Ingress the user wishes to exist."'), - '#backend':: d.obj(help='"IngressBackend describes all endpoints for a given service and port."'), - backend: { - '#resource':: d.obj(help='"TypedLocalObjectReference contains enough information to let you locate the typed referenced object inside the same namespace."'), - resource: { - '#withApiGroup':: d.fn(help='"APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required."', args=[d.arg(name='apiGroup', type=d.T.string)]), - withApiGroup(apiGroup): { backend+: { resource+: { apiGroup: apiGroup } } }, - '#withKind':: d.fn(help='"Kind is the type of resource being referenced"', args=[d.arg(name='kind', type=d.T.string)]), - withKind(kind): { backend+: { resource+: { kind: kind } } }, - '#withName':: d.fn(help='"Name is the name of resource being referenced"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { backend+: { resource+: { name: name } } }, - }, - '#withServiceName':: d.fn(help='"Specifies the name of the referenced service."', args=[d.arg(name='serviceName', type=d.T.string)]), - withServiceName(serviceName): { backend+: { serviceName: serviceName } }, - '#withServicePort':: d.fn(help='"IntOrString is a type that can hold an int32 or a string. When used in JSON or YAML marshalling and unmarshalling, it produces or consumes the inner type. This allows you to have, for example, a JSON field that can accept a name or number."', args=[d.arg(name='servicePort', type=d.T.string)]), - withServicePort(servicePort): { backend+: { servicePort: servicePort } }, - }, - '#withIngressClassName':: d.fn(help='"IngressClassName is the name of the IngressClass cluster resource. The associated IngressClass defines which controller will implement the resource. This replaces the deprecated `kubernetes.io/ingress.class` annotation. For backwards compatibility, when that annotation is set, it must be given precedence over this field. The controller may emit a warning if the field and annotation have different values. Implementations of this API should ignore Ingresses without a class specified. An IngressClass resource may be marked as default, which can be used to set a default value for this field. For more information, refer to the IngressClass documentation."', args=[d.arg(name='ingressClassName', type=d.T.string)]), - withIngressClassName(ingressClassName): { ingressClassName: ingressClassName }, - '#withRules':: d.fn(help='"A list of host rules used to configure the Ingress. 
If unspecified, or no rule matches, all traffic is sent to the default backend."', args=[d.arg(name='rules', type=d.T.array)]), - withRules(rules): { rules: if std.isArray(v=rules) then rules else [rules] }, - '#withRulesMixin':: d.fn(help='"A list of host rules used to configure the Ingress. If unspecified, or no rule matches, all traffic is sent to the default backend."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='rules', type=d.T.array)]), - withRulesMixin(rules): { rules+: if std.isArray(v=rules) then rules else [rules] }, - '#withTls':: d.fn(help='"TLS configuration. Currently the Ingress only supports a single TLS port, 443. If multiple members of this list specify different hosts, they will be multiplexed on the same port according to the hostname specified through the SNI TLS extension, if the ingress controller fulfilling the ingress supports SNI."', args=[d.arg(name='tls', type=d.T.array)]), - withTls(tls): { tls: if std.isArray(v=tls) then tls else [tls] }, - '#withTlsMixin':: d.fn(help='"TLS configuration. Currently the Ingress only supports a single TLS port, 443. If multiple members of this list specify different hosts, they will be multiplexed on the same port according to the hostname specified through the SNI TLS extension, if the ingress controller fulfilling the ingress supports SNI."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='tls', type=d.T.array)]), - withTlsMixin(tls): { tls+: if std.isArray(v=tls) then tls else [tls] }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1beta1/ingressStatus.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1beta1/ingressStatus.libsonnet deleted file mode 100644 index 7bae060053d..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1beta1/ingressStatus.libsonnet +++ /dev/null @@ -1,13 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='ingressStatus', url='', help='"IngressStatus describe the current state of the Ingress."'), - '#loadBalancer':: d.obj(help='"LoadBalancerStatus represents the status of a load-balancer."'), - loadBalancer: { - '#withIngress':: d.fn(help='"Ingress is a list containing ingress points for the load-balancer. Traffic intended for the service should be sent to these ingress points."', args=[d.arg(name='ingress', type=d.T.array)]), - withIngress(ingress): { loadBalancer+: { ingress: if std.isArray(v=ingress) then ingress else [ingress] } }, - '#withIngressMixin':: d.fn(help='"Ingress is a list containing ingress points for the load-balancer. 
Traffic intended for the service should be sent to these ingress points."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ingress', type=d.T.array)]), - withIngressMixin(ingress): { loadBalancer+: { ingress+: if std.isArray(v=ingress) then ingress else [ingress] } }, - }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1beta1/ingressTLS.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1beta1/ingressTLS.libsonnet deleted file mode 100644 index 95d59bcef55..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1beta1/ingressTLS.libsonnet +++ /dev/null @@ -1,12 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='ingressTLS', url='', help='"IngressTLS describes the transport layer security associated with an Ingress."'), - '#withHosts':: d.fn(help='"Hosts are a list of hosts included in the TLS certificate. The values in this list must match the name/s used in the tlsSecret. Defaults to the wildcard host setting for the loadbalancer controller fulfilling this Ingress, if left unspecified."', args=[d.arg(name='hosts', type=d.T.array)]), - withHosts(hosts): { hosts: if std.isArray(v=hosts) then hosts else [hosts] }, - '#withHostsMixin':: d.fn(help='"Hosts are a list of hosts included in the TLS certificate. The values in this list must match the name/s used in the tlsSecret. Defaults to the wildcard host setting for the loadbalancer controller fulfilling this Ingress, if left unspecified."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='hosts', type=d.T.array)]), - withHostsMixin(hosts): { hosts+: if std.isArray(v=hosts) then hosts else [hosts] }, - '#withSecretName':: d.fn(help='"SecretName is the name of the secret used to terminate TLS traffic on port 443. Field is left optional to allow TLS routing based on SNI hostname alone. 
If the SNI host in a listener conflicts with the \\"Host\\" header field used by an IngressRule, the SNI host is used for termination and value of the Host header is used for routing."', args=[d.arg(name='secretName', type=d.T.string)]), - withSecretName(secretName): { secretName: secretName }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1beta1/main.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1beta1/main.libsonnet deleted file mode 100644 index 798e438d625..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1beta1/main.libsonnet +++ /dev/null @@ -1,15 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='v1beta1', url='', help=''), - httpIngressPath: (import 'httpIngressPath.libsonnet'), - httpIngressRuleValue: (import 'httpIngressRuleValue.libsonnet'), - ingress: (import 'ingress.libsonnet'), - ingressBackend: (import 'ingressBackend.libsonnet'), - ingressClass: (import 'ingressClass.libsonnet'), - ingressClassParametersReference: (import 'ingressClassParametersReference.libsonnet'), - ingressClassSpec: (import 'ingressClassSpec.libsonnet'), - ingressRule: (import 'ingressRule.libsonnet'), - ingressSpec: (import 'ingressSpec.libsonnet'), - ingressStatus: (import 'ingressStatus.libsonnet'), - ingressTLS: (import 'ingressTLS.libsonnet'), -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/node/v1/overhead.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/node/v1/overhead.libsonnet deleted file mode 100644 index fa33b3e39f3..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/node/v1/overhead.libsonnet +++ /dev/null @@ -1,10 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='overhead', url='', help='"Overhead structure represents the resource overhead associated with running a pod."'), - '#withPodFixed':: d.fn(help='"PodFixed represents the fixed resource overhead associated with running a pod."', args=[d.arg(name='podFixed', type=d.T.object)]), - withPodFixed(podFixed): { podFixed: podFixed }, - '#withPodFixedMixin':: d.fn(help='"PodFixed represents the fixed resource overhead associated with running a pod."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='podFixed', type=d.T.object)]), - withPodFixedMixin(podFixed): { podFixed+: podFixed }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/node/v1/scheduling.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/node/v1/scheduling.libsonnet deleted file mode 100644 index 066bfbc80b2..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/node/v1/scheduling.libsonnet +++ /dev/null @@ -1,14 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='scheduling', url='', help='"Scheduling specifies the scheduling constraints for nodes supporting a RuntimeClass."'), - '#withNodeSelector':: d.fn(help="\"nodeSelector lists labels that must be present on nodes that support this RuntimeClass. 
Pods using this RuntimeClass can only be scheduled to a node matched by this selector. The RuntimeClass nodeSelector is merged with a pod's existing nodeSelector. Any conflicts will cause the pod to be rejected in admission.\"", args=[d.arg(name='nodeSelector', type=d.T.object)]), - withNodeSelector(nodeSelector): { nodeSelector: nodeSelector }, - '#withNodeSelectorMixin':: d.fn(help="\"nodeSelector lists labels that must be present on nodes that support this RuntimeClass. Pods using this RuntimeClass can only be scheduled to a node matched by this selector. The RuntimeClass nodeSelector is merged with a pod's existing nodeSelector. Any conflicts will cause the pod to be rejected in admission.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='nodeSelector', type=d.T.object)]), - withNodeSelectorMixin(nodeSelector): { nodeSelector+: nodeSelector }, - '#withTolerations':: d.fn(help='"tolerations are appended (excluding duplicates) to pods running with this RuntimeClass during admission, effectively unioning the set of nodes tolerated by the pod and the RuntimeClass."', args=[d.arg(name='tolerations', type=d.T.array)]), - withTolerations(tolerations): { tolerations: if std.isArray(v=tolerations) then tolerations else [tolerations] }, - '#withTolerationsMixin':: d.fn(help='"tolerations are appended (excluding duplicates) to pods running with this RuntimeClass during admission, effectively unioning the set of nodes tolerated by the pod and the RuntimeClass."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='tolerations', type=d.T.array)]), - withTolerationsMixin(tolerations): { tolerations+: if std.isArray(v=tolerations) then tolerations else [tolerations] }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/node/v1alpha1/main.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/node/v1alpha1/main.libsonnet deleted file mode 100644 index fc1ded9abfd..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/node/v1alpha1/main.libsonnet +++ /dev/null @@ -1,8 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='v1alpha1', url='', help=''), - overhead: (import 'overhead.libsonnet'), - runtimeClass: (import 'runtimeClass.libsonnet'), - runtimeClassSpec: (import 'runtimeClassSpec.libsonnet'), - scheduling: (import 'scheduling.libsonnet'), -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/node/v1alpha1/overhead.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/node/v1alpha1/overhead.libsonnet deleted file mode 100644 index fa33b3e39f3..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/node/v1alpha1/overhead.libsonnet +++ /dev/null @@ -1,10 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='overhead', url='', help='"Overhead structure represents the resource overhead associated with running a pod."'), - '#withPodFixed':: d.fn(help='"PodFixed represents the fixed resource overhead associated with running a pod."', args=[d.arg(name='podFixed', type=d.T.object)]), - withPodFixed(podFixed): { podFixed: podFixed }, - '#withPodFixedMixin':: d.fn(help='"PodFixed represents the fixed resource overhead associated with running a 
pod."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='podFixed', type=d.T.object)]), - withPodFixedMixin(podFixed): { podFixed+: podFixed }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/node/v1alpha1/runtimeClass.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/node/v1alpha1/runtimeClass.libsonnet deleted file mode 100644 index 30794b543e2..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/node/v1alpha1/runtimeClass.libsonnet +++ /dev/null @@ -1,79 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='runtimeClass', url='', help='"RuntimeClass defines a class of container runtime supported in the cluster. The RuntimeClass is used to determine which container runtime is used to run all containers in a pod. RuntimeClasses are (currently) manually defined by a user or cluster provisioner, and referenced in the PodSpec. The Kubelet is responsible for resolving the RuntimeClassName reference before running the pod. For more details, see https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md"'), - '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), - metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), - withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), - withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, - '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), - withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, - '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. 
Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), - withDeletionGracePeriodSeconds(deletionGracePeriodSeconds): { metadata+: { deletionGracePeriodSeconds: deletionGracePeriodSeconds } }, - '#withDeletionTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='deletionTimestamp', type=d.T.string)]), - withDeletionTimestamp(deletionTimestamp): { metadata+: { deletionTimestamp: deletionTimestamp } }, - '#withFinalizers':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. 
The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), - withGenerateName(generateName): { metadata+: { generateName: generateName } }, - '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), - withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), - withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), - withLabelsMixin(labels): { metadata+: { labels+: labels } }, - '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. 
More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { metadata+: { namespace: namespace } }, - '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, - '#withOwnerReferencesMixin':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, - '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), - withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), - withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. 
More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), - withUid(uid): { metadata+: { uid: uid } }, - }, - '#new':: d.fn(help='new returns an instance of RuntimeClass', args=[d.arg(name='name', type=d.T.string)]), - new(name): { - apiVersion: 'node.k8s.io/v1alpha1', - kind: 'RuntimeClass', - } + self.metadata.withName(name=name), - '#spec':: d.obj(help='"RuntimeClassSpec is a specification of a RuntimeClass. It contains parameters that are required to describe the RuntimeClass to the Container Runtime Interface (CRI) implementation, as well as any other components that need to understand how the pod will be run. The RuntimeClassSpec is immutable."'), - spec: { - '#overhead':: d.obj(help='"Overhead structure represents the resource overhead associated with running a pod."'), - overhead: { - '#withPodFixed':: d.fn(help='"PodFixed represents the fixed resource overhead associated with running a pod."', args=[d.arg(name='podFixed', type=d.T.object)]), - withPodFixed(podFixed): { spec+: { overhead+: { podFixed: podFixed } } }, - '#withPodFixedMixin':: d.fn(help='"PodFixed represents the fixed resource overhead associated with running a pod."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='podFixed', type=d.T.object)]), - withPodFixedMixin(podFixed): { spec+: { overhead+: { podFixed+: podFixed } } }, - }, - '#scheduling':: d.obj(help='"Scheduling specifies the scheduling constraints for nodes supporting a RuntimeClass."'), - scheduling: { - '#withNodeSelector':: d.fn(help="\"nodeSelector lists labels that must be present on nodes that support this RuntimeClass. Pods using this RuntimeClass can only be scheduled to a node matched by this selector. The RuntimeClass nodeSelector is merged with a pod's existing nodeSelector. Any conflicts will cause the pod to be rejected in admission.\"", args=[d.arg(name='nodeSelector', type=d.T.object)]), - withNodeSelector(nodeSelector): { spec+: { scheduling+: { nodeSelector: nodeSelector } } }, - '#withNodeSelectorMixin':: d.fn(help="\"nodeSelector lists labels that must be present on nodes that support this RuntimeClass. Pods using this RuntimeClass can only be scheduled to a node matched by this selector. The RuntimeClass nodeSelector is merged with a pod's existing nodeSelector. 
Any conflicts will cause the pod to be rejected in admission.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='nodeSelector', type=d.T.object)]), - withNodeSelectorMixin(nodeSelector): { spec+: { scheduling+: { nodeSelector+: nodeSelector } } }, - '#withTolerations':: d.fn(help='"tolerations are appended (excluding duplicates) to pods running with this RuntimeClass during admission, effectively unioning the set of nodes tolerated by the pod and the RuntimeClass."', args=[d.arg(name='tolerations', type=d.T.array)]), - withTolerations(tolerations): { spec+: { scheduling+: { tolerations: if std.isArray(v=tolerations) then tolerations else [tolerations] } } }, - '#withTolerationsMixin':: d.fn(help='"tolerations are appended (excluding duplicates) to pods running with this RuntimeClass during admission, effectively unioning the set of nodes tolerated by the pod and the RuntimeClass."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='tolerations', type=d.T.array)]), - withTolerationsMixin(tolerations): { spec+: { scheduling+: { tolerations+: if std.isArray(v=tolerations) then tolerations else [tolerations] } } }, - }, - '#withRuntimeHandler':: d.fn(help='"RuntimeHandler specifies the underlying runtime and configuration that the CRI implementation will use to handle pods of this class. The possible values are specific to the node & CRI configuration. It is assumed that all handlers are available on every node, and handlers of the same name are equivalent on every node. For example, a handler called \\"runc\\" might specify that the runc OCI runtime (using native Linux containers) will be used to run the containers in a pod. The RuntimeHandler must be lowercase, conform to the DNS Label (RFC 1123) requirements, and is immutable."', args=[d.arg(name='runtimeHandler', type=d.T.string)]), - withRuntimeHandler(runtimeHandler): { spec+: { runtimeHandler: runtimeHandler } }, - }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/node/v1alpha1/runtimeClassSpec.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/node/v1alpha1/runtimeClassSpec.libsonnet deleted file mode 100644 index 98b1026db37..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/node/v1alpha1/runtimeClassSpec.libsonnet +++ /dev/null @@ -1,26 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='runtimeClassSpec', url='', help='"RuntimeClassSpec is a specification of a RuntimeClass. It contains parameters that are required to describe the RuntimeClass to the Container Runtime Interface (CRI) implementation, as well as any other components that need to understand how the pod will be run. 
The RuntimeClassSpec is immutable."'), - '#overhead':: d.obj(help='"Overhead structure represents the resource overhead associated with running a pod."'), - overhead: { - '#withPodFixed':: d.fn(help='"PodFixed represents the fixed resource overhead associated with running a pod."', args=[d.arg(name='podFixed', type=d.T.object)]), - withPodFixed(podFixed): { overhead+: { podFixed: podFixed } }, - '#withPodFixedMixin':: d.fn(help='"PodFixed represents the fixed resource overhead associated with running a pod."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='podFixed', type=d.T.object)]), - withPodFixedMixin(podFixed): { overhead+: { podFixed+: podFixed } }, - }, - '#scheduling':: d.obj(help='"Scheduling specifies the scheduling constraints for nodes supporting a RuntimeClass."'), - scheduling: { - '#withNodeSelector':: d.fn(help="\"nodeSelector lists labels that must be present on nodes that support this RuntimeClass. Pods using this RuntimeClass can only be scheduled to a node matched by this selector. The RuntimeClass nodeSelector is merged with a pod's existing nodeSelector. Any conflicts will cause the pod to be rejected in admission.\"", args=[d.arg(name='nodeSelector', type=d.T.object)]), - withNodeSelector(nodeSelector): { scheduling+: { nodeSelector: nodeSelector } }, - '#withNodeSelectorMixin':: d.fn(help="\"nodeSelector lists labels that must be present on nodes that support this RuntimeClass. Pods using this RuntimeClass can only be scheduled to a node matched by this selector. The RuntimeClass nodeSelector is merged with a pod's existing nodeSelector. Any conflicts will cause the pod to be rejected in admission.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='nodeSelector', type=d.T.object)]), - withNodeSelectorMixin(nodeSelector): { scheduling+: { nodeSelector+: nodeSelector } }, - '#withTolerations':: d.fn(help='"tolerations are appended (excluding duplicates) to pods running with this RuntimeClass during admission, effectively unioning the set of nodes tolerated by the pod and the RuntimeClass."', args=[d.arg(name='tolerations', type=d.T.array)]), - withTolerations(tolerations): { scheduling+: { tolerations: if std.isArray(v=tolerations) then tolerations else [tolerations] } }, - '#withTolerationsMixin':: d.fn(help='"tolerations are appended (excluding duplicates) to pods running with this RuntimeClass during admission, effectively unioning the set of nodes tolerated by the pod and the RuntimeClass."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='tolerations', type=d.T.array)]), - withTolerationsMixin(tolerations): { scheduling+: { tolerations+: if std.isArray(v=tolerations) then tolerations else [tolerations] } }, - }, - '#withRuntimeHandler':: d.fn(help='"RuntimeHandler specifies the underlying runtime and configuration that the CRI implementation will use to handle pods of this class. The possible values are specific to the node & CRI configuration. It is assumed that all handlers are available on every node, and handlers of the same name are equivalent on every node. For example, a handler called \\"runc\\" might specify that the runc OCI runtime (using native Linux containers) will be used to run the containers in a pod. 
The RuntimeHandler must be lowercase, conform to the DNS Label (RFC 1123) requirements, and is immutable."', args=[d.arg(name='runtimeHandler', type=d.T.string)]), - withRuntimeHandler(runtimeHandler): { runtimeHandler: runtimeHandler }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/node/v1alpha1/scheduling.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/node/v1alpha1/scheduling.libsonnet deleted file mode 100644 index 066bfbc80b2..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/node/v1alpha1/scheduling.libsonnet +++ /dev/null @@ -1,14 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='scheduling', url='', help='"Scheduling specifies the scheduling constraints for nodes supporting a RuntimeClass."'), - '#withNodeSelector':: d.fn(help="\"nodeSelector lists labels that must be present on nodes that support this RuntimeClass. Pods using this RuntimeClass can only be scheduled to a node matched by this selector. The RuntimeClass nodeSelector is merged with a pod's existing nodeSelector. Any conflicts will cause the pod to be rejected in admission.\"", args=[d.arg(name='nodeSelector', type=d.T.object)]), - withNodeSelector(nodeSelector): { nodeSelector: nodeSelector }, - '#withNodeSelectorMixin':: d.fn(help="\"nodeSelector lists labels that must be present on nodes that support this RuntimeClass. Pods using this RuntimeClass can only be scheduled to a node matched by this selector. The RuntimeClass nodeSelector is merged with a pod's existing nodeSelector. Any conflicts will cause the pod to be rejected in admission.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='nodeSelector', type=d.T.object)]), - withNodeSelectorMixin(nodeSelector): { nodeSelector+: nodeSelector }, - '#withTolerations':: d.fn(help='"tolerations are appended (excluding duplicates) to pods running with this RuntimeClass during admission, effectively unioning the set of nodes tolerated by the pod and the RuntimeClass."', args=[d.arg(name='tolerations', type=d.T.array)]), - withTolerations(tolerations): { tolerations: if std.isArray(v=tolerations) then tolerations else [tolerations] }, - '#withTolerationsMixin':: d.fn(help='"tolerations are appended (excluding duplicates) to pods running with this RuntimeClass during admission, effectively unioning the set of nodes tolerated by the pod and the RuntimeClass."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='tolerations', type=d.T.array)]), - withTolerationsMixin(tolerations): { tolerations+: if std.isArray(v=tolerations) then tolerations else [tolerations] }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/node/v1beta1/main.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/node/v1beta1/main.libsonnet deleted file mode 100644 index cbc6521678c..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/node/v1beta1/main.libsonnet +++ /dev/null @@ -1,7 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='v1beta1', url='', help=''), - overhead: (import 'overhead.libsonnet'), - runtimeClass: (import 'runtimeClass.libsonnet'), - scheduling: (import 
'scheduling.libsonnet'), -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/node/v1beta1/overhead.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/node/v1beta1/overhead.libsonnet deleted file mode 100644 index fa33b3e39f3..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/node/v1beta1/overhead.libsonnet +++ /dev/null @@ -1,10 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='overhead', url='', help='"Overhead structure represents the resource overhead associated with running a pod."'), - '#withPodFixed':: d.fn(help='"PodFixed represents the fixed resource overhead associated with running a pod."', args=[d.arg(name='podFixed', type=d.T.object)]), - withPodFixed(podFixed): { podFixed: podFixed }, - '#withPodFixedMixin':: d.fn(help='"PodFixed represents the fixed resource overhead associated with running a pod."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='podFixed', type=d.T.object)]), - withPodFixedMixin(podFixed): { podFixed+: podFixed }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/node/v1beta1/runtimeClass.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/node/v1beta1/runtimeClass.libsonnet deleted file mode 100644 index ca5211d7580..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/node/v1beta1/runtimeClass.libsonnet +++ /dev/null @@ -1,76 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='runtimeClass', url='', help='"RuntimeClass defines a class of container runtime supported in the cluster. The RuntimeClass is used to determine which container runtime is used to run all containers in a pod. RuntimeClasses are (currently) manually defined by a user or cluster provisioner, and referenced in the PodSpec. The Kubelet is responsible for resolving the RuntimeClassName reference before running the pod. For more details, see https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md"'), - '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), - metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), - withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), - withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. 
This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, - '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), - withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, - '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), - withDeletionGracePeriodSeconds(deletionGracePeriodSeconds): { metadata+: { deletionGracePeriodSeconds: deletionGracePeriodSeconds } }, - '#withDeletionTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='deletionTimestamp', type=d.T.string)]), - withDeletionTimestamp(deletionTimestamp): { metadata+: { deletionTimestamp: deletionTimestamp } }, - '#withFinalizers':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. 
Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), - withGenerateName(generateName): { metadata+: { generateName: generateName } }, - '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), - withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), - withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), - withLabelsMixin(labels): { metadata+: { labels+: labels } }, - '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. 
A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { metadata+: { namespace: namespace } }, - '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, - '#withOwnerReferencesMixin':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, - '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), - withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), - withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), - withUid(uid): { metadata+: { uid: uid } }, - }, - '#new':: d.fn(help='new returns an instance of RuntimeClass', args=[d.arg(name='name', type=d.T.string)]), - new(name): { - apiVersion: 'node.k8s.io/v1beta1', - kind: 'RuntimeClass', - } + self.metadata.withName(name=name), - '#overhead':: d.obj(help='"Overhead structure represents the resource overhead associated with running a pod."'), - overhead: { - '#withPodFixed':: d.fn(help='"PodFixed represents the fixed resource overhead associated with running a pod."', args=[d.arg(name='podFixed', type=d.T.object)]), - withPodFixed(podFixed): { overhead+: { podFixed: podFixed } }, - '#withPodFixedMixin':: d.fn(help='"PodFixed represents the fixed resource overhead associated with running a pod."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='podFixed', type=d.T.object)]), - withPodFixedMixin(podFixed): { overhead+: { podFixed+: podFixed } }, - }, - '#scheduling':: d.obj(help='"Scheduling specifies the scheduling constraints for nodes supporting a RuntimeClass."'), - scheduling: { - '#withNodeSelector':: d.fn(help="\"nodeSelector lists labels that must be present on nodes that support this RuntimeClass. Pods using this RuntimeClass can only be scheduled to a node matched by this selector. The RuntimeClass nodeSelector is merged with a pod's existing nodeSelector. Any conflicts will cause the pod to be rejected in admission.\"", args=[d.arg(name='nodeSelector', type=d.T.object)]), - withNodeSelector(nodeSelector): { scheduling+: { nodeSelector: nodeSelector } }, - '#withNodeSelectorMixin':: d.fn(help="\"nodeSelector lists labels that must be present on nodes that support this RuntimeClass. Pods using this RuntimeClass can only be scheduled to a node matched by this selector. The RuntimeClass nodeSelector is merged with a pod's existing nodeSelector. 
Any conflicts will cause the pod to be rejected in admission.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='nodeSelector', type=d.T.object)]), - withNodeSelectorMixin(nodeSelector): { scheduling+: { nodeSelector+: nodeSelector } }, - '#withTolerations':: d.fn(help='"tolerations are appended (excluding duplicates) to pods running with this RuntimeClass during admission, effectively unioning the set of nodes tolerated by the pod and the RuntimeClass."', args=[d.arg(name='tolerations', type=d.T.array)]), - withTolerations(tolerations): { scheduling+: { tolerations: if std.isArray(v=tolerations) then tolerations else [tolerations] } }, - '#withTolerationsMixin':: d.fn(help='"tolerations are appended (excluding duplicates) to pods running with this RuntimeClass during admission, effectively unioning the set of nodes tolerated by the pod and the RuntimeClass."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='tolerations', type=d.T.array)]), - withTolerationsMixin(tolerations): { scheduling+: { tolerations+: if std.isArray(v=tolerations) then tolerations else [tolerations] } }, - }, - '#withHandler':: d.fn(help='"Handler specifies the underlying runtime and configuration that the CRI implementation will use to handle pods of this class. The possible values are specific to the node & CRI configuration. It is assumed that all handlers are available on every node, and handlers of the same name are equivalent on every node. For example, a handler called \\"runc\\" might specify that the runc OCI runtime (using native Linux containers) will be used to run the containers in a pod. The Handler must be lowercase, conform to the DNS Label (RFC 1123) requirements, and is immutable."', args=[d.arg(name='handler', type=d.T.string)]), - withHandler(handler): { handler: handler }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/node/v1beta1/scheduling.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/node/v1beta1/scheduling.libsonnet deleted file mode 100644 index 066bfbc80b2..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/node/v1beta1/scheduling.libsonnet +++ /dev/null @@ -1,14 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='scheduling', url='', help='"Scheduling specifies the scheduling constraints for nodes supporting a RuntimeClass."'), - '#withNodeSelector':: d.fn(help="\"nodeSelector lists labels that must be present on nodes that support this RuntimeClass. Pods using this RuntimeClass can only be scheduled to a node matched by this selector. The RuntimeClass nodeSelector is merged with a pod's existing nodeSelector. Any conflicts will cause the pod to be rejected in admission.\"", args=[d.arg(name='nodeSelector', type=d.T.object)]), - withNodeSelector(nodeSelector): { nodeSelector: nodeSelector }, - '#withNodeSelectorMixin':: d.fn(help="\"nodeSelector lists labels that must be present on nodes that support this RuntimeClass. Pods using this RuntimeClass can only be scheduled to a node matched by this selector. The RuntimeClass nodeSelector is merged with a pod's existing nodeSelector. 
Any conflicts will cause the pod to be rejected in admission.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='nodeSelector', type=d.T.object)]), - withNodeSelectorMixin(nodeSelector): { nodeSelector+: nodeSelector }, - '#withTolerations':: d.fn(help='"tolerations are appended (excluding duplicates) to pods running with this RuntimeClass during admission, effectively unioning the set of nodes tolerated by the pod and the RuntimeClass."', args=[d.arg(name='tolerations', type=d.T.array)]), - withTolerations(tolerations): { tolerations: if std.isArray(v=tolerations) then tolerations else [tolerations] }, - '#withTolerationsMixin':: d.fn(help='"tolerations are appended (excluding duplicates) to pods running with this RuntimeClass during admission, effectively unioning the set of nodes tolerated by the pod and the RuntimeClass."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='tolerations', type=d.T.array)]), - withTolerationsMixin(tolerations): { tolerations+: if std.isArray(v=tolerations) then tolerations else [tolerations] }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1/podDisruptionBudgetStatus.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1/podDisruptionBudgetStatus.libsonnet deleted file mode 100644 index 2fa7a69484f..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1/podDisruptionBudgetStatus.libsonnet +++ /dev/null @@ -1,24 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='podDisruptionBudgetStatus', url='', help='"PodDisruptionBudgetStatus represents information about the status of a PodDisruptionBudget. Status may trail the actual state of a system."'), - '#withConditions':: d.fn(help="\"Conditions contain conditions for PDB. The disruption controller sets the DisruptionAllowed condition. The following are known values for the reason field (additional reasons could be added in the future): - SyncFailed: The controller encountered an error and wasn't able to compute\\n the number of allowed disruptions. Therefore no disruptions are\\n allowed and the status of the condition will be False.\\n- InsufficientPods: The number of pods are either at or below the number\\n required by the PodDisruptionBudget. No disruptions are\\n allowed and the status of the condition will be False.\\n- SufficientPods: There are more pods than required by the PodDisruptionBudget.\\n The condition will be True, and the number of allowed\\n disruptions are provided by the disruptionsAllowed property.\"", args=[d.arg(name='conditions', type=d.T.array)]), - withConditions(conditions): { conditions: if std.isArray(v=conditions) then conditions else [conditions] }, - '#withConditionsMixin':: d.fn(help="\"Conditions contain conditions for PDB. The disruption controller sets the DisruptionAllowed condition. The following are known values for the reason field (additional reasons could be added in the future): - SyncFailed: The controller encountered an error and wasn't able to compute\\n the number of allowed disruptions. Therefore no disruptions are\\n allowed and the status of the condition will be False.\\n- InsufficientPods: The number of pods are either at or below the number\\n required by the PodDisruptionBudget. 
No disruptions are\\n allowed and the status of the condition will be False.\\n- SufficientPods: There are more pods than required by the PodDisruptionBudget.\\n The condition will be True, and the number of allowed\\n disruptions are provided by the disruptionsAllowed property.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='conditions', type=d.T.array)]), - withConditionsMixin(conditions): { conditions+: if std.isArray(v=conditions) then conditions else [conditions] }, - '#withCurrentHealthy':: d.fn(help='"current number of healthy pods"', args=[d.arg(name='currentHealthy', type=d.T.integer)]), - withCurrentHealthy(currentHealthy): { currentHealthy: currentHealthy }, - '#withDesiredHealthy':: d.fn(help='"minimum desired number of healthy pods"', args=[d.arg(name='desiredHealthy', type=d.T.integer)]), - withDesiredHealthy(desiredHealthy): { desiredHealthy: desiredHealthy }, - '#withDisruptedPods':: d.fn(help="\"DisruptedPods contains information about pods whose eviction was processed by the API server eviction subresource handler but has not yet been observed by the PodDisruptionBudget controller. A pod will be in this map from the time when the API server processed the eviction request to the time when the pod is seen by PDB controller as having been marked for deletion (or after a timeout). The key in the map is the name of the pod and the value is the time when the API server processed the eviction request. If the deletion didn't occur and a pod is still there it will be removed from the list automatically by PodDisruptionBudget controller after some time. If everything goes smooth this map should be empty for the most of the time. Large number of entries in the map may indicate problems with pod deletions.\"", args=[d.arg(name='disruptedPods', type=d.T.object)]), - withDisruptedPods(disruptedPods): { disruptedPods: disruptedPods }, - '#withDisruptedPodsMixin':: d.fn(help="\"DisruptedPods contains information about pods whose eviction was processed by the API server eviction subresource handler but has not yet been observed by the PodDisruptionBudget controller. A pod will be in this map from the time when the API server processed the eviction request to the time when the pod is seen by PDB controller as having been marked for deletion (or after a timeout). The key in the map is the name of the pod and the value is the time when the API server processed the eviction request. If the deletion didn't occur and a pod is still there it will be removed from the list automatically by PodDisruptionBudget controller after some time. If everything goes smooth this map should be empty for the most of the time. Large number of entries in the map may indicate problems with pod deletions.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='disruptedPods', type=d.T.object)]), - withDisruptedPodsMixin(disruptedPods): { disruptedPods+: disruptedPods }, - '#withDisruptionsAllowed':: d.fn(help='"Number of pod disruptions that are currently allowed."', args=[d.arg(name='disruptionsAllowed', type=d.T.integer)]), - withDisruptionsAllowed(disruptionsAllowed): { disruptionsAllowed: disruptionsAllowed }, - '#withExpectedPods':: d.fn(help='"total number of pods counted by this disruption budget"', args=[d.arg(name='expectedPods', type=d.T.integer)]), - withExpectedPods(expectedPods): { expectedPods: expectedPods }, - '#withObservedGeneration':: d.fn(help="\"Most recent generation observed when updating this PDB status. 
DisruptionsAllowed and other status information is valid only if observedGeneration equals to PDB's object generation.\"", args=[d.arg(name='observedGeneration', type=d.T.integer)]), - withObservedGeneration(observedGeneration): { observedGeneration: observedGeneration }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1beta1/allowedCSIDriver.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1beta1/allowedCSIDriver.libsonnet deleted file mode 100644 index 70703dbba0a..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1beta1/allowedCSIDriver.libsonnet +++ /dev/null @@ -1,8 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='allowedCSIDriver', url='', help='"AllowedCSIDriver represents a single inline CSI Driver that is allowed to be used."'), - '#withName':: d.fn(help='"Name is the registered name of the CSI driver"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { name: name }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1beta1/allowedFlexVolume.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1beta1/allowedFlexVolume.libsonnet deleted file mode 100644 index ed4e7b9f827..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1beta1/allowedFlexVolume.libsonnet +++ /dev/null @@ -1,8 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='allowedFlexVolume', url='', help='"AllowedFlexVolume represents a single Flexvolume that is allowed to be used."'), - '#withDriver':: d.fn(help='"driver is the name of the Flexvolume driver."', args=[d.arg(name='driver', type=d.T.string)]), - withDriver(driver): { driver: driver }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1beta1/allowedHostPath.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1beta1/allowedHostPath.libsonnet deleted file mode 100644 index 921e10cf0e7..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1beta1/allowedHostPath.libsonnet +++ /dev/null @@ -1,10 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='allowedHostPath', url='', help='"AllowedHostPath defines the host volume conditions that will be enabled by a policy for pods to use. It requires the path prefix to be defined."'), - '#withPathPrefix':: d.fn(help='"pathPrefix is the path prefix that the host volume must match. It does not support `*`. 
Trailing slashes are trimmed when validating the path prefix with a host path.\\n\\nExamples: `/foo` would allow `/foo`, `/foo/` and `/foo/bar` `/foo` would not allow `/food` or `/etc/foo`"', args=[d.arg(name='pathPrefix', type=d.T.string)]), - withPathPrefix(pathPrefix): { pathPrefix: pathPrefix }, - '#withReadOnly':: d.fn(help='"when set to true, will allow host volumes matching the pathPrefix only if all volume mounts are readOnly."', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { readOnly: readOnly }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1beta1/fsGroupStrategyOptions.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1beta1/fsGroupStrategyOptions.libsonnet deleted file mode 100644 index 151bae0196c..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1beta1/fsGroupStrategyOptions.libsonnet +++ /dev/null @@ -1,12 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='fsGroupStrategyOptions', url='', help='"FSGroupStrategyOptions defines the strategy type and options used to create the strategy."'), - '#withRanges':: d.fn(help='"ranges are the allowed ranges of fs groups. If you would like to force a single fs group then supply a single range with the same start and end. Required for MustRunAs."', args=[d.arg(name='ranges', type=d.T.array)]), - withRanges(ranges): { ranges: if std.isArray(v=ranges) then ranges else [ranges] }, - '#withRangesMixin':: d.fn(help='"ranges are the allowed ranges of fs groups. If you would like to force a single fs group then supply a single range with the same start and end. Required for MustRunAs."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ranges', type=d.T.array)]), - withRangesMixin(ranges): { ranges+: if std.isArray(v=ranges) then ranges else [ranges] }, - '#withRule':: d.fn(help='"rule is the strategy that will dictate what FSGroup is used in the SecurityContext."', args=[d.arg(name='rule', type=d.T.string)]), - withRule(rule): { rule: rule }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1beta1/hostPortRange.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1beta1/hostPortRange.libsonnet deleted file mode 100644 index a804f6faff0..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1beta1/hostPortRange.libsonnet +++ /dev/null @@ -1,10 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='hostPortRange', url='', help='"HostPortRange defines a range of host ports that will be enabled by a policy for pods to use. 
It requires both the start and end to be defined."'), - '#withMax':: d.fn(help='"max is the end of the range, inclusive."', args=[d.arg(name='max', type=d.T.integer)]), - withMax(max): { max: max }, - '#withMin':: d.fn(help='"min is the start of the range, inclusive."', args=[d.arg(name='min', type=d.T.integer)]), - withMin(min): { min: min }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1beta1/idRange.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1beta1/idRange.libsonnet deleted file mode 100644 index 902c4bb6395..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1beta1/idRange.libsonnet +++ /dev/null @@ -1,10 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='idRange', url='', help='"IDRange provides a min/max of an allowed range of IDs."'), - '#withMax':: d.fn(help='"max is the end of the range, inclusive."', args=[d.arg(name='max', type=d.T.integer)]), - withMax(max): { max: max }, - '#withMin':: d.fn(help='"min is the start of the range, inclusive."', args=[d.arg(name='min', type=d.T.integer)]), - withMin(min): { min: min }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1beta1/main.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1beta1/main.libsonnet deleted file mode 100644 index 3b0bb525f0f..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1beta1/main.libsonnet +++ /dev/null @@ -1,21 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='v1beta1', url='', help=''), - allowedCSIDriver: (import 'allowedCSIDriver.libsonnet'), - allowedFlexVolume: (import 'allowedFlexVolume.libsonnet'), - allowedHostPath: (import 'allowedHostPath.libsonnet'), - eviction: (import 'eviction.libsonnet'), - fsGroupStrategyOptions: (import 'fsGroupStrategyOptions.libsonnet'), - hostPortRange: (import 'hostPortRange.libsonnet'), - idRange: (import 'idRange.libsonnet'), - podDisruptionBudget: (import 'podDisruptionBudget.libsonnet'), - podDisruptionBudgetSpec: (import 'podDisruptionBudgetSpec.libsonnet'), - podDisruptionBudgetStatus: (import 'podDisruptionBudgetStatus.libsonnet'), - podSecurityPolicy: (import 'podSecurityPolicy.libsonnet'), - podSecurityPolicySpec: (import 'podSecurityPolicySpec.libsonnet'), - runAsGroupStrategyOptions: (import 'runAsGroupStrategyOptions.libsonnet'), - runAsUserStrategyOptions: (import 'runAsUserStrategyOptions.libsonnet'), - runtimeClassStrategyOptions: (import 'runtimeClassStrategyOptions.libsonnet'), - seLinuxStrategyOptions: (import 'seLinuxStrategyOptions.libsonnet'), - supplementalGroupsStrategyOptions: (import 'supplementalGroupsStrategyOptions.libsonnet'), -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1beta1/podDisruptionBudget.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1beta1/podDisruptionBudget.libsonnet deleted file mode 100644 index af495f14f7f..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1beta1/podDisruptionBudget.libsonnet +++ /dev/null @@ -1,74 +0,0 @@ -{ - local d 
= (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='podDisruptionBudget', url='', help='"PodDisruptionBudget is an object to define the max disruption that can be caused to a collection of pods"'), - '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), - metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), - withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), - withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, - '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), - withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, - '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), - withDeletionGracePeriodSeconds(deletionGracePeriodSeconds): { metadata+: { deletionGracePeriodSeconds: deletionGracePeriodSeconds } }, - '#withDeletionTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='deletionTimestamp', type=d.T.string)]), - withDeletionTimestamp(deletionTimestamp): { metadata+: { deletionTimestamp: deletionTimestamp } }, - '#withFinalizers':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. 
If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), - withGenerateName(generateName): { metadata+: { generateName: generateName } }, - '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), - withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. 
More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), - withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), - withLabelsMixin(labels): { metadata+: { labels+: labels } }, - '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { metadata+: { namespace: namespace } }, - '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. 
There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, - '#withOwnerReferencesMixin':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, - '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), - withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), - withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), - withUid(uid): { metadata+: { uid: uid } }, - }, - '#new':: d.fn(help='new returns an instance of PodDisruptionBudget', args=[d.arg(name='name', type=d.T.string)]), - new(name): { - apiVersion: 'policy/v1beta1', - kind: 'PodDisruptionBudget', - } + self.metadata.withName(name=name), - '#spec':: d.obj(help='"PodDisruptionBudgetSpec is a description of a PodDisruptionBudget."'), - spec: { - '#selector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), - selector: { - '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. 
The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), - withMatchExpressions(matchExpressions): { spec+: { selector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } }, - '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), - withMatchExpressionsMixin(matchExpressions): { spec+: { selector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } }, - '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), - withMatchLabels(matchLabels): { spec+: { selector+: { matchLabels: matchLabels } } }, - '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), - withMatchLabelsMixin(matchLabels): { spec+: { selector+: { matchLabels+: matchLabels } } }, - }, - '#withMaxUnavailable':: d.fn(help='"IntOrString is a type that can hold an int32 or a string. When used in JSON or YAML marshalling and unmarshalling, it produces or consumes the inner type. This allows you to have, for example, a JSON field that can accept a name or number."', args=[d.arg(name='maxUnavailable', type=d.T.string)]), - withMaxUnavailable(maxUnavailable): { spec+: { maxUnavailable: maxUnavailable } }, - '#withMinAvailable':: d.fn(help='"IntOrString is a type that can hold an int32 or a string. When used in JSON or YAML marshalling and unmarshalling, it produces or consumes the inner type. This allows you to have, for example, a JSON field that can accept a name or number."', args=[d.arg(name='minAvailable', type=d.T.string)]), - withMinAvailable(minAvailable): { spec+: { minAvailable: minAvailable } }, - }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1beta1/podDisruptionBudgetStatus.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1beta1/podDisruptionBudgetStatus.libsonnet deleted file mode 100644 index 2fa7a69484f..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1beta1/podDisruptionBudgetStatus.libsonnet +++ /dev/null @@ -1,24 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='podDisruptionBudgetStatus', url='', help='"PodDisruptionBudgetStatus represents information about the status of a PodDisruptionBudget. Status may trail the actual state of a system."'), - '#withConditions':: d.fn(help="\"Conditions contain conditions for PDB. The disruption controller sets the DisruptionAllowed condition. 
The following are known values for the reason field (additional reasons could be added in the future): - SyncFailed: The controller encountered an error and wasn't able to compute\\n the number of allowed disruptions. Therefore no disruptions are\\n allowed and the status of the condition will be False.\\n- InsufficientPods: The number of pods are either at or below the number\\n required by the PodDisruptionBudget. No disruptions are\\n allowed and the status of the condition will be False.\\n- SufficientPods: There are more pods than required by the PodDisruptionBudget.\\n The condition will be True, and the number of allowed\\n disruptions are provided by the disruptionsAllowed property.\"", args=[d.arg(name='conditions', type=d.T.array)]), - withConditions(conditions): { conditions: if std.isArray(v=conditions) then conditions else [conditions] }, - '#withConditionsMixin':: d.fn(help="\"Conditions contain conditions for PDB. The disruption controller sets the DisruptionAllowed condition. The following are known values for the reason field (additional reasons could be added in the future): - SyncFailed: The controller encountered an error and wasn't able to compute\\n the number of allowed disruptions. Therefore no disruptions are\\n allowed and the status of the condition will be False.\\n- InsufficientPods: The number of pods are either at or below the number\\n required by the PodDisruptionBudget. No disruptions are\\n allowed and the status of the condition will be False.\\n- SufficientPods: There are more pods than required by the PodDisruptionBudget.\\n The condition will be True, and the number of allowed\\n disruptions are provided by the disruptionsAllowed property.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='conditions', type=d.T.array)]), - withConditionsMixin(conditions): { conditions+: if std.isArray(v=conditions) then conditions else [conditions] }, - '#withCurrentHealthy':: d.fn(help='"current number of healthy pods"', args=[d.arg(name='currentHealthy', type=d.T.integer)]), - withCurrentHealthy(currentHealthy): { currentHealthy: currentHealthy }, - '#withDesiredHealthy':: d.fn(help='"minimum desired number of healthy pods"', args=[d.arg(name='desiredHealthy', type=d.T.integer)]), - withDesiredHealthy(desiredHealthy): { desiredHealthy: desiredHealthy }, - '#withDisruptedPods':: d.fn(help="\"DisruptedPods contains information about pods whose eviction was processed by the API server eviction subresource handler but has not yet been observed by the PodDisruptionBudget controller. A pod will be in this map from the time when the API server processed the eviction request to the time when the pod is seen by PDB controller as having been marked for deletion (or after a timeout). The key in the map is the name of the pod and the value is the time when the API server processed the eviction request. If the deletion didn't occur and a pod is still there it will be removed from the list automatically by PodDisruptionBudget controller after some time. If everything goes smooth this map should be empty for the most of the time. 
Large number of entries in the map may indicate problems with pod deletions.\"", args=[d.arg(name='disruptedPods', type=d.T.object)]), - withDisruptedPods(disruptedPods): { disruptedPods: disruptedPods }, - '#withDisruptedPodsMixin':: d.fn(help="\"DisruptedPods contains information about pods whose eviction was processed by the API server eviction subresource handler but has not yet been observed by the PodDisruptionBudget controller. A pod will be in this map from the time when the API server processed the eviction request to the time when the pod is seen by PDB controller as having been marked for deletion (or after a timeout). The key in the map is the name of the pod and the value is the time when the API server processed the eviction request. If the deletion didn't occur and a pod is still there it will be removed from the list automatically by PodDisruptionBudget controller after some time. If everything goes smooth this map should be empty for the most of the time. Large number of entries in the map may indicate problems with pod deletions.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='disruptedPods', type=d.T.object)]), - withDisruptedPodsMixin(disruptedPods): { disruptedPods+: disruptedPods }, - '#withDisruptionsAllowed':: d.fn(help='"Number of pod disruptions that are currently allowed."', args=[d.arg(name='disruptionsAllowed', type=d.T.integer)]), - withDisruptionsAllowed(disruptionsAllowed): { disruptionsAllowed: disruptionsAllowed }, - '#withExpectedPods':: d.fn(help='"total number of pods counted by this disruption budget"', args=[d.arg(name='expectedPods', type=d.T.integer)]), - withExpectedPods(expectedPods): { expectedPods: expectedPods }, - '#withObservedGeneration':: d.fn(help="\"Most recent generation observed when updating this PDB status. DisruptionsAllowed and other status information is valid only if observedGeneration equals to PDB's object generation.\"", args=[d.arg(name='observedGeneration', type=d.T.integer)]), - withObservedGeneration(observedGeneration): { observedGeneration: observedGeneration }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1beta1/podSecurityPolicy.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1beta1/podSecurityPolicy.libsonnet deleted file mode 100644 index 6147e227838..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1beta1/podSecurityPolicy.libsonnet +++ /dev/null @@ -1,178 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='podSecurityPolicy', url='', help='"PodSecurityPolicy governs the ability to make requests that affect the Security Context that will be applied to a pod and container. Deprecated in 1.21."'), - '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), - metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. 
More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), - withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), - withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, - '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), - withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, - '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), - withDeletionGracePeriodSeconds(deletionGracePeriodSeconds): { metadata+: { deletionGracePeriodSeconds: deletionGracePeriodSeconds } }, - '#withDeletionTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='deletionTimestamp', type=d.T.string)]), - withDeletionTimestamp(deletionTimestamp): { metadata+: { deletionTimestamp: deletionTimestamp } }, - '#withFinalizers':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. 
Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), - withGenerateName(generateName): { metadata+: { generateName: generateName } }, - '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), - withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), - withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), - withLabelsMixin(labels): { metadata+: { labels+: labels } }, - '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. 
A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { metadata+: { namespace: namespace } }, - '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, - '#withOwnerReferencesMixin':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. 
There cannot be more than one managing controller."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, - '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), - withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), - withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), - withUid(uid): { metadata+: { uid: uid } }, - }, - '#new':: d.fn(help='new returns an instance of PodSecurityPolicy', args=[d.arg(name='name', type=d.T.string)]), - new(name): { - apiVersion: 'policy/v1beta1', - kind: 'PodSecurityPolicy', - } + self.metadata.withName(name=name), - '#spec':: d.obj(help='"PodSecurityPolicySpec defines the policy enforced."'), - spec: { - '#fsGroup':: d.obj(help='"FSGroupStrategyOptions defines the strategy type and options used to create the strategy."'), - fsGroup: { - '#withRanges':: d.fn(help='"ranges are the allowed ranges of fs groups. If you would like to force a single fs group then supply a single range with the same start and end. Required for MustRunAs."', args=[d.arg(name='ranges', type=d.T.array)]), - withRanges(ranges): { spec+: { fsGroup+: { ranges: if std.isArray(v=ranges) then ranges else [ranges] } } }, - '#withRangesMixin':: d.fn(help='"ranges are the allowed ranges of fs groups. If you would like to force a single fs group then supply a single range with the same start and end. 
Required for MustRunAs."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ranges', type=d.T.array)]), - withRangesMixin(ranges): { spec+: { fsGroup+: { ranges+: if std.isArray(v=ranges) then ranges else [ranges] } } }, - '#withRule':: d.fn(help='"rule is the strategy that will dictate what FSGroup is used in the SecurityContext."', args=[d.arg(name='rule', type=d.T.string)]), - withRule(rule): { spec+: { fsGroup+: { rule: rule } } }, - }, - '#runAsGroup':: d.obj(help='"RunAsGroupStrategyOptions defines the strategy type and any options used to create the strategy."'), - runAsGroup: { - '#withRanges':: d.fn(help='"ranges are the allowed ranges of gids that may be used. If you would like to force a single gid then supply a single range with the same start and end. Required for MustRunAs."', args=[d.arg(name='ranges', type=d.T.array)]), - withRanges(ranges): { spec+: { runAsGroup+: { ranges: if std.isArray(v=ranges) then ranges else [ranges] } } }, - '#withRangesMixin':: d.fn(help='"ranges are the allowed ranges of gids that may be used. If you would like to force a single gid then supply a single range with the same start and end. Required for MustRunAs."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ranges', type=d.T.array)]), - withRangesMixin(ranges): { spec+: { runAsGroup+: { ranges+: if std.isArray(v=ranges) then ranges else [ranges] } } }, - '#withRule':: d.fn(help='"rule is the strategy that will dictate the allowable RunAsGroup values that may be set."', args=[d.arg(name='rule', type=d.T.string)]), - withRule(rule): { spec+: { runAsGroup+: { rule: rule } } }, - }, - '#runAsUser':: d.obj(help='"RunAsUserStrategyOptions defines the strategy type and any options used to create the strategy."'), - runAsUser: { - '#withRanges':: d.fn(help='"ranges are the allowed ranges of uids that may be used. If you would like to force a single uid then supply a single range with the same start and end. Required for MustRunAs."', args=[d.arg(name='ranges', type=d.T.array)]), - withRanges(ranges): { spec+: { runAsUser+: { ranges: if std.isArray(v=ranges) then ranges else [ranges] } } }, - '#withRangesMixin':: d.fn(help='"ranges are the allowed ranges of uids that may be used. If you would like to force a single uid then supply a single range with the same start and end. Required for MustRunAs."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ranges', type=d.T.array)]), - withRangesMixin(ranges): { spec+: { runAsUser+: { ranges+: if std.isArray(v=ranges) then ranges else [ranges] } } }, - '#withRule':: d.fn(help='"rule is the strategy that will dictate the allowable RunAsUser values that may be set."', args=[d.arg(name='rule', type=d.T.string)]), - withRule(rule): { spec+: { runAsUser+: { rule: rule } } }, - }, - '#runtimeClass':: d.obj(help='"RuntimeClassStrategyOptions define the strategy that will dictate the allowable RuntimeClasses for a pod."'), - runtimeClass: { - '#withAllowedRuntimeClassNames':: d.fn(help='"allowedRuntimeClassNames is an allowlist of RuntimeClass names that may be specified on a pod. A value of \\"*\\" means that any RuntimeClass name is allowed, and must be the only item in the list. 
An empty list requires the RuntimeClassName field to be unset."', args=[d.arg(name='allowedRuntimeClassNames', type=d.T.array)]), - withAllowedRuntimeClassNames(allowedRuntimeClassNames): { spec+: { runtimeClass+: { allowedRuntimeClassNames: if std.isArray(v=allowedRuntimeClassNames) then allowedRuntimeClassNames else [allowedRuntimeClassNames] } } }, - '#withAllowedRuntimeClassNamesMixin':: d.fn(help='"allowedRuntimeClassNames is an allowlist of RuntimeClass names that may be specified on a pod. A value of \\"*\\" means that any RuntimeClass name is allowed, and must be the only item in the list. An empty list requires the RuntimeClassName field to be unset."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='allowedRuntimeClassNames', type=d.T.array)]), - withAllowedRuntimeClassNamesMixin(allowedRuntimeClassNames): { spec+: { runtimeClass+: { allowedRuntimeClassNames+: if std.isArray(v=allowedRuntimeClassNames) then allowedRuntimeClassNames else [allowedRuntimeClassNames] } } }, - '#withDefaultRuntimeClassName':: d.fn(help='"defaultRuntimeClassName is the default RuntimeClassName to set on the pod. The default MUST be allowed by the allowedRuntimeClassNames list. A value of nil does not mutate the Pod."', args=[d.arg(name='defaultRuntimeClassName', type=d.T.string)]), - withDefaultRuntimeClassName(defaultRuntimeClassName): { spec+: { runtimeClass+: { defaultRuntimeClassName: defaultRuntimeClassName } } }, - }, - '#seLinux':: d.obj(help='"SELinuxStrategyOptions defines the strategy type and any options used to create the strategy."'), - seLinux: { - '#seLinuxOptions':: d.obj(help='"SELinuxOptions are the labels to be applied to the container"'), - seLinuxOptions: { - '#withLevel':: d.fn(help='"Level is SELinux level label that applies to the container."', args=[d.arg(name='level', type=d.T.string)]), - withLevel(level): { spec+: { seLinux+: { seLinuxOptions+: { level: level } } } }, - '#withRole':: d.fn(help='"Role is a SELinux role label that applies to the container."', args=[d.arg(name='role', type=d.T.string)]), - withRole(role): { spec+: { seLinux+: { seLinuxOptions+: { role: role } } } }, - '#withType':: d.fn(help='"Type is a SELinux type label that applies to the container."', args=[d.arg(name='type', type=d.T.string)]), - withType(type): { spec+: { seLinux+: { seLinuxOptions+: { type: type } } } }, - '#withUser':: d.fn(help='"User is a SELinux user label that applies to the container."', args=[d.arg(name='user', type=d.T.string)]), - withUser(user): { spec+: { seLinux+: { seLinuxOptions+: { user: user } } } }, - }, - '#withRule':: d.fn(help='"rule is the strategy that will dictate the allowable labels that may be set."', args=[d.arg(name='rule', type=d.T.string)]), - withRule(rule): { spec+: { seLinux+: { rule: rule } } }, - }, - '#supplementalGroups':: d.obj(help='"SupplementalGroupsStrategyOptions defines the strategy type and options used to create the strategy."'), - supplementalGroups: { - '#withRanges':: d.fn(help='"ranges are the allowed ranges of supplemental groups. If you would like to force a single supplemental group then supply a single range with the same start and end. Required for MustRunAs."', args=[d.arg(name='ranges', type=d.T.array)]), - withRanges(ranges): { spec+: { supplementalGroups+: { ranges: if std.isArray(v=ranges) then ranges else [ranges] } } }, - '#withRangesMixin':: d.fn(help='"ranges are the allowed ranges of supplemental groups. 
If you would like to force a single supplemental group then supply a single range with the same start and end. Required for MustRunAs."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ranges', type=d.T.array)]), - withRangesMixin(ranges): { spec+: { supplementalGroups+: { ranges+: if std.isArray(v=ranges) then ranges else [ranges] } } }, - '#withRule':: d.fn(help='"rule is the strategy that will dictate what supplemental groups is used in the SecurityContext."', args=[d.arg(name='rule', type=d.T.string)]), - withRule(rule): { spec+: { supplementalGroups+: { rule: rule } } }, - }, - '#withAllowPrivilegeEscalation':: d.fn(help='"allowPrivilegeEscalation determines if a pod can request to allow privilege escalation. If unspecified, defaults to true."', args=[d.arg(name='allowPrivilegeEscalation', type=d.T.boolean)]), - withAllowPrivilegeEscalation(allowPrivilegeEscalation): { spec+: { allowPrivilegeEscalation: allowPrivilegeEscalation } }, - '#withAllowedCSIDrivers':: d.fn(help='"AllowedCSIDrivers is an allowlist of inline CSI drivers that must be explicitly set to be embedded within a pod spec. An empty value indicates that any CSI driver can be used for inline ephemeral volumes. This is a beta field, and is only honored if the API server enables the CSIInlineVolume feature gate."', args=[d.arg(name='allowedCSIDrivers', type=d.T.array)]), - withAllowedCSIDrivers(allowedCSIDrivers): { spec+: { allowedCSIDrivers: if std.isArray(v=allowedCSIDrivers) then allowedCSIDrivers else [allowedCSIDrivers] } }, - '#withAllowedCSIDriversMixin':: d.fn(help='"AllowedCSIDrivers is an allowlist of inline CSI drivers that must be explicitly set to be embedded within a pod spec. An empty value indicates that any CSI driver can be used for inline ephemeral volumes. This is a beta field, and is only honored if the API server enables the CSIInlineVolume feature gate."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='allowedCSIDrivers', type=d.T.array)]), - withAllowedCSIDriversMixin(allowedCSIDrivers): { spec+: { allowedCSIDrivers+: if std.isArray(v=allowedCSIDrivers) then allowedCSIDrivers else [allowedCSIDrivers] } }, - '#withAllowedCapabilities':: d.fn(help="\"allowedCapabilities is a list of capabilities that can be requested to add to the container. Capabilities in this field may be added at the pod author's discretion. You must not list a capability in both allowedCapabilities and requiredDropCapabilities.\"", args=[d.arg(name='allowedCapabilities', type=d.T.array)]), - withAllowedCapabilities(allowedCapabilities): { spec+: { allowedCapabilities: if std.isArray(v=allowedCapabilities) then allowedCapabilities else [allowedCapabilities] } }, - '#withAllowedCapabilitiesMixin':: d.fn(help="\"allowedCapabilities is a list of capabilities that can be requested to add to the container. Capabilities in this field may be added at the pod author's discretion. You must not list a capability in both allowedCapabilities and requiredDropCapabilities.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='allowedCapabilities', type=d.T.array)]), - withAllowedCapabilitiesMixin(allowedCapabilities): { spec+: { allowedCapabilities+: if std.isArray(v=allowedCapabilities) then allowedCapabilities else [allowedCapabilities] } }, - '#withAllowedFlexVolumes':: d.fn(help='"allowedFlexVolumes is an allowlist of Flexvolumes. Empty or nil indicates that all Flexvolumes may be used. 
This parameter is effective only when the usage of the Flexvolumes is allowed in the \\"volumes\\" field."', args=[d.arg(name='allowedFlexVolumes', type=d.T.array)]), - withAllowedFlexVolumes(allowedFlexVolumes): { spec+: { allowedFlexVolumes: if std.isArray(v=allowedFlexVolumes) then allowedFlexVolumes else [allowedFlexVolumes] } }, - '#withAllowedFlexVolumesMixin':: d.fn(help='"allowedFlexVolumes is an allowlist of Flexvolumes. Empty or nil indicates that all Flexvolumes may be used. This parameter is effective only when the usage of the Flexvolumes is allowed in the \\"volumes\\" field."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='allowedFlexVolumes', type=d.T.array)]), - withAllowedFlexVolumesMixin(allowedFlexVolumes): { spec+: { allowedFlexVolumes+: if std.isArray(v=allowedFlexVolumes) then allowedFlexVolumes else [allowedFlexVolumes] } }, - '#withAllowedHostPaths':: d.fn(help='"allowedHostPaths is an allowlist of host paths. Empty indicates that all host paths may be used."', args=[d.arg(name='allowedHostPaths', type=d.T.array)]), - withAllowedHostPaths(allowedHostPaths): { spec+: { allowedHostPaths: if std.isArray(v=allowedHostPaths) then allowedHostPaths else [allowedHostPaths] } }, - '#withAllowedHostPathsMixin':: d.fn(help='"allowedHostPaths is an allowlist of host paths. Empty indicates that all host paths may be used."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='allowedHostPaths', type=d.T.array)]), - withAllowedHostPathsMixin(allowedHostPaths): { spec+: { allowedHostPaths+: if std.isArray(v=allowedHostPaths) then allowedHostPaths else [allowedHostPaths] } }, - '#withAllowedProcMountTypes':: d.fn(help='"AllowedProcMountTypes is an allowlist of allowed ProcMountTypes. Empty or nil indicates that only the DefaultProcMountType may be used. This requires the ProcMountType feature flag to be enabled."', args=[d.arg(name='allowedProcMountTypes', type=d.T.array)]), - withAllowedProcMountTypes(allowedProcMountTypes): { spec+: { allowedProcMountTypes: if std.isArray(v=allowedProcMountTypes) then allowedProcMountTypes else [allowedProcMountTypes] } }, - '#withAllowedProcMountTypesMixin':: d.fn(help='"AllowedProcMountTypes is an allowlist of allowed ProcMountTypes. Empty or nil indicates that only the DefaultProcMountType may be used. This requires the ProcMountType feature flag to be enabled."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='allowedProcMountTypes', type=d.T.array)]), - withAllowedProcMountTypesMixin(allowedProcMountTypes): { spec+: { allowedProcMountTypes+: if std.isArray(v=allowedProcMountTypes) then allowedProcMountTypes else [allowedProcMountTypes] } }, - '#withAllowedUnsafeSysctls':: d.fn(help='"allowedUnsafeSysctls is a list of explicitly allowed unsafe sysctls, defaults to none. Each entry is either a plain sysctl name or ends in \\"*\\" in which case it is considered as a prefix of allowed sysctls. Single * means all unsafe sysctls are allowed. Kubelet has to allowlist all allowed unsafe sysctls explicitly to avoid rejection.\\n\\nExamples: e.g. \\"foo/*\\" allows \\"foo/bar\\", \\"foo/baz\\", etc. e.g. 
\\"foo.*\\" allows \\"foo.bar\\", \\"foo.baz\\", etc."', args=[d.arg(name='allowedUnsafeSysctls', type=d.T.array)]), - withAllowedUnsafeSysctls(allowedUnsafeSysctls): { spec+: { allowedUnsafeSysctls: if std.isArray(v=allowedUnsafeSysctls) then allowedUnsafeSysctls else [allowedUnsafeSysctls] } }, - '#withAllowedUnsafeSysctlsMixin':: d.fn(help='"allowedUnsafeSysctls is a list of explicitly allowed unsafe sysctls, defaults to none. Each entry is either a plain sysctl name or ends in \\"*\\" in which case it is considered as a prefix of allowed sysctls. Single * means all unsafe sysctls are allowed. Kubelet has to allowlist all allowed unsafe sysctls explicitly to avoid rejection.\\n\\nExamples: e.g. \\"foo/*\\" allows \\"foo/bar\\", \\"foo/baz\\", etc. e.g. \\"foo.*\\" allows \\"foo.bar\\", \\"foo.baz\\", etc."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='allowedUnsafeSysctls', type=d.T.array)]), - withAllowedUnsafeSysctlsMixin(allowedUnsafeSysctls): { spec+: { allowedUnsafeSysctls+: if std.isArray(v=allowedUnsafeSysctls) then allowedUnsafeSysctls else [allowedUnsafeSysctls] } }, - '#withDefaultAddCapabilities':: d.fn(help='"defaultAddCapabilities is the default set of capabilities that will be added to the container unless the pod spec specifically drops the capability. You may not list a capability in both defaultAddCapabilities and requiredDropCapabilities. Capabilities added here are implicitly allowed, and need not be included in the allowedCapabilities list."', args=[d.arg(name='defaultAddCapabilities', type=d.T.array)]), - withDefaultAddCapabilities(defaultAddCapabilities): { spec+: { defaultAddCapabilities: if std.isArray(v=defaultAddCapabilities) then defaultAddCapabilities else [defaultAddCapabilities] } }, - '#withDefaultAddCapabilitiesMixin':: d.fn(help='"defaultAddCapabilities is the default set of capabilities that will be added to the container unless the pod spec specifically drops the capability. You may not list a capability in both defaultAddCapabilities and requiredDropCapabilities. Capabilities added here are implicitly allowed, and need not be included in the allowedCapabilities list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='defaultAddCapabilities', type=d.T.array)]), - withDefaultAddCapabilitiesMixin(defaultAddCapabilities): { spec+: { defaultAddCapabilities+: if std.isArray(v=defaultAddCapabilities) then defaultAddCapabilities else [defaultAddCapabilities] } }, - '#withDefaultAllowPrivilegeEscalation':: d.fn(help='"defaultAllowPrivilegeEscalation controls the default setting for whether a process can gain more privileges than its parent process."', args=[d.arg(name='defaultAllowPrivilegeEscalation', type=d.T.boolean)]), - withDefaultAllowPrivilegeEscalation(defaultAllowPrivilegeEscalation): { spec+: { defaultAllowPrivilegeEscalation: defaultAllowPrivilegeEscalation } }, - '#withForbiddenSysctls':: d.fn(help='"forbiddenSysctls is a list of explicitly forbidden sysctls, defaults to none. Each entry is either a plain sysctl name or ends in \\"*\\" in which case it is considered as a prefix of forbidden sysctls. Single * means all sysctls are forbidden.\\n\\nExamples: e.g. \\"foo/*\\" forbids \\"foo/bar\\", \\"foo/baz\\", etc. e.g. 
\\"foo.*\\" forbids \\"foo.bar\\", \\"foo.baz\\", etc."', args=[d.arg(name='forbiddenSysctls', type=d.T.array)]), - withForbiddenSysctls(forbiddenSysctls): { spec+: { forbiddenSysctls: if std.isArray(v=forbiddenSysctls) then forbiddenSysctls else [forbiddenSysctls] } }, - '#withForbiddenSysctlsMixin':: d.fn(help='"forbiddenSysctls is a list of explicitly forbidden sysctls, defaults to none. Each entry is either a plain sysctl name or ends in \\"*\\" in which case it is considered as a prefix of forbidden sysctls. Single * means all sysctls are forbidden.\\n\\nExamples: e.g. \\"foo/*\\" forbids \\"foo/bar\\", \\"foo/baz\\", etc. e.g. \\"foo.*\\" forbids \\"foo.bar\\", \\"foo.baz\\", etc."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='forbiddenSysctls', type=d.T.array)]), - withForbiddenSysctlsMixin(forbiddenSysctls): { spec+: { forbiddenSysctls+: if std.isArray(v=forbiddenSysctls) then forbiddenSysctls else [forbiddenSysctls] } }, - '#withHostIPC':: d.fn(help='"hostIPC determines if the policy allows the use of HostIPC in the pod spec."', args=[d.arg(name='hostIPC', type=d.T.boolean)]), - withHostIPC(hostIPC): { spec+: { hostIPC: hostIPC } }, - '#withHostNetwork':: d.fn(help='"hostNetwork determines if the policy allows the use of HostNetwork in the pod spec."', args=[d.arg(name='hostNetwork', type=d.T.boolean)]), - withHostNetwork(hostNetwork): { spec+: { hostNetwork: hostNetwork } }, - '#withHostPID':: d.fn(help='"hostPID determines if the policy allows the use of HostPID in the pod spec."', args=[d.arg(name='hostPID', type=d.T.boolean)]), - withHostPID(hostPID): { spec+: { hostPID: hostPID } }, - '#withHostPorts':: d.fn(help='"hostPorts determines which host port ranges are allowed to be exposed."', args=[d.arg(name='hostPorts', type=d.T.array)]), - withHostPorts(hostPorts): { spec+: { hostPorts: if std.isArray(v=hostPorts) then hostPorts else [hostPorts] } }, - '#withHostPortsMixin':: d.fn(help='"hostPorts determines which host port ranges are allowed to be exposed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='hostPorts', type=d.T.array)]), - withHostPortsMixin(hostPorts): { spec+: { hostPorts+: if std.isArray(v=hostPorts) then hostPorts else [hostPorts] } }, - '#withPrivileged':: d.fn(help='"privileged determines if a pod can request to be run as privileged."', args=[d.arg(name='privileged', type=d.T.boolean)]), - withPrivileged(privileged): { spec+: { privileged: privileged } }, - '#withReadOnlyRootFilesystem':: d.fn(help='"readOnlyRootFilesystem when set to true will force containers to run with a read only root file system. If the container specifically requests to run with a non-read only root file system the PSP should deny the pod. If set to false the container may run with a read only root file system if it wishes but it will not be forced to."', args=[d.arg(name='readOnlyRootFilesystem', type=d.T.boolean)]), - withReadOnlyRootFilesystem(readOnlyRootFilesystem): { spec+: { readOnlyRootFilesystem: readOnlyRootFilesystem } }, - '#withRequiredDropCapabilities':: d.fn(help='"requiredDropCapabilities are the capabilities that will be dropped from the container. 
These are required to be dropped and cannot be added."', args=[d.arg(name='requiredDropCapabilities', type=d.T.array)]), - withRequiredDropCapabilities(requiredDropCapabilities): { spec+: { requiredDropCapabilities: if std.isArray(v=requiredDropCapabilities) then requiredDropCapabilities else [requiredDropCapabilities] } }, - '#withRequiredDropCapabilitiesMixin':: d.fn(help='"requiredDropCapabilities are the capabilities that will be dropped from the container. These are required to be dropped and cannot be added."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='requiredDropCapabilities', type=d.T.array)]), - withRequiredDropCapabilitiesMixin(requiredDropCapabilities): { spec+: { requiredDropCapabilities+: if std.isArray(v=requiredDropCapabilities) then requiredDropCapabilities else [requiredDropCapabilities] } }, - '#withVolumes':: d.fn(help="\"volumes is an allowlist of volume plugins. Empty indicates that no volumes may be used. To allow all volumes you may use '*'.\"", args=[d.arg(name='volumes', type=d.T.array)]), - withVolumes(volumes): { spec+: { volumes: if std.isArray(v=volumes) then volumes else [volumes] } }, - '#withVolumesMixin':: d.fn(help="\"volumes is an allowlist of volume plugins. Empty indicates that no volumes may be used. To allow all volumes you may use '*'.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='volumes', type=d.T.array)]), - withVolumesMixin(volumes): { spec+: { volumes+: if std.isArray(v=volumes) then volumes else [volumes] } }, - }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1beta1/podSecurityPolicySpec.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1beta1/podSecurityPolicySpec.libsonnet deleted file mode 100644 index 49130ef886a..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1beta1/podSecurityPolicySpec.libsonnet +++ /dev/null @@ -1,125 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='podSecurityPolicySpec', url='', help='"PodSecurityPolicySpec defines the policy enforced."'), - '#fsGroup':: d.obj(help='"FSGroupStrategyOptions defines the strategy type and options used to create the strategy."'), - fsGroup: { - '#withRanges':: d.fn(help='"ranges are the allowed ranges of fs groups. If you would like to force a single fs group then supply a single range with the same start and end. Required for MustRunAs."', args=[d.arg(name='ranges', type=d.T.array)]), - withRanges(ranges): { fsGroup+: { ranges: if std.isArray(v=ranges) then ranges else [ranges] } }, - '#withRangesMixin':: d.fn(help='"ranges are the allowed ranges of fs groups. If you would like to force a single fs group then supply a single range with the same start and end. 
Required for MustRunAs."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ranges', type=d.T.array)]), - withRangesMixin(ranges): { fsGroup+: { ranges+: if std.isArray(v=ranges) then ranges else [ranges] } }, - '#withRule':: d.fn(help='"rule is the strategy that will dictate what FSGroup is used in the SecurityContext."', args=[d.arg(name='rule', type=d.T.string)]), - withRule(rule): { fsGroup+: { rule: rule } }, - }, - '#runAsGroup':: d.obj(help='"RunAsGroupStrategyOptions defines the strategy type and any options used to create the strategy."'), - runAsGroup: { - '#withRanges':: d.fn(help='"ranges are the allowed ranges of gids that may be used. If you would like to force a single gid then supply a single range with the same start and end. Required for MustRunAs."', args=[d.arg(name='ranges', type=d.T.array)]), - withRanges(ranges): { runAsGroup+: { ranges: if std.isArray(v=ranges) then ranges else [ranges] } }, - '#withRangesMixin':: d.fn(help='"ranges are the allowed ranges of gids that may be used. If you would like to force a single gid then supply a single range with the same start and end. Required for MustRunAs."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ranges', type=d.T.array)]), - withRangesMixin(ranges): { runAsGroup+: { ranges+: if std.isArray(v=ranges) then ranges else [ranges] } }, - '#withRule':: d.fn(help='"rule is the strategy that will dictate the allowable RunAsGroup values that may be set."', args=[d.arg(name='rule', type=d.T.string)]), - withRule(rule): { runAsGroup+: { rule: rule } }, - }, - '#runAsUser':: d.obj(help='"RunAsUserStrategyOptions defines the strategy type and any options used to create the strategy."'), - runAsUser: { - '#withRanges':: d.fn(help='"ranges are the allowed ranges of uids that may be used. If you would like to force a single uid then supply a single range with the same start and end. Required for MustRunAs."', args=[d.arg(name='ranges', type=d.T.array)]), - withRanges(ranges): { runAsUser+: { ranges: if std.isArray(v=ranges) then ranges else [ranges] } }, - '#withRangesMixin':: d.fn(help='"ranges are the allowed ranges of uids that may be used. If you would like to force a single uid then supply a single range with the same start and end. Required for MustRunAs."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ranges', type=d.T.array)]), - withRangesMixin(ranges): { runAsUser+: { ranges+: if std.isArray(v=ranges) then ranges else [ranges] } }, - '#withRule':: d.fn(help='"rule is the strategy that will dictate the allowable RunAsUser values that may be set."', args=[d.arg(name='rule', type=d.T.string)]), - withRule(rule): { runAsUser+: { rule: rule } }, - }, - '#runtimeClass':: d.obj(help='"RuntimeClassStrategyOptions define the strategy that will dictate the allowable RuntimeClasses for a pod."'), - runtimeClass: { - '#withAllowedRuntimeClassNames':: d.fn(help='"allowedRuntimeClassNames is an allowlist of RuntimeClass names that may be specified on a pod. A value of \\"*\\" means that any RuntimeClass name is allowed, and must be the only item in the list. 
An empty list requires the RuntimeClassName field to be unset."', args=[d.arg(name='allowedRuntimeClassNames', type=d.T.array)]), - withAllowedRuntimeClassNames(allowedRuntimeClassNames): { runtimeClass+: { allowedRuntimeClassNames: if std.isArray(v=allowedRuntimeClassNames) then allowedRuntimeClassNames else [allowedRuntimeClassNames] } }, - '#withAllowedRuntimeClassNamesMixin':: d.fn(help='"allowedRuntimeClassNames is an allowlist of RuntimeClass names that may be specified on a pod. A value of \\"*\\" means that any RuntimeClass name is allowed, and must be the only item in the list. An empty list requires the RuntimeClassName field to be unset."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='allowedRuntimeClassNames', type=d.T.array)]), - withAllowedRuntimeClassNamesMixin(allowedRuntimeClassNames): { runtimeClass+: { allowedRuntimeClassNames+: if std.isArray(v=allowedRuntimeClassNames) then allowedRuntimeClassNames else [allowedRuntimeClassNames] } }, - '#withDefaultRuntimeClassName':: d.fn(help='"defaultRuntimeClassName is the default RuntimeClassName to set on the pod. The default MUST be allowed by the allowedRuntimeClassNames list. A value of nil does not mutate the Pod."', args=[d.arg(name='defaultRuntimeClassName', type=d.T.string)]), - withDefaultRuntimeClassName(defaultRuntimeClassName): { runtimeClass+: { defaultRuntimeClassName: defaultRuntimeClassName } }, - }, - '#seLinux':: d.obj(help='"SELinuxStrategyOptions defines the strategy type and any options used to create the strategy."'), - seLinux: { - '#seLinuxOptions':: d.obj(help='"SELinuxOptions are the labels to be applied to the container"'), - seLinuxOptions: { - '#withLevel':: d.fn(help='"Level is SELinux level label that applies to the container."', args=[d.arg(name='level', type=d.T.string)]), - withLevel(level): { seLinux+: { seLinuxOptions+: { level: level } } }, - '#withRole':: d.fn(help='"Role is a SELinux role label that applies to the container."', args=[d.arg(name='role', type=d.T.string)]), - withRole(role): { seLinux+: { seLinuxOptions+: { role: role } } }, - '#withType':: d.fn(help='"Type is a SELinux type label that applies to the container."', args=[d.arg(name='type', type=d.T.string)]), - withType(type): { seLinux+: { seLinuxOptions+: { type: type } } }, - '#withUser':: d.fn(help='"User is a SELinux user label that applies to the container."', args=[d.arg(name='user', type=d.T.string)]), - withUser(user): { seLinux+: { seLinuxOptions+: { user: user } } }, - }, - '#withRule':: d.fn(help='"rule is the strategy that will dictate the allowable labels that may be set."', args=[d.arg(name='rule', type=d.T.string)]), - withRule(rule): { seLinux+: { rule: rule } }, - }, - '#supplementalGroups':: d.obj(help='"SupplementalGroupsStrategyOptions defines the strategy type and options used to create the strategy."'), - supplementalGroups: { - '#withRanges':: d.fn(help='"ranges are the allowed ranges of supplemental groups. If you would like to force a single supplemental group then supply a single range with the same start and end. Required for MustRunAs."', args=[d.arg(name='ranges', type=d.T.array)]), - withRanges(ranges): { supplementalGroups+: { ranges: if std.isArray(v=ranges) then ranges else [ranges] } }, - '#withRangesMixin':: d.fn(help='"ranges are the allowed ranges of supplemental groups. If you would like to force a single supplemental group then supply a single range with the same start and end. 
Required for MustRunAs."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ranges', type=d.T.array)]), - withRangesMixin(ranges): { supplementalGroups+: { ranges+: if std.isArray(v=ranges) then ranges else [ranges] } }, - '#withRule':: d.fn(help='"rule is the strategy that will dictate what supplemental groups is used in the SecurityContext."', args=[d.arg(name='rule', type=d.T.string)]), - withRule(rule): { supplementalGroups+: { rule: rule } }, - }, - '#withAllowPrivilegeEscalation':: d.fn(help='"allowPrivilegeEscalation determines if a pod can request to allow privilege escalation. If unspecified, defaults to true."', args=[d.arg(name='allowPrivilegeEscalation', type=d.T.boolean)]), - withAllowPrivilegeEscalation(allowPrivilegeEscalation): { allowPrivilegeEscalation: allowPrivilegeEscalation }, - '#withAllowedCSIDrivers':: d.fn(help='"AllowedCSIDrivers is an allowlist of inline CSI drivers that must be explicitly set to be embedded within a pod spec. An empty value indicates that any CSI driver can be used for inline ephemeral volumes. This is a beta field, and is only honored if the API server enables the CSIInlineVolume feature gate."', args=[d.arg(name='allowedCSIDrivers', type=d.T.array)]), - withAllowedCSIDrivers(allowedCSIDrivers): { allowedCSIDrivers: if std.isArray(v=allowedCSIDrivers) then allowedCSIDrivers else [allowedCSIDrivers] }, - '#withAllowedCSIDriversMixin':: d.fn(help='"AllowedCSIDrivers is an allowlist of inline CSI drivers that must be explicitly set to be embedded within a pod spec. An empty value indicates that any CSI driver can be used for inline ephemeral volumes. This is a beta field, and is only honored if the API server enables the CSIInlineVolume feature gate."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='allowedCSIDrivers', type=d.T.array)]), - withAllowedCSIDriversMixin(allowedCSIDrivers): { allowedCSIDrivers+: if std.isArray(v=allowedCSIDrivers) then allowedCSIDrivers else [allowedCSIDrivers] }, - '#withAllowedCapabilities':: d.fn(help="\"allowedCapabilities is a list of capabilities that can be requested to add to the container. Capabilities in this field may be added at the pod author's discretion. You must not list a capability in both allowedCapabilities and requiredDropCapabilities.\"", args=[d.arg(name='allowedCapabilities', type=d.T.array)]), - withAllowedCapabilities(allowedCapabilities): { allowedCapabilities: if std.isArray(v=allowedCapabilities) then allowedCapabilities else [allowedCapabilities] }, - '#withAllowedCapabilitiesMixin':: d.fn(help="\"allowedCapabilities is a list of capabilities that can be requested to add to the container. Capabilities in this field may be added at the pod author's discretion. You must not list a capability in both allowedCapabilities and requiredDropCapabilities.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='allowedCapabilities', type=d.T.array)]), - withAllowedCapabilitiesMixin(allowedCapabilities): { allowedCapabilities+: if std.isArray(v=allowedCapabilities) then allowedCapabilities else [allowedCapabilities] }, - '#withAllowedFlexVolumes':: d.fn(help='"allowedFlexVolumes is an allowlist of Flexvolumes. Empty or nil indicates that all Flexvolumes may be used. 
This parameter is effective only when the usage of the Flexvolumes is allowed in the \\"volumes\\" field."', args=[d.arg(name='allowedFlexVolumes', type=d.T.array)]), - withAllowedFlexVolumes(allowedFlexVolumes): { allowedFlexVolumes: if std.isArray(v=allowedFlexVolumes) then allowedFlexVolumes else [allowedFlexVolumes] }, - '#withAllowedFlexVolumesMixin':: d.fn(help='"allowedFlexVolumes is an allowlist of Flexvolumes. Empty or nil indicates that all Flexvolumes may be used. This parameter is effective only when the usage of the Flexvolumes is allowed in the \\"volumes\\" field."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='allowedFlexVolumes', type=d.T.array)]), - withAllowedFlexVolumesMixin(allowedFlexVolumes): { allowedFlexVolumes+: if std.isArray(v=allowedFlexVolumes) then allowedFlexVolumes else [allowedFlexVolumes] }, - '#withAllowedHostPaths':: d.fn(help='"allowedHostPaths is an allowlist of host paths. Empty indicates that all host paths may be used."', args=[d.arg(name='allowedHostPaths', type=d.T.array)]), - withAllowedHostPaths(allowedHostPaths): { allowedHostPaths: if std.isArray(v=allowedHostPaths) then allowedHostPaths else [allowedHostPaths] }, - '#withAllowedHostPathsMixin':: d.fn(help='"allowedHostPaths is an allowlist of host paths. Empty indicates that all host paths may be used."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='allowedHostPaths', type=d.T.array)]), - withAllowedHostPathsMixin(allowedHostPaths): { allowedHostPaths+: if std.isArray(v=allowedHostPaths) then allowedHostPaths else [allowedHostPaths] }, - '#withAllowedProcMountTypes':: d.fn(help='"AllowedProcMountTypes is an allowlist of allowed ProcMountTypes. Empty or nil indicates that only the DefaultProcMountType may be used. This requires the ProcMountType feature flag to be enabled."', args=[d.arg(name='allowedProcMountTypes', type=d.T.array)]), - withAllowedProcMountTypes(allowedProcMountTypes): { allowedProcMountTypes: if std.isArray(v=allowedProcMountTypes) then allowedProcMountTypes else [allowedProcMountTypes] }, - '#withAllowedProcMountTypesMixin':: d.fn(help='"AllowedProcMountTypes is an allowlist of allowed ProcMountTypes. Empty or nil indicates that only the DefaultProcMountType may be used. This requires the ProcMountType feature flag to be enabled."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='allowedProcMountTypes', type=d.T.array)]), - withAllowedProcMountTypesMixin(allowedProcMountTypes): { allowedProcMountTypes+: if std.isArray(v=allowedProcMountTypes) then allowedProcMountTypes else [allowedProcMountTypes] }, - '#withAllowedUnsafeSysctls':: d.fn(help='"allowedUnsafeSysctls is a list of explicitly allowed unsafe sysctls, defaults to none. Each entry is either a plain sysctl name or ends in \\"*\\" in which case it is considered as a prefix of allowed sysctls. Single * means all unsafe sysctls are allowed. Kubelet has to allowlist all allowed unsafe sysctls explicitly to avoid rejection.\\n\\nExamples: e.g. \\"foo/*\\" allows \\"foo/bar\\", \\"foo/baz\\", etc. e.g. 
\\"foo.*\\" allows \\"foo.bar\\", \\"foo.baz\\", etc."', args=[d.arg(name='allowedUnsafeSysctls', type=d.T.array)]), - withAllowedUnsafeSysctls(allowedUnsafeSysctls): { allowedUnsafeSysctls: if std.isArray(v=allowedUnsafeSysctls) then allowedUnsafeSysctls else [allowedUnsafeSysctls] }, - '#withAllowedUnsafeSysctlsMixin':: d.fn(help='"allowedUnsafeSysctls is a list of explicitly allowed unsafe sysctls, defaults to none. Each entry is either a plain sysctl name or ends in \\"*\\" in which case it is considered as a prefix of allowed sysctls. Single * means all unsafe sysctls are allowed. Kubelet has to allowlist all allowed unsafe sysctls explicitly to avoid rejection.\\n\\nExamples: e.g. \\"foo/*\\" allows \\"foo/bar\\", \\"foo/baz\\", etc. e.g. \\"foo.*\\" allows \\"foo.bar\\", \\"foo.baz\\", etc."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='allowedUnsafeSysctls', type=d.T.array)]), - withAllowedUnsafeSysctlsMixin(allowedUnsafeSysctls): { allowedUnsafeSysctls+: if std.isArray(v=allowedUnsafeSysctls) then allowedUnsafeSysctls else [allowedUnsafeSysctls] }, - '#withDefaultAddCapabilities':: d.fn(help='"defaultAddCapabilities is the default set of capabilities that will be added to the container unless the pod spec specifically drops the capability. You may not list a capability in both defaultAddCapabilities and requiredDropCapabilities. Capabilities added here are implicitly allowed, and need not be included in the allowedCapabilities list."', args=[d.arg(name='defaultAddCapabilities', type=d.T.array)]), - withDefaultAddCapabilities(defaultAddCapabilities): { defaultAddCapabilities: if std.isArray(v=defaultAddCapabilities) then defaultAddCapabilities else [defaultAddCapabilities] }, - '#withDefaultAddCapabilitiesMixin':: d.fn(help='"defaultAddCapabilities is the default set of capabilities that will be added to the container unless the pod spec specifically drops the capability. You may not list a capability in both defaultAddCapabilities and requiredDropCapabilities. Capabilities added here are implicitly allowed, and need not be included in the allowedCapabilities list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='defaultAddCapabilities', type=d.T.array)]), - withDefaultAddCapabilitiesMixin(defaultAddCapabilities): { defaultAddCapabilities+: if std.isArray(v=defaultAddCapabilities) then defaultAddCapabilities else [defaultAddCapabilities] }, - '#withDefaultAllowPrivilegeEscalation':: d.fn(help='"defaultAllowPrivilegeEscalation controls the default setting for whether a process can gain more privileges than its parent process."', args=[d.arg(name='defaultAllowPrivilegeEscalation', type=d.T.boolean)]), - withDefaultAllowPrivilegeEscalation(defaultAllowPrivilegeEscalation): { defaultAllowPrivilegeEscalation: defaultAllowPrivilegeEscalation }, - '#withForbiddenSysctls':: d.fn(help='"forbiddenSysctls is a list of explicitly forbidden sysctls, defaults to none. Each entry is either a plain sysctl name or ends in \\"*\\" in which case it is considered as a prefix of forbidden sysctls. Single * means all sysctls are forbidden.\\n\\nExamples: e.g. \\"foo/*\\" forbids \\"foo/bar\\", \\"foo/baz\\", etc. e.g. 
\\"foo.*\\" forbids \\"foo.bar\\", \\"foo.baz\\", etc."', args=[d.arg(name='forbiddenSysctls', type=d.T.array)]), - withForbiddenSysctls(forbiddenSysctls): { forbiddenSysctls: if std.isArray(v=forbiddenSysctls) then forbiddenSysctls else [forbiddenSysctls] }, - '#withForbiddenSysctlsMixin':: d.fn(help='"forbiddenSysctls is a list of explicitly forbidden sysctls, defaults to none. Each entry is either a plain sysctl name or ends in \\"*\\" in which case it is considered as a prefix of forbidden sysctls. Single * means all sysctls are forbidden.\\n\\nExamples: e.g. \\"foo/*\\" forbids \\"foo/bar\\", \\"foo/baz\\", etc. e.g. \\"foo.*\\" forbids \\"foo.bar\\", \\"foo.baz\\", etc."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='forbiddenSysctls', type=d.T.array)]), - withForbiddenSysctlsMixin(forbiddenSysctls): { forbiddenSysctls+: if std.isArray(v=forbiddenSysctls) then forbiddenSysctls else [forbiddenSysctls] }, - '#withHostIPC':: d.fn(help='"hostIPC determines if the policy allows the use of HostIPC in the pod spec."', args=[d.arg(name='hostIPC', type=d.T.boolean)]), - withHostIPC(hostIPC): { hostIPC: hostIPC }, - '#withHostNetwork':: d.fn(help='"hostNetwork determines if the policy allows the use of HostNetwork in the pod spec."', args=[d.arg(name='hostNetwork', type=d.T.boolean)]), - withHostNetwork(hostNetwork): { hostNetwork: hostNetwork }, - '#withHostPID':: d.fn(help='"hostPID determines if the policy allows the use of HostPID in the pod spec."', args=[d.arg(name='hostPID', type=d.T.boolean)]), - withHostPID(hostPID): { hostPID: hostPID }, - '#withHostPorts':: d.fn(help='"hostPorts determines which host port ranges are allowed to be exposed."', args=[d.arg(name='hostPorts', type=d.T.array)]), - withHostPorts(hostPorts): { hostPorts: if std.isArray(v=hostPorts) then hostPorts else [hostPorts] }, - '#withHostPortsMixin':: d.fn(help='"hostPorts determines which host port ranges are allowed to be exposed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='hostPorts', type=d.T.array)]), - withHostPortsMixin(hostPorts): { hostPorts+: if std.isArray(v=hostPorts) then hostPorts else [hostPorts] }, - '#withPrivileged':: d.fn(help='"privileged determines if a pod can request to be run as privileged."', args=[d.arg(name='privileged', type=d.T.boolean)]), - withPrivileged(privileged): { privileged: privileged }, - '#withReadOnlyRootFilesystem':: d.fn(help='"readOnlyRootFilesystem when set to true will force containers to run with a read only root file system. If the container specifically requests to run with a non-read only root file system the PSP should deny the pod. If set to false the container may run with a read only root file system if it wishes but it will not be forced to."', args=[d.arg(name='readOnlyRootFilesystem', type=d.T.boolean)]), - withReadOnlyRootFilesystem(readOnlyRootFilesystem): { readOnlyRootFilesystem: readOnlyRootFilesystem }, - '#withRequiredDropCapabilities':: d.fn(help='"requiredDropCapabilities are the capabilities that will be dropped from the container. 
These are required to be dropped and cannot be added."', args=[d.arg(name='requiredDropCapabilities', type=d.T.array)]), - withRequiredDropCapabilities(requiredDropCapabilities): { requiredDropCapabilities: if std.isArray(v=requiredDropCapabilities) then requiredDropCapabilities else [requiredDropCapabilities] }, - '#withRequiredDropCapabilitiesMixin':: d.fn(help='"requiredDropCapabilities are the capabilities that will be dropped from the container. These are required to be dropped and cannot be added."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='requiredDropCapabilities', type=d.T.array)]), - withRequiredDropCapabilitiesMixin(requiredDropCapabilities): { requiredDropCapabilities+: if std.isArray(v=requiredDropCapabilities) then requiredDropCapabilities else [requiredDropCapabilities] }, - '#withVolumes':: d.fn(help="\"volumes is an allowlist of volume plugins. Empty indicates that no volumes may be used. To allow all volumes you may use '*'.\"", args=[d.arg(name='volumes', type=d.T.array)]), - withVolumes(volumes): { volumes: if std.isArray(v=volumes) then volumes else [volumes] }, - '#withVolumesMixin':: d.fn(help="\"volumes is an allowlist of volume plugins. Empty indicates that no volumes may be used. To allow all volumes you may use '*'.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='volumes', type=d.T.array)]), - withVolumesMixin(volumes): { volumes+: if std.isArray(v=volumes) then volumes else [volumes] }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1beta1/runAsGroupStrategyOptions.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1beta1/runAsGroupStrategyOptions.libsonnet deleted file mode 100644 index ce9ce9e48e4..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1beta1/runAsGroupStrategyOptions.libsonnet +++ /dev/null @@ -1,12 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='runAsGroupStrategyOptions', url='', help='"RunAsGroupStrategyOptions defines the strategy type and any options used to create the strategy."'), - '#withRanges':: d.fn(help='"ranges are the allowed ranges of gids that may be used. If you would like to force a single gid then supply a single range with the same start and end. Required for MustRunAs."', args=[d.arg(name='ranges', type=d.T.array)]), - withRanges(ranges): { ranges: if std.isArray(v=ranges) then ranges else [ranges] }, - '#withRangesMixin':: d.fn(help='"ranges are the allowed ranges of gids that may be used. If you would like to force a single gid then supply a single range with the same start and end. 
Required for MustRunAs."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ranges', type=d.T.array)]), - withRangesMixin(ranges): { ranges+: if std.isArray(v=ranges) then ranges else [ranges] }, - '#withRule':: d.fn(help='"rule is the strategy that will dictate the allowable RunAsGroup values that may be set."', args=[d.arg(name='rule', type=d.T.string)]), - withRule(rule): { rule: rule }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1beta1/runAsUserStrategyOptions.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1beta1/runAsUserStrategyOptions.libsonnet deleted file mode 100644 index 4eff3f52590..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1beta1/runAsUserStrategyOptions.libsonnet +++ /dev/null @@ -1,12 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='runAsUserStrategyOptions', url='', help='"RunAsUserStrategyOptions defines the strategy type and any options used to create the strategy."'), - '#withRanges':: d.fn(help='"ranges are the allowed ranges of uids that may be used. If you would like to force a single uid then supply a single range with the same start and end. Required for MustRunAs."', args=[d.arg(name='ranges', type=d.T.array)]), - withRanges(ranges): { ranges: if std.isArray(v=ranges) then ranges else [ranges] }, - '#withRangesMixin':: d.fn(help='"ranges are the allowed ranges of uids that may be used. If you would like to force a single uid then supply a single range with the same start and end. Required for MustRunAs."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ranges', type=d.T.array)]), - withRangesMixin(ranges): { ranges+: if std.isArray(v=ranges) then ranges else [ranges] }, - '#withRule':: d.fn(help='"rule is the strategy that will dictate the allowable RunAsUser values that may be set."', args=[d.arg(name='rule', type=d.T.string)]), - withRule(rule): { rule: rule }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1beta1/runtimeClassStrategyOptions.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1beta1/runtimeClassStrategyOptions.libsonnet deleted file mode 100644 index d6e31153e35..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1beta1/runtimeClassStrategyOptions.libsonnet +++ /dev/null @@ -1,12 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='runtimeClassStrategyOptions', url='', help='"RuntimeClassStrategyOptions define the strategy that will dictate the allowable RuntimeClasses for a pod."'), - '#withAllowedRuntimeClassNames':: d.fn(help='"allowedRuntimeClassNames is an allowlist of RuntimeClass names that may be specified on a pod. A value of \\"*\\" means that any RuntimeClass name is allowed, and must be the only item in the list. 
An empty list requires the RuntimeClassName field to be unset."', args=[d.arg(name='allowedRuntimeClassNames', type=d.T.array)]), - withAllowedRuntimeClassNames(allowedRuntimeClassNames): { allowedRuntimeClassNames: if std.isArray(v=allowedRuntimeClassNames) then allowedRuntimeClassNames else [allowedRuntimeClassNames] }, - '#withAllowedRuntimeClassNamesMixin':: d.fn(help='"allowedRuntimeClassNames is an allowlist of RuntimeClass names that may be specified on a pod. A value of \\"*\\" means that any RuntimeClass name is allowed, and must be the only item in the list. An empty list requires the RuntimeClassName field to be unset."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='allowedRuntimeClassNames', type=d.T.array)]), - withAllowedRuntimeClassNamesMixin(allowedRuntimeClassNames): { allowedRuntimeClassNames+: if std.isArray(v=allowedRuntimeClassNames) then allowedRuntimeClassNames else [allowedRuntimeClassNames] }, - '#withDefaultRuntimeClassName':: d.fn(help='"defaultRuntimeClassName is the default RuntimeClassName to set on the pod. The default MUST be allowed by the allowedRuntimeClassNames list. A value of nil does not mutate the Pod."', args=[d.arg(name='defaultRuntimeClassName', type=d.T.string)]), - withDefaultRuntimeClassName(defaultRuntimeClassName): { defaultRuntimeClassName: defaultRuntimeClassName }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1beta1/seLinuxStrategyOptions.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1beta1/seLinuxStrategyOptions.libsonnet deleted file mode 100644 index 513de55c3d5..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1beta1/seLinuxStrategyOptions.libsonnet +++ /dev/null @@ -1,19 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='seLinuxStrategyOptions', url='', help='"SELinuxStrategyOptions defines the strategy type and any options used to create the strategy."'), - '#seLinuxOptions':: d.obj(help='"SELinuxOptions are the labels to be applied to the container"'), - seLinuxOptions: { - '#withLevel':: d.fn(help='"Level is SELinux level label that applies to the container."', args=[d.arg(name='level', type=d.T.string)]), - withLevel(level): { seLinuxOptions+: { level: level } }, - '#withRole':: d.fn(help='"Role is a SELinux role label that applies to the container."', args=[d.arg(name='role', type=d.T.string)]), - withRole(role): { seLinuxOptions+: { role: role } }, - '#withType':: d.fn(help='"Type is a SELinux type label that applies to the container."', args=[d.arg(name='type', type=d.T.string)]), - withType(type): { seLinuxOptions+: { type: type } }, - '#withUser':: d.fn(help='"User is a SELinux user label that applies to the container."', args=[d.arg(name='user', type=d.T.string)]), - withUser(user): { seLinuxOptions+: { user: user } }, - }, - '#withRule':: d.fn(help='"rule is the strategy that will dictate the allowable labels that may be set."', args=[d.arg(name='rule', type=d.T.string)]), - withRule(rule): { rule: rule }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1beta1/supplementalGroupsStrategyOptions.libsonnet 
b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1beta1/supplementalGroupsStrategyOptions.libsonnet deleted file mode 100644 index e9505d76d0d..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1beta1/supplementalGroupsStrategyOptions.libsonnet +++ /dev/null @@ -1,12 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='supplementalGroupsStrategyOptions', url='', help='"SupplementalGroupsStrategyOptions defines the strategy type and options used to create the strategy."'), - '#withRanges':: d.fn(help='"ranges are the allowed ranges of supplemental groups. If you would like to force a single supplemental group then supply a single range with the same start and end. Required for MustRunAs."', args=[d.arg(name='ranges', type=d.T.array)]), - withRanges(ranges): { ranges: if std.isArray(v=ranges) then ranges else [ranges] }, - '#withRangesMixin':: d.fn(help='"ranges are the allowed ranges of supplemental groups. If you would like to force a single supplemental group then supply a single range with the same start and end. Required for MustRunAs."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ranges', type=d.T.array)]), - withRangesMixin(ranges): { ranges+: if std.isArray(v=ranges) then ranges else [ranges] }, - '#withRule':: d.fn(help='"rule is the strategy that will dictate what supplemental groups is used in the SecurityContext."', args=[d.arg(name='rule', type=d.T.string)]), - withRule(rule): { rule: rule }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1/aggregationRule.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1/aggregationRule.libsonnet deleted file mode 100644 index 086524f9025..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1/aggregationRule.libsonnet +++ /dev/null @@ -1,10 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='aggregationRule', url='', help='"AggregationRule describes how to locate ClusterRoles to aggregate into the ClusterRole"'), - '#withClusterRoleSelectors':: d.fn(help="\"ClusterRoleSelectors holds a list of selectors which will be used to find ClusterRoles and create the rules. If any of the selectors match, then the ClusterRole's permissions will be added\"", args=[d.arg(name='clusterRoleSelectors', type=d.T.array)]), - withClusterRoleSelectors(clusterRoleSelectors): { clusterRoleSelectors: if std.isArray(v=clusterRoleSelectors) then clusterRoleSelectors else [clusterRoleSelectors] }, - '#withClusterRoleSelectorsMixin':: d.fn(help="\"ClusterRoleSelectors holds a list of selectors which will be used to find ClusterRoles and create the rules. 
If any of the selectors match, then the ClusterRole's permissions will be added\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='clusterRoleSelectors', type=d.T.array)]), - withClusterRoleSelectorsMixin(clusterRoleSelectors): { clusterRoleSelectors+: if std.isArray(v=clusterRoleSelectors) then clusterRoleSelectors else [clusterRoleSelectors] }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1/roleRef.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1/roleRef.libsonnet deleted file mode 100644 index 870b3ac95ae..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1/roleRef.libsonnet +++ /dev/null @@ -1,12 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='roleRef', url='', help='"RoleRef contains information that points to the role being used"'), - '#withApiGroup':: d.fn(help='"APIGroup is the group for the resource being referenced"', args=[d.arg(name='apiGroup', type=d.T.string)]), - withApiGroup(apiGroup): { apiGroup: apiGroup }, - '#withKind':: d.fn(help='"Kind is the type of resource being referenced"', args=[d.arg(name='kind', type=d.T.string)]), - withKind(kind): { kind: kind }, - '#withName':: d.fn(help='"Name is the name of resource being referenced"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { name: name }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1/subject.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1/subject.libsonnet deleted file mode 100644 index 06c6868aae1..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1/subject.libsonnet +++ /dev/null @@ -1,14 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='subject', url='', help='"Subject contains a reference to the object or user identities a role binding applies to. This can either hold a direct API object reference, or a value for non-objects such as user and group names."'), - '#withApiGroup':: d.fn(help='"APIGroup holds the API group of the referenced subject. Defaults to \\"\\" for ServiceAccount subjects. Defaults to \\"rbac.authorization.k8s.io\\" for User and Group subjects."', args=[d.arg(name='apiGroup', type=d.T.string)]), - withApiGroup(apiGroup): { apiGroup: apiGroup }, - '#withKind':: d.fn(help='"Kind of object being referenced. Values defined by this API group are \\"User\\", \\"Group\\", and \\"ServiceAccount\\". If the Authorizer does not recognized the kind value, the Authorizer should report an error."', args=[d.arg(name='kind', type=d.T.string)]), - withKind(kind): { kind: kind }, - '#withName':: d.fn(help='"Name of the object being referenced."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { name: name }, - '#withNamespace':: d.fn(help='"Namespace of the referenced object. 
If the object kind is non-namespace, such as \\"User\\" or \\"Group\\", and this value is not empty the Authorizer should report an error."', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { namespace: namespace }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1alpha1/aggregationRule.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1alpha1/aggregationRule.libsonnet deleted file mode 100644 index 086524f9025..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1alpha1/aggregationRule.libsonnet +++ /dev/null @@ -1,10 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='aggregationRule', url='', help='"AggregationRule describes how to locate ClusterRoles to aggregate into the ClusterRole"'), - '#withClusterRoleSelectors':: d.fn(help="\"ClusterRoleSelectors holds a list of selectors which will be used to find ClusterRoles and create the rules. If any of the selectors match, then the ClusterRole's permissions will be added\"", args=[d.arg(name='clusterRoleSelectors', type=d.T.array)]), - withClusterRoleSelectors(clusterRoleSelectors): { clusterRoleSelectors: if std.isArray(v=clusterRoleSelectors) then clusterRoleSelectors else [clusterRoleSelectors] }, - '#withClusterRoleSelectorsMixin':: d.fn(help="\"ClusterRoleSelectors holds a list of selectors which will be used to find ClusterRoles and create the rules. If any of the selectors match, then the ClusterRole's permissions will be added\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='clusterRoleSelectors', type=d.T.array)]), - withClusterRoleSelectorsMixin(clusterRoleSelectors): { clusterRoleSelectors+: if std.isArray(v=clusterRoleSelectors) then clusterRoleSelectors else [clusterRoleSelectors] }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1alpha1/clusterRole.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1alpha1/clusterRole.libsonnet deleted file mode 100644 index 01149764463..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1alpha1/clusterRole.libsonnet +++ /dev/null @@ -1,67 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='clusterRole', url='', help='"ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding. Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 ClusterRole, and will no longer be served in v1.22."'), - '#aggregationRule':: d.obj(help='"AggregationRule describes how to locate ClusterRoles to aggregate into the ClusterRole"'), - aggregationRule: { - '#withClusterRoleSelectors':: d.fn(help="\"ClusterRoleSelectors holds a list of selectors which will be used to find ClusterRoles and create the rules. 
If any of the selectors match, then the ClusterRole's permissions will be added\"", args=[d.arg(name='clusterRoleSelectors', type=d.T.array)]), - withClusterRoleSelectors(clusterRoleSelectors): { aggregationRule+: { clusterRoleSelectors: if std.isArray(v=clusterRoleSelectors) then clusterRoleSelectors else [clusterRoleSelectors] } }, - '#withClusterRoleSelectorsMixin':: d.fn(help="\"ClusterRoleSelectors holds a list of selectors which will be used to find ClusterRoles and create the rules. If any of the selectors match, then the ClusterRole's permissions will be added\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='clusterRoleSelectors', type=d.T.array)]), - withClusterRoleSelectorsMixin(clusterRoleSelectors): { aggregationRule+: { clusterRoleSelectors+: if std.isArray(v=clusterRoleSelectors) then clusterRoleSelectors else [clusterRoleSelectors] } }, - }, - '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), - metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), - withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), - withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, - '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), - withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, - '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), - withDeletionGracePeriodSeconds(deletionGracePeriodSeconds): { metadata+: { deletionGracePeriodSeconds: deletionGracePeriodSeconds } }, - '#withDeletionTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. 
Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='deletionTimestamp', type=d.T.string)]), - withDeletionTimestamp(deletionTimestamp): { metadata+: { deletionTimestamp: deletionTimestamp } }, - '#withFinalizers':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), - withGenerateName(generateName): { metadata+: { generateName: generateName } }, - '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), - withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), - withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), - withLabelsMixin(labels): { metadata+: { labels+: labels } }, - '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. 
More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { metadata+: { namespace: namespace } }, - '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, - '#withOwnerReferencesMixin':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, - '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), - withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), - withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. 
More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), - withUid(uid): { metadata+: { uid: uid } }, - }, - '#new':: d.fn(help='new returns an instance of ClusterRole', args=[d.arg(name='name', type=d.T.string)]), - new(name): { - apiVersion: 'rbac.authorization.k8s.io/v1alpha1', - kind: 'ClusterRole', - } + self.metadata.withName(name=name), - '#withRules':: d.fn(help='"Rules holds all the PolicyRules for this ClusterRole"', args=[d.arg(name='rules', type=d.T.array)]), - withRules(rules): { rules: if std.isArray(v=rules) then rules else [rules] }, - '#withRulesMixin':: d.fn(help='"Rules holds all the PolicyRules for this ClusterRole"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='rules', type=d.T.array)]), - withRulesMixin(rules): { rules+: if std.isArray(v=rules) then rules else [rules] }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1alpha1/clusterRoleBinding.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1alpha1/clusterRoleBinding.libsonnet deleted file mode 100644 index f03fd577ec2..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1alpha1/clusterRoleBinding.libsonnet +++ /dev/null @@ -1,69 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='clusterRoleBinding', url='', help='"ClusterRoleBinding references a ClusterRole, but not contain it. It can reference a ClusterRole in the global namespace, and adds who information via Subject. Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 ClusterRoleBinding, and will no longer be served in v1.22."'), - '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), - metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), - withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), - withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, - '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. 
Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), - withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, - '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), - withDeletionGracePeriodSeconds(deletionGracePeriodSeconds): { metadata+: { deletionGracePeriodSeconds: deletionGracePeriodSeconds } }, - '#withDeletionTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='deletionTimestamp', type=d.T.string)]), - withDeletionTimestamp(deletionTimestamp): { metadata+: { deletionTimestamp: deletionTimestamp } }, - '#withFinalizers':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. 
If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), - withGenerateName(generateName): { metadata+: { generateName: generateName } }, - '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), - withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), - withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), - withLabelsMixin(labels): { metadata+: { labels+: labels } }, - '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. 
Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { metadata+: { namespace: namespace } }, - '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, - '#withOwnerReferencesMixin':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, - '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), - withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), - withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. 
It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]),
- withUid(uid): { metadata+: { uid: uid } },
- },
- '#new':: d.fn(help='new returns an instance of ClusterRoleBinding', args=[d.arg(name='name', type=d.T.string)]),
- new(name): {
- apiVersion: 'rbac.authorization.k8s.io/v1alpha1',
- kind: 'ClusterRoleBinding',
- } + self.metadata.withName(name=name),
- '#roleRef':: d.obj(help='"RoleRef contains information that points to the role being used"'),
- roleRef: {
- '#withApiGroup':: d.fn(help='"APIGroup is the group for the resource being referenced"', args=[d.arg(name='apiGroup', type=d.T.string)]),
- withApiGroup(apiGroup): { roleRef+: { apiGroup: apiGroup } },
- '#withKind':: d.fn(help='"Kind is the type of resource being referenced"', args=[d.arg(name='kind', type=d.T.string)]),
- withKind(kind): { roleRef+: { kind: kind } },
- '#withName':: d.fn(help='"Name is the name of resource being referenced"', args=[d.arg(name='name', type=d.T.string)]),
- withName(name): { roleRef+: { name: name } },
- },
- '#withSubjects':: d.fn(help='"Subjects holds references to the objects the role applies to."', args=[d.arg(name='subjects', type=d.T.array)]),
- withSubjects(subjects): { subjects: if std.isArray(v=subjects) then subjects else [subjects] },
- '#withSubjectsMixin':: d.fn(help='"Subjects holds references to the objects the role applies to."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='subjects', type=d.T.array)]),
- withSubjectsMixin(subjects): { subjects+: if std.isArray(v=subjects) then subjects else [subjects] },
- '#mixin': 'ignore',
- mixin: self,
-}
diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1alpha1/main.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1alpha1/main.libsonnet
deleted file mode 100644
index cababc33d00..00000000000
--- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1alpha1/main.libsonnet
+++ /dev/null
@@ -1,12 +0,0 @@
-{
- local d = (import 'doc-util/main.libsonnet'),
- '#':: d.pkg(name='v1alpha1', url='', help=''),
- aggregationRule: (import 'aggregationRule.libsonnet'),
- clusterRole: (import 'clusterRole.libsonnet'),
- clusterRoleBinding: (import 'clusterRoleBinding.libsonnet'),
- policyRule: (import 'policyRule.libsonnet'),
- role: (import 'role.libsonnet'),
- roleBinding: (import 'roleBinding.libsonnet'),
- roleRef: (import 'roleRef.libsonnet'),
- subject: (import 'subject.libsonnet'),
-}
diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1alpha1/policyRule.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1alpha1/policyRule.libsonnet
deleted file mode 100644
index 9b15ccb2c07..00000000000
--- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1alpha1/policyRule.libsonnet
+++ /dev/null
@@ -1,26 +0,0 @@
-{
- local d = (import 'doc-util/main.libsonnet'),
- '#':: d.pkg(name='policyRule', url='', help='"PolicyRule holds information that describes a policy rule, but does not contain information about who the rule applies to or which namespace the rule applies to."'),
- '#withApiGroups':: 
d.fn(help='"APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of the enumerated resources in any API group will be allowed."', args=[d.arg(name='apiGroups', type=d.T.array)]), - withApiGroups(apiGroups): { apiGroups: if std.isArray(v=apiGroups) then apiGroups else [apiGroups] }, - '#withApiGroupsMixin':: d.fn(help='"APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of the enumerated resources in any API group will be allowed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='apiGroups', type=d.T.array)]), - withApiGroupsMixin(apiGroups): { apiGroups+: if std.isArray(v=apiGroups) then apiGroups else [apiGroups] }, - '#withNonResourceURLs':: d.fn(help='"NonResourceURLs is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path Since non-resource URLs are not namespaced, this field is only applicable for ClusterRoles referenced from a ClusterRoleBinding. Rules can either apply to API resources (such as \\"pods\\" or \\"secrets\\") or non-resource URL paths (such as \\"/api\\"), but not both."', args=[d.arg(name='nonResourceURLs', type=d.T.array)]), - withNonResourceURLs(nonResourceURLs): { nonResourceURLs: if std.isArray(v=nonResourceURLs) then nonResourceURLs else [nonResourceURLs] }, - '#withNonResourceURLsMixin':: d.fn(help='"NonResourceURLs is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path Since non-resource URLs are not namespaced, this field is only applicable for ClusterRoles referenced from a ClusterRoleBinding. Rules can either apply to API resources (such as \\"pods\\" or \\"secrets\\") or non-resource URL paths (such as \\"/api\\"), but not both."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='nonResourceURLs', type=d.T.array)]), - withNonResourceURLsMixin(nonResourceURLs): { nonResourceURLs+: if std.isArray(v=nonResourceURLs) then nonResourceURLs else [nonResourceURLs] }, - '#withResourceNames':: d.fn(help='"ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed."', args=[d.arg(name='resourceNames', type=d.T.array)]), - withResourceNames(resourceNames): { resourceNames: if std.isArray(v=resourceNames) then resourceNames else [resourceNames] }, - '#withResourceNamesMixin':: d.fn(help='"ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='resourceNames', type=d.T.array)]), - withResourceNamesMixin(resourceNames): { resourceNames+: if std.isArray(v=resourceNames) then resourceNames else [resourceNames] }, - '#withResources':: d.fn(help='"Resources is a list of resources this rule applies to. ResourceAll represents all resources."', args=[d.arg(name='resources', type=d.T.array)]), - withResources(resources): { resources: if std.isArray(v=resources) then resources else [resources] }, - '#withResourcesMixin':: d.fn(help='"Resources is a list of resources this rule applies to. 
ResourceAll represents all resources."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='resources', type=d.T.array)]), - withResourcesMixin(resources): { resources+: if std.isArray(v=resources) then resources else [resources] }, - '#withVerbs':: d.fn(help='"Verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule. VerbAll represents all kinds."', args=[d.arg(name='verbs', type=d.T.array)]), - withVerbs(verbs): { verbs: if std.isArray(v=verbs) then verbs else [verbs] }, - '#withVerbsMixin':: d.fn(help='"Verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule. VerbAll represents all kinds."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='verbs', type=d.T.array)]), - withVerbsMixin(verbs): { verbs+: if std.isArray(v=verbs) then verbs else [verbs] }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1alpha1/role.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1alpha1/role.libsonnet deleted file mode 100644 index 0f7d7b3b7ad..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1alpha1/role.libsonnet +++ /dev/null @@ -1,60 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='role', url='', help='"Role is a namespaced, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding. Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 Role, and will no longer be served in v1.22."'), - '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), - metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), - withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), - withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, - '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. 
Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), - withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, - '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), - withDeletionGracePeriodSeconds(deletionGracePeriodSeconds): { metadata+: { deletionGracePeriodSeconds: deletionGracePeriodSeconds } }, - '#withDeletionTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='deletionTimestamp', type=d.T.string)]), - withDeletionTimestamp(deletionTimestamp): { metadata+: { deletionTimestamp: deletionTimestamp } }, - '#withFinalizers':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. 
If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), - withGenerateName(generateName): { metadata+: { generateName: generateName } }, - '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), - withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), - withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), - withLabelsMixin(labels): { metadata+: { labels+: labels } }, - '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. 
Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { metadata+: { namespace: namespace } }, - '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, - '#withOwnerReferencesMixin':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, - '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), - withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), - withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. 
It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), - withUid(uid): { metadata+: { uid: uid } }, - }, - '#new':: d.fn(help='new returns an instance of Role', args=[d.arg(name='name', type=d.T.string)]), - new(name): { - apiVersion: 'rbac.authorization.k8s.io/v1alpha1', - kind: 'Role', - } + self.metadata.withName(name=name), - '#withRules':: d.fn(help='"Rules holds all the PolicyRules for this Role"', args=[d.arg(name='rules', type=d.T.array)]), - withRules(rules): { rules: if std.isArray(v=rules) then rules else [rules] }, - '#withRulesMixin':: d.fn(help='"Rules holds all the PolicyRules for this Role"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='rules', type=d.T.array)]), - withRulesMixin(rules): { rules+: if std.isArray(v=rules) then rules else [rules] }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1alpha1/roleBinding.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1alpha1/roleBinding.libsonnet deleted file mode 100644 index c1e906e19e8..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1alpha1/roleBinding.libsonnet +++ /dev/null @@ -1,69 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='roleBinding', url='', help='"RoleBinding references a role, but does not contain it. It can reference a Role in the same namespace or a ClusterRole in the global namespace. It adds who information via Subjects and namespace information by which namespace it exists in. RoleBindings in a given namespace only have effect in that namespace. Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 RoleBinding, and will no longer be served in v1.22."'), - '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), - metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), - withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), - withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. 
This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, - '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), - withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, - '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), - withDeletionGracePeriodSeconds(deletionGracePeriodSeconds): { metadata+: { deletionGracePeriodSeconds: deletionGracePeriodSeconds } }, - '#withDeletionTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='deletionTimestamp', type=d.T.string)]), - withDeletionTimestamp(deletionTimestamp): { metadata+: { deletionTimestamp: deletionTimestamp } }, - '#withFinalizers':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. 
Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), - withGenerateName(generateName): { metadata+: { generateName: generateName } }, - '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), - withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), - withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), - withLabelsMixin(labels): { metadata+: { labels+: labels } }, - '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. 
A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { metadata+: { namespace: namespace } }, - '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, - '#withOwnerReferencesMixin':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, - '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), - withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), - withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), - withUid(uid): { metadata+: { uid: uid } }, - }, - '#new':: d.fn(help='new returns an instance of RoleBinding', args=[d.arg(name='name', type=d.T.string)]), - new(name): { - apiVersion: 'rbac.authorization.k8s.io/v1alpha1', - kind: 'RoleBinding', - } + self.metadata.withName(name=name), - '#roleRef':: d.obj(help='"RoleRef contains information that points to the role being used"'), - roleRef: { - '#withApiGroup':: d.fn(help='"APIGroup is the group for the resource being referenced"', args=[d.arg(name='apiGroup', type=d.T.string)]), - withApiGroup(apiGroup): { roleRef+: { apiGroup: apiGroup } }, - '#withKind':: d.fn(help='"Kind is the type of resource being referenced"', args=[d.arg(name='kind', type=d.T.string)]), - withKind(kind): { roleRef+: { kind: kind } }, - '#withName':: d.fn(help='"Name is the name of resource being referenced"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { roleRef+: { name: name } }, - }, - '#withSubjects':: d.fn(help='"Subjects holds references to the objects the role applies to."', args=[d.arg(name='subjects', type=d.T.array)]), - withSubjects(subjects): { subjects: if std.isArray(v=subjects) then subjects else [subjects] }, - '#withSubjectsMixin':: d.fn(help='"Subjects holds references to the objects the role applies to."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='subjects', type=d.T.array)]), - withSubjectsMixin(subjects): { subjects+: if std.isArray(v=subjects) then subjects else [subjects] }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1alpha1/roleRef.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1alpha1/roleRef.libsonnet deleted file mode 100644 index 870b3ac95ae..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1alpha1/roleRef.libsonnet +++ /dev/null @@ -1,12 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='roleRef', url='', help='"RoleRef contains information that points to the role being used"'), - '#withApiGroup':: d.fn(help='"APIGroup is the group for the resource being referenced"', args=[d.arg(name='apiGroup', type=d.T.string)]), - withApiGroup(apiGroup): { apiGroup: apiGroup }, - '#withKind':: d.fn(help='"Kind is the type of resource being referenced"', args=[d.arg(name='kind', type=d.T.string)]), - withKind(kind): { kind: kind }, - '#withName':: 
d.fn(help='"Name is the name of resource being referenced"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { name: name }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1alpha1/subject.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1alpha1/subject.libsonnet deleted file mode 100644 index 29bc1b90432..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1alpha1/subject.libsonnet +++ /dev/null @@ -1,12 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='subject', url='', help='"Subject contains a reference to the object or user identities a role binding applies to. This can either hold a direct API object reference, or a value for non-objects such as user and group names."'), - '#withKind':: d.fn(help='"Kind of object being referenced. Values defined by this API group are \\"User\\", \\"Group\\", and \\"ServiceAccount\\". If the Authorizer does not recognized the kind value, the Authorizer should report an error."', args=[d.arg(name='kind', type=d.T.string)]), - withKind(kind): { kind: kind }, - '#withName':: d.fn(help='"Name of the object being referenced."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { name: name }, - '#withNamespace':: d.fn(help='"Namespace of the referenced object. If the object kind is non-namespace, such as \\"User\\" or \\"Group\\", and this value is not empty the Authorizer should report an error."', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { namespace: namespace }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1beta1/aggregationRule.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1beta1/aggregationRule.libsonnet deleted file mode 100644 index 086524f9025..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1beta1/aggregationRule.libsonnet +++ /dev/null @@ -1,10 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='aggregationRule', url='', help='"AggregationRule describes how to locate ClusterRoles to aggregate into the ClusterRole"'), - '#withClusterRoleSelectors':: d.fn(help="\"ClusterRoleSelectors holds a list of selectors which will be used to find ClusterRoles and create the rules. If any of the selectors match, then the ClusterRole's permissions will be added\"", args=[d.arg(name='clusterRoleSelectors', type=d.T.array)]), - withClusterRoleSelectors(clusterRoleSelectors): { clusterRoleSelectors: if std.isArray(v=clusterRoleSelectors) then clusterRoleSelectors else [clusterRoleSelectors] }, - '#withClusterRoleSelectorsMixin':: d.fn(help="\"ClusterRoleSelectors holds a list of selectors which will be used to find ClusterRoles and create the rules. 
If any of the selectors match, then the ClusterRole's permissions will be added\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='clusterRoleSelectors', type=d.T.array)]), - withClusterRoleSelectorsMixin(clusterRoleSelectors): { clusterRoleSelectors+: if std.isArray(v=clusterRoleSelectors) then clusterRoleSelectors else [clusterRoleSelectors] }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1beta1/clusterRole.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1beta1/clusterRole.libsonnet deleted file mode 100644 index e4ff8663399..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1beta1/clusterRole.libsonnet +++ /dev/null @@ -1,67 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='clusterRole', url='', help='"ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding. Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 ClusterRole, and will no longer be served in v1.22."'), - '#aggregationRule':: d.obj(help='"AggregationRule describes how to locate ClusterRoles to aggregate into the ClusterRole"'), - aggregationRule: { - '#withClusterRoleSelectors':: d.fn(help="\"ClusterRoleSelectors holds a list of selectors which will be used to find ClusterRoles and create the rules. If any of the selectors match, then the ClusterRole's permissions will be added\"", args=[d.arg(name='clusterRoleSelectors', type=d.T.array)]), - withClusterRoleSelectors(clusterRoleSelectors): { aggregationRule+: { clusterRoleSelectors: if std.isArray(v=clusterRoleSelectors) then clusterRoleSelectors else [clusterRoleSelectors] } }, - '#withClusterRoleSelectorsMixin':: d.fn(help="\"ClusterRoleSelectors holds a list of selectors which will be used to find ClusterRoles and create the rules. If any of the selectors match, then the ClusterRole's permissions will be added\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='clusterRoleSelectors', type=d.T.array)]), - withClusterRoleSelectorsMixin(clusterRoleSelectors): { aggregationRule+: { clusterRoleSelectors+: if std.isArray(v=clusterRoleSelectors) then clusterRoleSelectors else [clusterRoleSelectors] } }, - }, - '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), - metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), - withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. 
More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), - withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, - '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), - withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, - '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), - withDeletionGracePeriodSeconds(deletionGracePeriodSeconds): { metadata+: { deletionGracePeriodSeconds: deletionGracePeriodSeconds } }, - '#withDeletionTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='deletionTimestamp', type=d.T.string)]), - withDeletionTimestamp(deletionTimestamp): { metadata+: { deletionTimestamp: deletionTimestamp } }, - '#withFinalizers':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. 
If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), - withGenerateName(generateName): { metadata+: { generateName: generateName } }, - '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), - withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), - withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), - withLabelsMixin(labels): { metadata+: { labels+: labels } }, - '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". 
The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { metadata+: { namespace: namespace } }, - '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, - '#withOwnerReferencesMixin':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, - '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. 
May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), - withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), - withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), - withUid(uid): { metadata+: { uid: uid } }, - }, - '#new':: d.fn(help='new returns an instance of ClusterRole', args=[d.arg(name='name', type=d.T.string)]), - new(name): { - apiVersion: 'rbac.authorization.k8s.io/v1beta1', - kind: 'ClusterRole', - } + self.metadata.withName(name=name), - '#withRules':: d.fn(help='"Rules holds all the PolicyRules for this ClusterRole"', args=[d.arg(name='rules', type=d.T.array)]), - withRules(rules): { rules: if std.isArray(v=rules) then rules else [rules] }, - '#withRulesMixin':: d.fn(help='"Rules holds all the PolicyRules for this ClusterRole"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='rules', type=d.T.array)]), - withRulesMixin(rules): { rules+: if std.isArray(v=rules) then rules else [rules] }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1beta1/clusterRoleBinding.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1beta1/clusterRoleBinding.libsonnet deleted file mode 100644 index 6c0a6fbb951..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1beta1/clusterRoleBinding.libsonnet +++ /dev/null @@ -1,69 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='clusterRoleBinding', url='', help='"ClusterRoleBinding references a ClusterRole, but not contain it. It can reference a ClusterRole in the global namespace, and adds who information via Subject. Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 ClusterRoleBinding, and will no longer be served in v1.22."'), - '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), - metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. 
More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), - withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), - withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, - '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), - withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, - '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), - withDeletionGracePeriodSeconds(deletionGracePeriodSeconds): { metadata+: { deletionGracePeriodSeconds: deletionGracePeriodSeconds } }, - '#withDeletionTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='deletionTimestamp', type=d.T.string)]), - withDeletionTimestamp(deletionTimestamp): { metadata+: { deletionTimestamp: deletionTimestamp } }, - '#withFinalizers':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. 
Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), - withGenerateName(generateName): { metadata+: { generateName: generateName } }, - '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), - withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), - withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), - withLabelsMixin(labels): { metadata+: { labels+: labels } }, - '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. 
A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { metadata+: { namespace: namespace } }, - '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, - '#withOwnerReferencesMixin':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. 
There cannot be more than one managing controller."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, - '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), - withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), - withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), - withUid(uid): { metadata+: { uid: uid } }, - }, - '#new':: d.fn(help='new returns an instance of ClusterRoleBinding', args=[d.arg(name='name', type=d.T.string)]), - new(name): { - apiVersion: 'rbac.authorization.k8s.io/v1beta1', - kind: 'ClusterRoleBinding', - } + self.metadata.withName(name=name), - '#roleRef':: d.obj(help='"RoleRef contains information that points to the role being used"'), - roleRef: { - '#withApiGroup':: d.fn(help='"APIGroup is the group for the resource being referenced"', args=[d.arg(name='apiGroup', type=d.T.string)]), - withApiGroup(apiGroup): { roleRef+: { apiGroup: apiGroup } }, - '#withKind':: d.fn(help='"Kind is the type of resource being referenced"', args=[d.arg(name='kind', type=d.T.string)]), - withKind(kind): { roleRef+: { kind: kind } }, - '#withName':: d.fn(help='"Name is the name of resource being referenced"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { roleRef+: { name: name } }, - }, - '#withSubjects':: d.fn(help='"Subjects holds references to the objects the role applies to."', args=[d.arg(name='subjects', type=d.T.array)]), - withSubjects(subjects): { subjects: if std.isArray(v=subjects) then subjects else [subjects] }, - '#withSubjectsMixin':: d.fn(help='"Subjects holds references to the objects the role applies to."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='subjects', type=d.T.array)]), - withSubjectsMixin(subjects): { subjects+: if std.isArray(v=subjects) then subjects else [subjects] }, - '#mixin': 'ignore', - mixin: self, -} diff --git 
a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1beta1/main.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1beta1/main.libsonnet deleted file mode 100644 index 67591074ee6..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1beta1/main.libsonnet +++ /dev/null @@ -1,12 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='v1beta1', url='', help=''), - aggregationRule: (import 'aggregationRule.libsonnet'), - clusterRole: (import 'clusterRole.libsonnet'), - clusterRoleBinding: (import 'clusterRoleBinding.libsonnet'), - policyRule: (import 'policyRule.libsonnet'), - role: (import 'role.libsonnet'), - roleBinding: (import 'roleBinding.libsonnet'), - roleRef: (import 'roleRef.libsonnet'), - subject: (import 'subject.libsonnet'), -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1beta1/policyRule.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1beta1/policyRule.libsonnet deleted file mode 100644 index 548338dfcc1..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1beta1/policyRule.libsonnet +++ /dev/null @@ -1,26 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='policyRule', url='', help='"PolicyRule holds information that describes a policy rule, but does not contain information about who the rule applies to or which namespace the rule applies to."'), - '#withApiGroups':: d.fn(help='"APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of the enumerated resources in any API group will be allowed."', args=[d.arg(name='apiGroups', type=d.T.array)]), - withApiGroups(apiGroups): { apiGroups: if std.isArray(v=apiGroups) then apiGroups else [apiGroups] }, - '#withApiGroupsMixin':: d.fn(help='"APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of the enumerated resources in any API group will be allowed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='apiGroups', type=d.T.array)]), - withApiGroupsMixin(apiGroups): { apiGroups+: if std.isArray(v=apiGroups) then apiGroups else [apiGroups] }, - '#withNonResourceURLs':: d.fn(help='"NonResourceURLs is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path Since non-resource URLs are not namespaced, this field is only applicable for ClusterRoles referenced from a ClusterRoleBinding. Rules can either apply to API resources (such as \\"pods\\" or \\"secrets\\") or non-resource URL paths (such as \\"/api\\"), but not both."', args=[d.arg(name='nonResourceURLs', type=d.T.array)]), - withNonResourceURLs(nonResourceURLs): { nonResourceURLs: if std.isArray(v=nonResourceURLs) then nonResourceURLs else [nonResourceURLs] }, - '#withNonResourceURLsMixin':: d.fn(help='"NonResourceURLs is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path Since non-resource URLs are not namespaced, this field is only applicable for ClusterRoles referenced from a ClusterRoleBinding. 
Rules can either apply to API resources (such as \\"pods\\" or \\"secrets\\") or non-resource URL paths (such as \\"/api\\"), but not both."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='nonResourceURLs', type=d.T.array)]), - withNonResourceURLsMixin(nonResourceURLs): { nonResourceURLs+: if std.isArray(v=nonResourceURLs) then nonResourceURLs else [nonResourceURLs] }, - '#withResourceNames':: d.fn(help='"ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed."', args=[d.arg(name='resourceNames', type=d.T.array)]), - withResourceNames(resourceNames): { resourceNames: if std.isArray(v=resourceNames) then resourceNames else [resourceNames] }, - '#withResourceNamesMixin':: d.fn(help='"ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='resourceNames', type=d.T.array)]), - withResourceNamesMixin(resourceNames): { resourceNames+: if std.isArray(v=resourceNames) then resourceNames else [resourceNames] }, - '#withResources':: d.fn(help="\"Resources is a list of resources this rule applies to. '*' represents all resources in the specified apiGroups. '*/foo' represents the subresource 'foo' for all resources in the specified apiGroups.\"", args=[d.arg(name='resources', type=d.T.array)]), - withResources(resources): { resources: if std.isArray(v=resources) then resources else [resources] }, - '#withResourcesMixin':: d.fn(help="\"Resources is a list of resources this rule applies to. '*' represents all resources in the specified apiGroups. '*/foo' represents the subresource 'foo' for all resources in the specified apiGroups.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='resources', type=d.T.array)]), - withResourcesMixin(resources): { resources+: if std.isArray(v=resources) then resources else [resources] }, - '#withVerbs':: d.fn(help='"Verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule. VerbAll represents all kinds."', args=[d.arg(name='verbs', type=d.T.array)]), - withVerbs(verbs): { verbs: if std.isArray(v=verbs) then verbs else [verbs] }, - '#withVerbsMixin':: d.fn(help='"Verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule. VerbAll represents all kinds."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='verbs', type=d.T.array)]), - withVerbsMixin(verbs): { verbs+: if std.isArray(v=verbs) then verbs else [verbs] }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1beta1/role.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1beta1/role.libsonnet deleted file mode 100644 index 26abb745707..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1beta1/role.libsonnet +++ /dev/null @@ -1,60 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='role', url='', help='"Role is a namespaced, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding. 
Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 Role, and will no longer be served in v1.22."'), - '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), - metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), - withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), - withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, - '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), - withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, - '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), - withDeletionGracePeriodSeconds(deletionGracePeriodSeconds): { metadata+: { deletionGracePeriodSeconds: deletionGracePeriodSeconds } }, - '#withDeletionTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='deletionTimestamp', type=d.T.string)]), - withDeletionTimestamp(deletionTimestamp): { metadata+: { deletionTimestamp: deletionTimestamp } }, - '#withFinalizers':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. 
If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), - withGenerateName(generateName): { metadata+: { generateName: generateName } }, - '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), - withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. 
More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), - withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), - withLabelsMixin(labels): { metadata+: { labels+: labels } }, - '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { metadata+: { namespace: namespace } }, - '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. 
There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, - '#withOwnerReferencesMixin':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, - '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), - withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), - withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. 
More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), - withUid(uid): { metadata+: { uid: uid } }, - }, - '#new':: d.fn(help='new returns an instance of Role', args=[d.arg(name='name', type=d.T.string)]), - new(name): { - apiVersion: 'rbac.authorization.k8s.io/v1beta1', - kind: 'Role', - } + self.metadata.withName(name=name), - '#withRules':: d.fn(help='"Rules holds all the PolicyRules for this Role"', args=[d.arg(name='rules', type=d.T.array)]), - withRules(rules): { rules: if std.isArray(v=rules) then rules else [rules] }, - '#withRulesMixin':: d.fn(help='"Rules holds all the PolicyRules for this Role"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='rules', type=d.T.array)]), - withRulesMixin(rules): { rules+: if std.isArray(v=rules) then rules else [rules] }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1beta1/roleBinding.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1beta1/roleBinding.libsonnet deleted file mode 100644 index 9712e16f485..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1beta1/roleBinding.libsonnet +++ /dev/null @@ -1,69 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='roleBinding', url='', help='"RoleBinding references a role, but does not contain it. It can reference a Role in the same namespace or a ClusterRole in the global namespace. It adds who information via Subjects and namespace information by which namespace it exists in. RoleBindings in a given namespace only have effect in that namespace. Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 RoleBinding, and will no longer be served in v1.22."'), - '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), - metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), - withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), - withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. 
This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, - '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), - withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, - '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), - withDeletionGracePeriodSeconds(deletionGracePeriodSeconds): { metadata+: { deletionGracePeriodSeconds: deletionGracePeriodSeconds } }, - '#withDeletionTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='deletionTimestamp', type=d.T.string)]), - withDeletionTimestamp(deletionTimestamp): { metadata+: { deletionTimestamp: deletionTimestamp } }, - '#withFinalizers':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. 
Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), - withGenerateName(generateName): { metadata+: { generateName: generateName } }, - '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), - withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), - withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), - withLabelsMixin(labels): { metadata+: { labels+: labels } }, - '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. 
A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { metadata+: { namespace: namespace } }, - '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, - '#withOwnerReferencesMixin':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, - '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), - withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), - withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), - withUid(uid): { metadata+: { uid: uid } }, - }, - '#new':: d.fn(help='new returns an instance of RoleBinding', args=[d.arg(name='name', type=d.T.string)]), - new(name): { - apiVersion: 'rbac.authorization.k8s.io/v1beta1', - kind: 'RoleBinding', - } + self.metadata.withName(name=name), - '#roleRef':: d.obj(help='"RoleRef contains information that points to the role being used"'), - roleRef: { - '#withApiGroup':: d.fn(help='"APIGroup is the group for the resource being referenced"', args=[d.arg(name='apiGroup', type=d.T.string)]), - withApiGroup(apiGroup): { roleRef+: { apiGroup: apiGroup } }, - '#withKind':: d.fn(help='"Kind is the type of resource being referenced"', args=[d.arg(name='kind', type=d.T.string)]), - withKind(kind): { roleRef+: { kind: kind } }, - '#withName':: d.fn(help='"Name is the name of resource being referenced"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { roleRef+: { name: name } }, - }, - '#withSubjects':: d.fn(help='"Subjects holds references to the objects the role applies to."', args=[d.arg(name='subjects', type=d.T.array)]), - withSubjects(subjects): { subjects: if std.isArray(v=subjects) then subjects else [subjects] }, - '#withSubjectsMixin':: d.fn(help='"Subjects holds references to the objects the role applies to."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='subjects', type=d.T.array)]), - withSubjectsMixin(subjects): { subjects+: if std.isArray(v=subjects) then subjects else [subjects] }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1beta1/roleRef.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1beta1/roleRef.libsonnet deleted file mode 100644 index 870b3ac95ae..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1beta1/roleRef.libsonnet +++ /dev/null @@ -1,12 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='roleRef', url='', help='"RoleRef contains information that points to the role being used"'), - '#withApiGroup':: d.fn(help='"APIGroup is the group for the resource being referenced"', args=[d.arg(name='apiGroup', type=d.T.string)]), - withApiGroup(apiGroup): { apiGroup: apiGroup }, - '#withKind':: d.fn(help='"Kind is the type of resource being referenced"', args=[d.arg(name='kind', type=d.T.string)]), - withKind(kind): { kind: kind }, - '#withName':: 
d.fn(help='"Name is the name of resource being referenced"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { name: name }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1beta1/subject.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1beta1/subject.libsonnet deleted file mode 100644 index 06c6868aae1..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1beta1/subject.libsonnet +++ /dev/null @@ -1,14 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='subject', url='', help='"Subject contains a reference to the object or user identities a role binding applies to. This can either hold a direct API object reference, or a value for non-objects such as user and group names."'), - '#withApiGroup':: d.fn(help='"APIGroup holds the API group of the referenced subject. Defaults to \\"\\" for ServiceAccount subjects. Defaults to \\"rbac.authorization.k8s.io\\" for User and Group subjects."', args=[d.arg(name='apiGroup', type=d.T.string)]), - withApiGroup(apiGroup): { apiGroup: apiGroup }, - '#withKind':: d.fn(help='"Kind of object being referenced. Values defined by this API group are \\"User\\", \\"Group\\", and \\"ServiceAccount\\". If the Authorizer does not recognized the kind value, the Authorizer should report an error."', args=[d.arg(name='kind', type=d.T.string)]), - withKind(kind): { kind: kind }, - '#withName':: d.fn(help='"Name of the object being referenced."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { name: name }, - '#withNamespace':: d.fn(help='"Namespace of the referenced object. If the object kind is non-namespace, such as \\"User\\" or \\"Group\\", and this value is not empty the Authorizer should report an error."', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { namespace: namespace }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/scheduling/v1alpha1/priorityClass.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/scheduling/v1alpha1/priorityClass.libsonnet deleted file mode 100644 index 5339b8fa918..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/scheduling/v1alpha1/priorityClass.libsonnet +++ /dev/null @@ -1,64 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='priorityClass', url='', help='"DEPRECATED - This group version of PriorityClass is deprecated by scheduling.k8s.io/v1/PriorityClass. PriorityClass defines mapping from a priority class name to the priority integer value. The value can be any valid integer."'), - '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), - metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. 
More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), - withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), - withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, - '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), - withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, - '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), - withDeletionGracePeriodSeconds(deletionGracePeriodSeconds): { metadata+: { deletionGracePeriodSeconds: deletionGracePeriodSeconds } }, - '#withDeletionTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='deletionTimestamp', type=d.T.string)]), - withDeletionTimestamp(deletionTimestamp): { metadata+: { deletionTimestamp: deletionTimestamp } }, - '#withFinalizers':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. 
Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), - withGenerateName(generateName): { metadata+: { generateName: generateName } }, - '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), - withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), - withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), - withLabelsMixin(labels): { metadata+: { labels+: labels } }, - '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. 
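Throughout these generated helpers, a plain with<Field> setter overwrites the target field, while the with<Field>Mixin variant merges into it (that is what the "**Note:** This function appends passed data to existing values" lines flag: the Mixin form uses Jsonnet's `+:`). A small illustration with the metadata label helpers, using the v1 priorityClass purely as a carrier object; the import path and label values are assumptions:

  local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';  // assumed vendor path
  local pc = k.scheduling.v1.priorityClass;

  pc.metadata.withLabels({ team: 'observability' })     // sets metadata.labels
  + pc.metadata.withLabels({ app: 'tempo' })            // replaces it: only { app: 'tempo' } remains
  + pc.metadata.withLabelsMixin({ tier: 'backend' })    // merges into it
  // final value: { metadata: { labels: { app: 'tempo', tier: 'backend' } } }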
A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { metadata+: { namespace: namespace } }, - '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, - '#withOwnerReferencesMixin':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. 
There cannot be more than one managing controller."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, - '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), - withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), - withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), - withUid(uid): { metadata+: { uid: uid } }, - }, - '#new':: d.fn(help='new returns an instance of PriorityClass', args=[d.arg(name='name', type=d.T.string)]), - new(name): { - apiVersion: 'scheduling.k8s.io/v1alpha1', - kind: 'PriorityClass', - } + self.metadata.withName(name=name), - '#withDescription':: d.fn(help='"description is an arbitrary string that usually provides guidelines on when this priority class should be used."', args=[d.arg(name='description', type=d.T.string)]), - withDescription(description): { description: description }, - '#withGlobalDefault':: d.fn(help='"globalDefault specifies whether this PriorityClass should be considered as the default priority for pods that do not have any priority class. Only one PriorityClass can be marked as `globalDefault`. However, if more than one PriorityClasses exists with their `globalDefault` field set to true, the smallest value of such global default PriorityClasses will be used as the default priority."', args=[d.arg(name='globalDefault', type=d.T.boolean)]), - withGlobalDefault(globalDefault): { globalDefault: globalDefault }, - '#withPreemptionPolicy':: d.fn(help='"PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset. This field is beta-level, gated by the NonPreemptingPriority feature-gate."', args=[d.arg(name='preemptionPolicy', type=d.T.string)]), - withPreemptionPolicy(preemptionPolicy): { preemptionPolicy: preemptionPolicy }, - '#withValue':: d.fn(help='"The value of this priority class. 
This is the actual priority that pods receive when they have the name of this class in their pod spec."', args=[d.arg(name='value', type=d.T.integer)]), - withValue(value): { value: value }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/scheduling/v1beta1/main.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/scheduling/v1beta1/main.libsonnet deleted file mode 100644 index 3b681adec03..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/scheduling/v1beta1/main.libsonnet +++ /dev/null @@ -1,5 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='v1beta1', url='', help=''), - priorityClass: (import 'priorityClass.libsonnet'), -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/scheduling/v1beta1/priorityClass.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/scheduling/v1beta1/priorityClass.libsonnet deleted file mode 100644 index 6530293e600..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/scheduling/v1beta1/priorityClass.libsonnet +++ /dev/null @@ -1,64 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='priorityClass', url='', help='"DEPRECATED - This group version of PriorityClass is deprecated by scheduling.k8s.io/v1/PriorityClass. PriorityClass defines mapping from a priority class name to the priority integer value. The value can be any valid integer."'), - '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), - metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), - withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), - withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, - '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. 
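Both deprecated PriorityClass group versions removed in this section are superseded by scheduling.k8s.io/v1, which the 1.29 library exposes with the same setters. A sketch with an assumed import path and hypothetical values:

  local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';  // assumed vendor path
  local pc = k.scheduling.v1.priorityClass;

  pc.new('latency-critical')                            // hypothetical class name
  + pc.withValue(1000000)
  + pc.withGlobalDefault(false)
  + pc.withPreemptionPolicy('PreemptLowerPriority')
  + pc.withDescription('For query-path pods that may preempt batch work.')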
Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), - withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, - '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), - withDeletionGracePeriodSeconds(deletionGracePeriodSeconds): { metadata+: { deletionGracePeriodSeconds: deletionGracePeriodSeconds } }, - '#withDeletionTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='deletionTimestamp', type=d.T.string)]), - withDeletionTimestamp(deletionTimestamp): { metadata+: { deletionTimestamp: deletionTimestamp } }, - '#withFinalizers':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. 
If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), - withGenerateName(generateName): { metadata+: { generateName: generateName } }, - '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), - withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), - withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), - withLabelsMixin(labels): { metadata+: { labels+: labels } }, - '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. 
Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { metadata+: { namespace: namespace } }, - '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, - '#withOwnerReferencesMixin':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, - '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), - withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), - withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. 
It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), - withUid(uid): { metadata+: { uid: uid } }, - }, - '#new':: d.fn(help='new returns an instance of PriorityClass', args=[d.arg(name='name', type=d.T.string)]), - new(name): { - apiVersion: 'scheduling.k8s.io/v1beta1', - kind: 'PriorityClass', - } + self.metadata.withName(name=name), - '#withDescription':: d.fn(help='"description is an arbitrary string that usually provides guidelines on when this priority class should be used."', args=[d.arg(name='description', type=d.T.string)]), - withDescription(description): { description: description }, - '#withGlobalDefault':: d.fn(help='"globalDefault specifies whether this PriorityClass should be considered as the default priority for pods that do not have any priority class. Only one PriorityClass can be marked as `globalDefault`. However, if more than one PriorityClasses exists with their `globalDefault` field set to true, the smallest value of such global default PriorityClasses will be used as the default priority."', args=[d.arg(name='globalDefault', type=d.T.boolean)]), - withGlobalDefault(globalDefault): { globalDefault: globalDefault }, - '#withPreemptionPolicy':: d.fn(help='"PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset. This field is beta-level, gated by the NonPreemptingPriority feature-gate."', args=[d.arg(name='preemptionPolicy', type=d.T.string)]), - withPreemptionPolicy(preemptionPolicy): { preemptionPolicy: preemptionPolicy }, - '#withValue':: d.fn(help='"The value of this priority class. This is the actual priority that pods receive when they have the name of this class in their pod spec."', args=[d.arg(name='value', type=d.T.integer)]), - withValue(value): { value: value }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1/csiDriverSpec.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1/csiDriverSpec.libsonnet deleted file mode 100644 index f07c80be789..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1/csiDriverSpec.libsonnet +++ /dev/null @@ -1,24 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='csiDriverSpec', url='', help='"CSIDriverSpec is the specification of a CSIDriver."'), - '#withAttachRequired':: d.fn(help='"attachRequired indicates this CSI volume driver requires an attach operation (because it implements the CSI ControllerPublishVolume() method), and that the Kubernetes attach detach controller should call the attach volume interface which checks the volumeattachment status and waits until the volume is attached before proceeding to mounting. The CSI external-attacher coordinates with CSI volume driver and updates the volumeattachment status when the attach operation is complete. If the CSIDriverRegistry feature gate is enabled and the value is specified to false, the attach operation will be skipped. 
Otherwise the attach operation will be called.\\n\\nThis field is immutable."', args=[d.arg(name='attachRequired', type=d.T.boolean)]), - withAttachRequired(attachRequired): { attachRequired: attachRequired }, - '#withFsGroupPolicy':: d.fn(help='"Defines if the underlying volume supports changing ownership and permission of the volume before being mounted. Refer to the specific FSGroupPolicy values for additional details. This field is alpha-level, and is only honored by servers that enable the CSIVolumeFSGroupPolicy feature gate.\\n\\nThis field is immutable."', args=[d.arg(name='fsGroupPolicy', type=d.T.string)]), - withFsGroupPolicy(fsGroupPolicy): { fsGroupPolicy: fsGroupPolicy }, - '#withPodInfoOnMount':: d.fn(help="\"If set to true, podInfoOnMount indicates this CSI volume driver requires additional pod information (like podName, podUID, etc.) during mount operations. If set to false, pod information will not be passed on mount. Default is false. The CSI driver specifies podInfoOnMount as part of driver deployment. If true, Kubelet will pass pod information as VolumeContext in the CSI NodePublishVolume() calls. The CSI driver is responsible for parsing and validating the information passed in as VolumeContext. The following VolumeConext will be passed if podInfoOnMount is set to true. This list might grow, but the prefix will be used. \\\"csi.storage.k8s.io/pod.name\\\": pod.Name \\\"csi.storage.k8s.io/pod.namespace\\\": pod.Namespace \\\"csi.storage.k8s.io/pod.uid\\\": string(pod.UID) \\\"csi.storage.k8s.io/ephemeral\\\": \\\"true\\\" if the volume is an ephemeral inline volume\\n defined by a CSIVolumeSource, otherwise \\\"false\\\"\\n\\n\\\"csi.storage.k8s.io/ephemeral\\\" is a new feature in Kubernetes 1.16. It is only required for drivers which support both the \\\"Persistent\\\" and \\\"Ephemeral\\\" VolumeLifecycleMode. Other drivers can leave pod info disabled and/or ignore this field. As Kubernetes 1.15 doesn't support this field, drivers can only support one mode when deployed on such a cluster and the deployment determines which mode that is, for example via a command line parameter of the driver.\\n\\nThis field is immutable.\"", args=[d.arg(name='podInfoOnMount', type=d.T.boolean)]), - withPodInfoOnMount(podInfoOnMount): { podInfoOnMount: podInfoOnMount }, - '#withRequiresRepublish':: d.fn(help='"RequiresRepublish indicates the CSI driver wants `NodePublishVolume` being periodically called to reflect any possible change in the mounted volume. This field defaults to false.\\n\\nNote: After a successful initial NodePublishVolume call, subsequent calls to NodePublishVolume should only update the contents of the volume. New mount points will not be seen by a running container.\\n\\nThis is a beta feature and only available when the CSIServiceAccountToken feature is enabled."', args=[d.arg(name='requiresRepublish', type=d.T.boolean)]), - withRequiresRepublish(requiresRepublish): { requiresRepublish: requiresRepublish }, - '#withStorageCapacity':: d.fn(help='"If set to true, storageCapacity indicates that the CSI volume driver wants pod scheduling to consider the storage capacity that the driver deployment will report by creating CSIStorageCapacity objects with capacity information.\\n\\nThe check can be enabled immediately when deploying a driver. 
In that case, provisioning new volumes with late binding will pause until the driver deployment has published some suitable CSIStorageCapacity object.\\n\\nAlternatively, the driver can be deployed with the field unset or false and it can be flipped later when storage capacity information has been published.\\n\\nThis field is immutable.\\n\\nThis is a beta field and only available when the CSIStorageCapacity feature is enabled. The default is false."', args=[d.arg(name='storageCapacity', type=d.T.boolean)]), - withStorageCapacity(storageCapacity): { storageCapacity: storageCapacity }, - '#withTokenRequests':: d.fn(help="\"TokenRequests indicates the CSI driver needs pods' service account tokens it is mounting volume for to do necessary authentication. Kubelet will pass the tokens in VolumeContext in the CSI NodePublishVolume calls. The CSI driver should parse and validate the following VolumeContext: \\\"csi.storage.k8s.io/serviceAccount.tokens\\\": {\\n \\\"\u003caudience\u003e\\\": {\\n \\\"token\\\": \u003ctoken\u003e,\\n \\\"expirationTimestamp\\\": \u003cexpiration timestamp in RFC3339\u003e,\\n },\\n ...\\n}\\n\\nNote: Audience in each TokenRequest should be different and at most one token is empty string. To receive a new token after expiry, RequiresRepublish can be used to trigger NodePublishVolume periodically.\\n\\nThis is a beta feature and only available when the CSIServiceAccountToken feature is enabled.\"", args=[d.arg(name='tokenRequests', type=d.T.array)]), - withTokenRequests(tokenRequests): { tokenRequests: if std.isArray(v=tokenRequests) then tokenRequests else [tokenRequests] }, - '#withTokenRequestsMixin':: d.fn(help="\"TokenRequests indicates the CSI driver needs pods' service account tokens it is mounting volume for to do necessary authentication. Kubelet will pass the tokens in VolumeContext in the CSI NodePublishVolume calls. The CSI driver should parse and validate the following VolumeContext: \\\"csi.storage.k8s.io/serviceAccount.tokens\\\": {\\n \\\"\u003caudience\u003e\\\": {\\n \\\"token\\\": \u003ctoken\u003e,\\n \\\"expirationTimestamp\\\": \u003cexpiration timestamp in RFC3339\u003e,\\n },\\n ...\\n}\\n\\nNote: Audience in each TokenRequest should be different and at most one token is empty string. To receive a new token after expiry, RequiresRepublish can be used to trigger NodePublishVolume periodically.\\n\\nThis is a beta feature and only available when the CSIServiceAccountToken feature is enabled.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='tokenRequests', type=d.T.array)]), - withTokenRequestsMixin(tokenRequests): { tokenRequests+: if std.isArray(v=tokenRequests) then tokenRequests else [tokenRequests] }, - '#withVolumeLifecycleModes':: d.fn(help='"volumeLifecycleModes defines what kind of volumes this CSI volume driver supports. The default if the list is empty is \\"Persistent\\", which is the usage defined by the CSI specification and implemented in Kubernetes via the usual PV/PVC mechanism. The other mode is \\"Ephemeral\\". In this mode, volumes are defined inline inside the pod spec with CSIVolumeSource and their lifecycle is tied to the lifecycle of that pod. A driver has to be aware of this because it is only going to get a NodePublishVolume call for such a volume. For more information about implementing this mode, see https://kubernetes-csi.github.io/docs/ephemeral-local-volumes.html A driver can support one or more of these modes and more modes may be added in the future. 
This field is beta.\\n\\nThis field is immutable."', args=[d.arg(name='volumeLifecycleModes', type=d.T.array)]), - withVolumeLifecycleModes(volumeLifecycleModes): { volumeLifecycleModes: if std.isArray(v=volumeLifecycleModes) then volumeLifecycleModes else [volumeLifecycleModes] }, - '#withVolumeLifecycleModesMixin':: d.fn(help='"volumeLifecycleModes defines what kind of volumes this CSI volume driver supports. The default if the list is empty is \\"Persistent\\", which is the usage defined by the CSI specification and implemented in Kubernetes via the usual PV/PVC mechanism. The other mode is \\"Ephemeral\\". In this mode, volumes are defined inline inside the pod spec with CSIVolumeSource and their lifecycle is tied to the lifecycle of that pod. A driver has to be aware of this because it is only going to get a NodePublishVolume call for such a volume. For more information about implementing this mode, see https://kubernetes-csi.github.io/docs/ephemeral-local-volumes.html A driver can support one or more of these modes and more modes may be added in the future. This field is beta.\\n\\nThis field is immutable."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='volumeLifecycleModes', type=d.T.array)]), - withVolumeLifecycleModesMixin(volumeLifecycleModes): { volumeLifecycleModes+: if std.isArray(v=volumeLifecycleModes) then volumeLifecycleModes else [volumeLifecycleModes] }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1/csiNodeDriver.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1/csiNodeDriver.libsonnet deleted file mode 100644 index cceebc97156..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1/csiNodeDriver.libsonnet +++ /dev/null @@ -1,19 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='csiNodeDriver', url='', help='"CSINodeDriver holds information about the specification of one CSI driver installed on a node"'), - '#allocatable':: d.obj(help='"VolumeNodeResources is a set of resource limits for scheduling of volumes."'), - allocatable: { - '#withCount':: d.fn(help='"Maximum number of unique volumes managed by the CSI driver that can be used on a node. A volume that is both attached and mounted on a node is considered to be used once, not twice. The same rule applies for a unique volume that is shared among multiple pods on the same node. If this field is not specified, then the supported number of volumes on this node is unbounded."', args=[d.arg(name='count', type=d.T.integer)]), - withCount(count): { allocatable+: { count: count } }, - }, - '#withName':: d.fn(help='"This is the name of the CSI driver that this object refers to. This MUST be the same name returned by the CSI GetPluginName() call for that driver."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { name: name }, - '#withNodeID':: d.fn(help='"nodeID of the node from the driver point of view. This field enables Kubernetes to communicate with storage systems that do not share the same nomenclature for nodes. For example, Kubernetes may refer to a given node as \\"node1\\", but the storage system may refer to the same node as \\"nodeA\\". 
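The storage.v1 csiDriver object, which the 1.29 library keeps, nests the csiDriverSpec setters shown above under a spec group in the usual generated layout. A sketch assuming that layout, an assumed import path, and a hypothetical driver name:

  local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';  // assumed vendor path
  local csiDriver = k.storage.v1.csiDriver;

  csiDriver.new('ebs.csi.example.com')                  // hypothetical driver name
  + csiDriver.spec.withAttachRequired(true)
  + csiDriver.spec.withPodInfoOnMount(false)
  + csiDriver.spec.withFsGroupPolicy('File')
  + csiDriver.spec.withVolumeLifecycleModes(['Persistent'])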
When Kubernetes issues a command to the storage system to attach a volume to a specific node, it can use this field to refer to the node name using the ID that the storage system will understand, e.g. \\"nodeA\\" instead of \\"node1\\". This field is required."', args=[d.arg(name='nodeID', type=d.T.string)]), - withNodeID(nodeID): { nodeID: nodeID }, - '#withTopologyKeys':: d.fn(help='"topologyKeys is the list of keys supported by the driver. When a driver is initialized on a cluster, it provides a set of topology keys that it understands (e.g. \\"company.com/zone\\", \\"company.com/region\\"). When a driver is initialized on a node, it provides the same topology keys along with values. Kubelet will expose these topology keys as labels on its own node object. When Kubernetes does topology aware provisioning, it can use this list to determine which labels it should retrieve from the node object and pass back to the driver. It is possible for different nodes to use different topology keys. This can be empty if driver does not support topology."', args=[d.arg(name='topologyKeys', type=d.T.array)]), - withTopologyKeys(topologyKeys): { topologyKeys: if std.isArray(v=topologyKeys) then topologyKeys else [topologyKeys] }, - '#withTopologyKeysMixin':: d.fn(help='"topologyKeys is the list of keys supported by the driver. When a driver is initialized on a cluster, it provides a set of topology keys that it understands (e.g. \\"company.com/zone\\", \\"company.com/region\\"). When a driver is initialized on a node, it provides the same topology keys along with values. Kubelet will expose these topology keys as labels on its own node object. When Kubernetes does topology aware provisioning, it can use this list to determine which labels it should retrieve from the node object and pass back to the driver. It is possible for different nodes to use different topology keys. This can be empty if driver does not support topology."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='topologyKeys', type=d.T.array)]), - withTopologyKeysMixin(topologyKeys): { topologyKeys+: if std.isArray(v=topologyKeys) then topologyKeys else [topologyKeys] }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1/csiNodeSpec.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1/csiNodeSpec.libsonnet deleted file mode 100644 index a0a47b4846e..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1/csiNodeSpec.libsonnet +++ /dev/null @@ -1,10 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='csiNodeSpec', url='', help='"CSINodeSpec holds information about the specification of all CSI drivers installed on a node"'), - '#withDrivers':: d.fn(help='"drivers is a list of information of all CSI Drivers existing on a node. If all drivers in the list are uninstalled, this can become empty."', args=[d.arg(name='drivers', type=d.T.array)]), - withDrivers(drivers): { drivers: if std.isArray(v=drivers) then drivers else [drivers] }, - '#withDriversMixin':: d.fn(help='"drivers is a list of information of all CSI Drivers existing on a node. 
If all drivers in the list are uninstalled, this can become empty."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='drivers', type=d.T.array)]), - withDriversMixin(drivers): { drivers+: if std.isArray(v=drivers) then drivers else [drivers] }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1/main.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1/main.libsonnet deleted file mode 100644 index 86d2b1336c5..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1/main.libsonnet +++ /dev/null @@ -1,17 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='v1', url='', help=''), - csiDriver: (import 'csiDriver.libsonnet'), - csiDriverSpec: (import 'csiDriverSpec.libsonnet'), - csiNode: (import 'csiNode.libsonnet'), - csiNodeDriver: (import 'csiNodeDriver.libsonnet'), - csiNodeSpec: (import 'csiNodeSpec.libsonnet'), - storageClass: (import 'storageClass.libsonnet'), - tokenRequest: (import 'tokenRequest.libsonnet'), - volumeAttachment: (import 'volumeAttachment.libsonnet'), - volumeAttachmentSource: (import 'volumeAttachmentSource.libsonnet'), - volumeAttachmentSpec: (import 'volumeAttachmentSpec.libsonnet'), - volumeAttachmentStatus: (import 'volumeAttachmentStatus.libsonnet'), - volumeError: (import 'volumeError.libsonnet'), - volumeNodeResources: (import 'volumeNodeResources.libsonnet'), -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1/volumeAttachmentSource.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1/volumeAttachmentSource.libsonnet deleted file mode 100644 index 47ca4b20576..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1/volumeAttachmentSource.libsonnet +++ /dev/null @@ -1,419 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='volumeAttachmentSource', url='', help='"VolumeAttachmentSource represents a volume that should be attached. Right now only PersistenVolumes can be attached via external attacher, in future we may allow also inline volumes in pods. Exactly one member can be set."'), - '#inlineVolumeSpec':: d.obj(help='"PersistentVolumeSpec is the specification of a persistent volume."'), - inlineVolumeSpec: { - '#awsElasticBlockStore':: d.obj(help='"Represents a Persistent Disk resource in AWS.\\n\\nAn AWS EBS disk must exist before mounting to a container. The disk must also be in the same AWS zone as the kubelet. An AWS EBS disk can only be mounted as read/write once. AWS EBS volumes support ownership management and SELinux relabeling."'), - awsElasticBlockStore: { - '#withFsType':: d.fn(help='"Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore"', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { inlineVolumeSpec+: { awsElasticBlockStore+: { fsType: fsType } } }, - '#withPartition':: d.fn(help='"The partition in the volume that you want to mount. If omitted, the default is to mount by volume name. 
Examples: For volume /dev/sda1, you specify the partition as \\"1\\". Similarly, the volume partition for /dev/sda is \\"0\\" (or you can leave the property empty)."', args=[d.arg(name='partition', type=d.T.integer)]), - withPartition(partition): { inlineVolumeSpec+: { awsElasticBlockStore+: { partition: partition } } }, - '#withReadOnly':: d.fn(help='"Specify \\"true\\" to force and set the ReadOnly property in VolumeMounts to \\"true\\". If omitted, the default is \\"false\\". More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore"', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { inlineVolumeSpec+: { awsElasticBlockStore+: { readOnly: readOnly } } }, - '#withVolumeID':: d.fn(help='"Unique ID of the persistent disk resource in AWS (Amazon EBS volume). More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore"', args=[d.arg(name='volumeID', type=d.T.string)]), - withVolumeID(volumeID): { inlineVolumeSpec+: { awsElasticBlockStore+: { volumeID: volumeID } } }, - }, - '#azureDisk':: d.obj(help='"AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod."'), - azureDisk: { - '#withCachingMode':: d.fn(help='"Host Caching mode: None, Read Only, Read Write."', args=[d.arg(name='cachingMode', type=d.T.string)]), - withCachingMode(cachingMode): { inlineVolumeSpec+: { azureDisk+: { cachingMode: cachingMode } } }, - '#withDiskName':: d.fn(help='"The Name of the data disk in the blob storage"', args=[d.arg(name='diskName', type=d.T.string)]), - withDiskName(diskName): { inlineVolumeSpec+: { azureDisk+: { diskName: diskName } } }, - '#withDiskURI':: d.fn(help='"The URI the data disk in the blob storage"', args=[d.arg(name='diskURI', type=d.T.string)]), - withDiskURI(diskURI): { inlineVolumeSpec+: { azureDisk+: { diskURI: diskURI } } }, - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { inlineVolumeSpec+: { azureDisk+: { fsType: fsType } } }, - '#withKind':: d.fn(help='"Expected values Shared: multiple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared"', args=[d.arg(name='kind', type=d.T.string)]), - withKind(kind): { inlineVolumeSpec+: { azureDisk+: { kind: kind } } }, - '#withReadOnly':: d.fn(help='"Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { inlineVolumeSpec+: { azureDisk+: { readOnly: readOnly } } }, - }, - '#azureFile':: d.obj(help='"AzureFile represents an Azure File Service mount on the host and bind mount to the pod."'), - azureFile: { - '#withReadOnly':: d.fn(help='"Defaults to false (read/write). 
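Each of these nested builders only touches its own sub-path of inlineVolumeSpec, so calls compose freely with `+`. A sketch using the awsElasticBlockStore helpers shown above; the import path, volume ID and filesystem type are illustrative assumptions:

  local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';  // assumed vendor path
  local src = k.storage.v1.volumeAttachmentSource;

  src.inlineVolumeSpec.awsElasticBlockStore.withVolumeID('vol-0123456789abcdef0')  // hypothetical EBS volume
  + src.inlineVolumeSpec.awsElasticBlockStore.withFsType('ext4')
  + src.inlineVolumeSpec.awsElasticBlockStore.withReadOnly(true)
  // => { inlineVolumeSpec: { awsElasticBlockStore: { volumeID: 'vol-0123456789abcdef0', fsType: 'ext4', readOnly: true } } }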
ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { inlineVolumeSpec+: { azureFile+: { readOnly: readOnly } } }, - '#withSecretName':: d.fn(help='"the name of secret that contains Azure Storage Account Name and Key"', args=[d.arg(name='secretName', type=d.T.string)]), - withSecretName(secretName): { inlineVolumeSpec+: { azureFile+: { secretName: secretName } } }, - '#withSecretNamespace':: d.fn(help='"the namespace of the secret that contains Azure Storage Account Name and Key default is the same as the Pod"', args=[d.arg(name='secretNamespace', type=d.T.string)]), - withSecretNamespace(secretNamespace): { inlineVolumeSpec+: { azureFile+: { secretNamespace: secretNamespace } } }, - '#withShareName':: d.fn(help='"Share Name"', args=[d.arg(name='shareName', type=d.T.string)]), - withShareName(shareName): { inlineVolumeSpec+: { azureFile+: { shareName: shareName } } }, - }, - '#cephfs':: d.obj(help='"Represents a Ceph Filesystem mount that lasts the lifetime of a pod Cephfs volumes do not support ownership management or SELinux relabeling."'), - cephfs: { - '#secretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), - secretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { inlineVolumeSpec+: { cephfs+: { secretRef+: { name: name } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { inlineVolumeSpec+: { cephfs+: { secretRef+: { namespace: namespace } } } }, - }, - '#withMonitors':: d.fn(help='"Required: Monitors is a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='monitors', type=d.T.array)]), - withMonitors(monitors): { inlineVolumeSpec+: { cephfs+: { monitors: if std.isArray(v=monitors) then monitors else [monitors] } } }, - '#withMonitorsMixin':: d.fn(help='"Required: Monitors is a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='monitors', type=d.T.array)]), - withMonitorsMixin(monitors): { inlineVolumeSpec+: { cephfs+: { monitors+: if std.isArray(v=monitors) then monitors else [monitors] } } }, - '#withPath':: d.fn(help='"Optional: Used as the mounted root, rather than the full Ceph tree, default is /"', args=[d.arg(name='path', type=d.T.string)]), - withPath(path): { inlineVolumeSpec+: { cephfs+: { path: path } } }, - '#withReadOnly':: d.fn(help='"Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. 
More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { inlineVolumeSpec+: { cephfs+: { readOnly: readOnly } } }, - '#withSecretFile':: d.fn(help='"Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='secretFile', type=d.T.string)]), - withSecretFile(secretFile): { inlineVolumeSpec+: { cephfs+: { secretFile: secretFile } } }, - '#withUser':: d.fn(help='"Optional: User is the rados user name, default is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='user', type=d.T.string)]), - withUser(user): { inlineVolumeSpec+: { cephfs+: { user: user } } }, - }, - '#cinder':: d.obj(help='"Represents a cinder volume resource in Openstack. A Cinder volume must exist before mounting to a container. The volume must also be in the same region as the kubelet. Cinder volumes support ownership management and SELinux relabeling."'), - cinder: { - '#secretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), - secretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { inlineVolumeSpec+: { cinder+: { secretRef+: { name: name } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { inlineVolumeSpec+: { cinder+: { secretRef+: { namespace: namespace } } } }, - }, - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://examples.k8s.io/mysql-cinder-pd/README.md"', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { inlineVolumeSpec+: { cinder+: { fsType: fsType } } }, - '#withReadOnly':: d.fn(help='"Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/mysql-cinder-pd/README.md"', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { inlineVolumeSpec+: { cinder+: { readOnly: readOnly } } }, - '#withVolumeID':: d.fn(help='"volume id used to identify the volume in cinder. More info: https://examples.k8s.io/mysql-cinder-pd/README.md"', args=[d.arg(name='volumeID', type=d.T.string)]), - withVolumeID(volumeID): { inlineVolumeSpec+: { cinder+: { volumeID: volumeID } } }, - }, - '#claimRef':: d.obj(help='"ObjectReference contains enough information to let you inspect or modify the referred object."'), - claimRef: { - '#withApiVersion':: d.fn(help='"API version of the referent."', args=[d.arg(name='apiVersion', type=d.T.string)]), - withApiVersion(apiVersion): { inlineVolumeSpec+: { claimRef+: { apiVersion: apiVersion } } }, - '#withFieldPath':: d.fn(help='"If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. 
For example, if the object reference is to a container within a pod, this would take on a value like: \\"spec.containers{name}\\" (where \\"name\\" refers to the name of the container that triggered the event) or if no container name is specified \\"spec.containers[2]\\" (container with index 2 in this pod). This syntax is chosen only to have some well-defined way of referencing a part of an object."', args=[d.arg(name='fieldPath', type=d.T.string)]), - withFieldPath(fieldPath): { inlineVolumeSpec+: { claimRef+: { fieldPath: fieldPath } } }, - '#withKind':: d.fn(help='"Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds"', args=[d.arg(name='kind', type=d.T.string)]), - withKind(kind): { inlineVolumeSpec+: { claimRef+: { kind: kind } } }, - '#withName':: d.fn(help='"Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { inlineVolumeSpec+: { claimRef+: { name: name } } }, - '#withNamespace':: d.fn(help='"Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/"', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { inlineVolumeSpec+: { claimRef+: { namespace: namespace } } }, - '#withResourceVersion':: d.fn(help='"Specific resourceVersion to which this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), - withResourceVersion(resourceVersion): { inlineVolumeSpec+: { claimRef+: { resourceVersion: resourceVersion } } }, - '#withUid':: d.fn(help='"UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids"', args=[d.arg(name='uid', type=d.T.string)]), - withUid(uid): { inlineVolumeSpec+: { claimRef+: { uid: uid } } }, - }, - '#csi':: d.obj(help='"Represents storage that is managed by an external CSI volume driver (Beta feature)"'), - csi: { - '#controllerExpandSecretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), - controllerExpandSecretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { inlineVolumeSpec+: { csi+: { controllerExpandSecretRef+: { name: name } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { inlineVolumeSpec+: { csi+: { controllerExpandSecretRef+: { namespace: namespace } } } }, - }, - '#controllerPublishSecretRef':: d.obj(help='"SecretReference represents a Secret Reference. 
It has enough information to retrieve secret in any namespace"'), - controllerPublishSecretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { inlineVolumeSpec+: { csi+: { controllerPublishSecretRef+: { name: name } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { inlineVolumeSpec+: { csi+: { controllerPublishSecretRef+: { namespace: namespace } } } }, - }, - '#nodePublishSecretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), - nodePublishSecretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { inlineVolumeSpec+: { csi+: { nodePublishSecretRef+: { name: name } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { inlineVolumeSpec+: { csi+: { nodePublishSecretRef+: { namespace: namespace } } } }, - }, - '#nodeStageSecretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), - nodeStageSecretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { inlineVolumeSpec+: { csi+: { nodeStageSecretRef+: { name: name } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { inlineVolumeSpec+: { csi+: { nodeStageSecretRef+: { namespace: namespace } } } }, - }, - '#withDriver':: d.fn(help='"Driver is the name of the driver to use for this volume. Required."', args=[d.arg(name='driver', type=d.T.string)]), - withDriver(driver): { inlineVolumeSpec+: { csi+: { driver: driver } } }, - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\"."', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { inlineVolumeSpec+: { csi+: { fsType: fsType } } }, - '#withReadOnly':: d.fn(help='"Optional: The value to pass to ControllerPublishVolumeRequest. Defaults to false (read/write)."', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { inlineVolumeSpec+: { csi+: { readOnly: readOnly } } }, - '#withVolumeAttributes':: d.fn(help='"Attributes of the volume to publish."', args=[d.arg(name='volumeAttributes', type=d.T.object)]), - withVolumeAttributes(volumeAttributes): { inlineVolumeSpec+: { csi+: { volumeAttributes: volumeAttributes } } }, - '#withVolumeAttributesMixin':: d.fn(help='"Attributes of the volume to publish."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='volumeAttributes', type=d.T.object)]), - withVolumeAttributesMixin(volumeAttributes): { inlineVolumeSpec+: { csi+: { volumeAttributes+: volumeAttributes } } }, - '#withVolumeHandle':: d.fn(help='"VolumeHandle is the unique volume name returned by the CSI volume plugin’s CreateVolume to refer to the volume on all subsequent calls. 
Required."', args=[d.arg(name='volumeHandle', type=d.T.string)]), - withVolumeHandle(volumeHandle): { inlineVolumeSpec+: { csi+: { volumeHandle: volumeHandle } } }, - }, - '#fc':: d.obj(help='"Represents a Fibre Channel volume. Fibre Channel volumes can only be mounted as read/write once. Fibre Channel volumes support ownership management and SELinux relabeling."'), - fc: { - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { inlineVolumeSpec+: { fc+: { fsType: fsType } } }, - '#withLun':: d.fn(help='"Optional: FC target lun number"', args=[d.arg(name='lun', type=d.T.integer)]), - withLun(lun): { inlineVolumeSpec+: { fc+: { lun: lun } } }, - '#withReadOnly':: d.fn(help='"Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { inlineVolumeSpec+: { fc+: { readOnly: readOnly } } }, - '#withTargetWWNs':: d.fn(help='"Optional: FC target worldwide names (WWNs)"', args=[d.arg(name='targetWWNs', type=d.T.array)]), - withTargetWWNs(targetWWNs): { inlineVolumeSpec+: { fc+: { targetWWNs: if std.isArray(v=targetWWNs) then targetWWNs else [targetWWNs] } } }, - '#withTargetWWNsMixin':: d.fn(help='"Optional: FC target worldwide names (WWNs)"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='targetWWNs', type=d.T.array)]), - withTargetWWNsMixin(targetWWNs): { inlineVolumeSpec+: { fc+: { targetWWNs+: if std.isArray(v=targetWWNs) then targetWWNs else [targetWWNs] } } }, - '#withWwids':: d.fn(help='"Optional: FC volume world wide identifiers (wwids) Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously."', args=[d.arg(name='wwids', type=d.T.array)]), - withWwids(wwids): { inlineVolumeSpec+: { fc+: { wwids: if std.isArray(v=wwids) then wwids else [wwids] } } }, - '#withWwidsMixin':: d.fn(help='"Optional: FC volume world wide identifiers (wwids) Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='wwids', type=d.T.array)]), - withWwidsMixin(wwids): { inlineVolumeSpec+: { fc+: { wwids+: if std.isArray(v=wwids) then wwids else [wwids] } } }, - }, - '#flexVolume':: d.obj(help='"FlexPersistentVolumeSource represents a generic persistent volume resource that is provisioned/attached using an exec based plugin."'), - flexVolume: { - '#secretRef':: d.obj(help='"SecretReference represents a Secret Reference. 
It has enough information to retrieve secret in any namespace"'), - secretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { inlineVolumeSpec+: { flexVolume+: { secretRef+: { name: name } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { inlineVolumeSpec+: { flexVolume+: { secretRef+: { namespace: namespace } } } }, - }, - '#withDriver':: d.fn(help='"Driver is the name of the driver to use for this volume."', args=[d.arg(name='driver', type=d.T.string)]), - withDriver(driver): { inlineVolumeSpec+: { flexVolume+: { driver: driver } } }, - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". The default filesystem depends on FlexVolume script."', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { inlineVolumeSpec+: { flexVolume+: { fsType: fsType } } }, - '#withOptions':: d.fn(help='"Optional: Extra command options if any."', args=[d.arg(name='options', type=d.T.object)]), - withOptions(options): { inlineVolumeSpec+: { flexVolume+: { options: options } } }, - '#withOptionsMixin':: d.fn(help='"Optional: Extra command options if any."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='options', type=d.T.object)]), - withOptionsMixin(options): { inlineVolumeSpec+: { flexVolume+: { options+: options } } }, - '#withReadOnly':: d.fn(help='"Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { inlineVolumeSpec+: { flexVolume+: { readOnly: readOnly } } }, - }, - '#flocker':: d.obj(help='"Represents a Flocker volume mounted by the Flocker agent. One and only one of datasetName and datasetUUID should be set. Flocker volumes do not support ownership management or SELinux relabeling."'), - flocker: { - '#withDatasetName':: d.fn(help='"Name of the dataset stored as metadata -> name on the dataset for Flocker should be considered as deprecated"', args=[d.arg(name='datasetName', type=d.T.string)]), - withDatasetName(datasetName): { inlineVolumeSpec+: { flocker+: { datasetName: datasetName } } }, - '#withDatasetUUID':: d.fn(help='"UUID of the dataset. This is unique identifier of a Flocker dataset"', args=[d.arg(name='datasetUUID', type=d.T.string)]), - withDatasetUUID(datasetUUID): { inlineVolumeSpec+: { flocker+: { datasetUUID: datasetUUID } } }, - }, - '#gcePersistentDisk':: d.obj(help='"Represents a Persistent Disk resource in Google Compute Engine.\\n\\nA GCE PD must exist before mounting to a container. The disk must also be in the same GCE project and zone as the kubelet. A GCE PD can only be mounted as read/write once or read-only many times. GCE PDs support ownership management and SELinux relabeling."'), - gcePersistentDisk: { - '#withFsType':: d.fn(help='"Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { inlineVolumeSpec+: { gcePersistentDisk+: { fsType: fsType } } }, - '#withPartition':: d.fn(help='"The partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \\"1\\". Similarly, the volume partition for /dev/sda is \\"0\\" (or you can leave the property empty). More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='partition', type=d.T.integer)]), - withPartition(partition): { inlineVolumeSpec+: { gcePersistentDisk+: { partition: partition } } }, - '#withPdName':: d.fn(help='"Unique name of the PD resource in GCE. Used to identify the disk in GCE. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='pdName', type=d.T.string)]), - withPdName(pdName): { inlineVolumeSpec+: { gcePersistentDisk+: { pdName: pdName } } }, - '#withReadOnly':: d.fn(help='"ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { inlineVolumeSpec+: { gcePersistentDisk+: { readOnly: readOnly } } }, - }, - '#glusterfs':: d.obj(help='"Represents a Glusterfs mount that lasts the lifetime of a pod. Glusterfs volumes do not support ownership management or SELinux relabeling."'), - glusterfs: { - '#withEndpoints':: d.fn(help='"EndpointsName is the endpoint name that details Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='endpoints', type=d.T.string)]), - withEndpoints(endpoints): { inlineVolumeSpec+: { glusterfs+: { endpoints: endpoints } } }, - '#withEndpointsNamespace':: d.fn(help='"EndpointsNamespace is the namespace that contains Glusterfs endpoint. If this field is empty, the EndpointNamespace defaults to the same namespace as the bound PVC. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='endpointsNamespace', type=d.T.string)]), - withEndpointsNamespace(endpointsNamespace): { inlineVolumeSpec+: { glusterfs+: { endpointsNamespace: endpointsNamespace } } }, - '#withPath':: d.fn(help='"Path is the Glusterfs volume path. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='path', type=d.T.string)]), - withPath(path): { inlineVolumeSpec+: { glusterfs+: { path: path } } }, - '#withReadOnly':: d.fn(help='"ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions. Defaults to false. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { inlineVolumeSpec+: { glusterfs+: { readOnly: readOnly } } }, - }, - '#hostPath':: d.obj(help='"Represents a host path mapped into a pod. Host path volumes do not support ownership management or SELinux relabeling."'), - hostPath: { - '#withPath':: d.fn(help='"Path of the directory on the host. If the path is a symlink, it will follow the link to the real path. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath"', args=[d.arg(name='path', type=d.T.string)]), - withPath(path): { inlineVolumeSpec+: { hostPath+: { path: path } } }, - '#withType':: d.fn(help='"Type for HostPath Volume Defaults to \\"\\" More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath"', args=[d.arg(name='type', type=d.T.string)]), - withType(type): { inlineVolumeSpec+: { hostPath+: { type: type } } }, - }, - '#iscsi':: d.obj(help='"ISCSIPersistentVolumeSource represents an ISCSI disk. ISCSI volumes can only be mounted as read/write once. ISCSI volumes support ownership management and SELinux relabeling."'), - iscsi: { - '#secretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), - secretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { inlineVolumeSpec+: { iscsi+: { secretRef+: { name: name } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { inlineVolumeSpec+: { iscsi+: { secretRef+: { namespace: namespace } } } }, - }, - '#withChapAuthDiscovery':: d.fn(help='"whether support iSCSI Discovery CHAP authentication"', args=[d.arg(name='chapAuthDiscovery', type=d.T.boolean)]), - withChapAuthDiscovery(chapAuthDiscovery): { inlineVolumeSpec+: { iscsi+: { chapAuthDiscovery: chapAuthDiscovery } } }, - '#withChapAuthSession':: d.fn(help='"whether support iSCSI Session CHAP authentication"', args=[d.arg(name='chapAuthSession', type=d.T.boolean)]), - withChapAuthSession(chapAuthSession): { inlineVolumeSpec+: { iscsi+: { chapAuthSession: chapAuthSession } } }, - '#withFsType':: d.fn(help='"Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi"', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { inlineVolumeSpec+: { iscsi+: { fsType: fsType } } }, - '#withInitiatorName':: d.fn(help='"Custom iSCSI Initiator Name. If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface : will be created for the connection."', args=[d.arg(name='initiatorName', type=d.T.string)]), - withInitiatorName(initiatorName): { inlineVolumeSpec+: { iscsi+: { initiatorName: initiatorName } } }, - '#withIqn':: d.fn(help='"Target iSCSI Qualified Name."', args=[d.arg(name='iqn', type=d.T.string)]), - withIqn(iqn): { inlineVolumeSpec+: { iscsi+: { iqn: iqn } } }, - '#withIscsiInterface':: d.fn(help="\"iSCSI Interface Name that uses an iSCSI transport. Defaults to 'default' (tcp).\"", args=[d.arg(name='iscsiInterface', type=d.T.string)]), - withIscsiInterface(iscsiInterface): { inlineVolumeSpec+: { iscsi+: { iscsiInterface: iscsiInterface } } }, - '#withLun':: d.fn(help='"iSCSI Target Lun number."', args=[d.arg(name='lun', type=d.T.integer)]), - withLun(lun): { inlineVolumeSpec+: { iscsi+: { lun: lun } } }, - '#withPortals':: d.fn(help='"iSCSI Target Portal List. 
The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260)."', args=[d.arg(name='portals', type=d.T.array)]), - withPortals(portals): { inlineVolumeSpec+: { iscsi+: { portals: if std.isArray(v=portals) then portals else [portals] } } }, - '#withPortalsMixin':: d.fn(help='"iSCSI Target Portal List. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260)."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='portals', type=d.T.array)]), - withPortalsMixin(portals): { inlineVolumeSpec+: { iscsi+: { portals+: if std.isArray(v=portals) then portals else [portals] } } }, - '#withReadOnly':: d.fn(help='"ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false."', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { inlineVolumeSpec+: { iscsi+: { readOnly: readOnly } } }, - '#withTargetPortal':: d.fn(help='"iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260)."', args=[d.arg(name='targetPortal', type=d.T.string)]), - withTargetPortal(targetPortal): { inlineVolumeSpec+: { iscsi+: { targetPortal: targetPortal } } }, - }, - '#local':: d.obj(help='"Local represents directly-attached storage with node affinity (Beta feature)"'), - 'local': { - '#withFsType':: d.fn(help='"Filesystem type to mount. It applies only when the Path is a block device. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". The default value is to auto-select a fileystem if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { inlineVolumeSpec+: { 'local'+: { fsType: fsType } } }, - '#withPath':: d.fn(help='"The full path to the volume on the node. It can be either a directory or block device (disk, partition, ...)."', args=[d.arg(name='path', type=d.T.string)]), - withPath(path): { inlineVolumeSpec+: { 'local'+: { path: path } } }, - }, - '#nfs':: d.obj(help='"Represents an NFS mount that lasts the lifetime of a pod. NFS volumes do not support ownership management or SELinux relabeling."'), - nfs: { - '#withPath':: d.fn(help='"Path that is exported by the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs"', args=[d.arg(name='path', type=d.T.string)]), - withPath(path): { inlineVolumeSpec+: { nfs+: { path: path } } }, - '#withReadOnly':: d.fn(help='"ReadOnly here will force the NFS export to be mounted with read-only permissions. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs"', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { inlineVolumeSpec+: { nfs+: { readOnly: readOnly } } }, - '#withServer':: d.fn(help='"Server is the hostname or IP address of the NFS server. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs"', args=[d.arg(name='server', type=d.T.string)]), - withServer(server): { inlineVolumeSpec+: { nfs+: { server: server } } }, - }, - '#nodeAffinity':: d.obj(help='"VolumeNodeAffinity defines constraints that limit what nodes this volume can be accessed from."'), - nodeAffinity: { - '#required':: d.obj(help='"A node selector represents the union of the results of one or more label queries over a set of nodes; that is, it represents the OR of the selectors represented by the node selector terms."'), - required: { - '#withNodeSelectorTerms':: d.fn(help='"Required. A list of node selector terms. The terms are ORed."', args=[d.arg(name='nodeSelectorTerms', type=d.T.array)]), - withNodeSelectorTerms(nodeSelectorTerms): { inlineVolumeSpec+: { nodeAffinity+: { required+: { nodeSelectorTerms: if std.isArray(v=nodeSelectorTerms) then nodeSelectorTerms else [nodeSelectorTerms] } } } }, - '#withNodeSelectorTermsMixin':: d.fn(help='"Required. A list of node selector terms. The terms are ORed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='nodeSelectorTerms', type=d.T.array)]), - withNodeSelectorTermsMixin(nodeSelectorTerms): { inlineVolumeSpec+: { nodeAffinity+: { required+: { nodeSelectorTerms+: if std.isArray(v=nodeSelectorTerms) then nodeSelectorTerms else [nodeSelectorTerms] } } } }, - }, - }, - '#photonPersistentDisk':: d.obj(help='"Represents a Photon Controller persistent disk resource."'), - photonPersistentDisk: { - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { inlineVolumeSpec+: { photonPersistentDisk+: { fsType: fsType } } }, - '#withPdID':: d.fn(help='"ID that identifies Photon Controller persistent disk"', args=[d.arg(name='pdID', type=d.T.string)]), - withPdID(pdID): { inlineVolumeSpec+: { photonPersistentDisk+: { pdID: pdID } } }, - }, - '#portworxVolume':: d.obj(help='"PortworxVolumeSource represents a Portworx volume resource."'), - portworxVolume: { - '#withFsType':: d.fn(help='"FSType represents the filesystem type to mount Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { inlineVolumeSpec+: { portworxVolume+: { fsType: fsType } } }, - '#withReadOnly':: d.fn(help='"Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { inlineVolumeSpec+: { portworxVolume+: { readOnly: readOnly } } }, - '#withVolumeID':: d.fn(help='"VolumeID uniquely identifies a Portworx volume"', args=[d.arg(name='volumeID', type=d.T.string)]), - withVolumeID(volumeID): { inlineVolumeSpec+: { portworxVolume+: { volumeID: volumeID } } }, - }, - '#quobyte':: d.obj(help='"Represents a Quobyte mount that lasts the lifetime of a pod. 
Quobyte volumes do not support ownership management or SELinux relabeling."'), - quobyte: { - '#withGroup':: d.fn(help='"Group to map volume access to Default is no group"', args=[d.arg(name='group', type=d.T.string)]), - withGroup(group): { inlineVolumeSpec+: { quobyte+: { group: group } } }, - '#withReadOnly':: d.fn(help='"ReadOnly here will force the Quobyte volume to be mounted with read-only permissions. Defaults to false."', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { inlineVolumeSpec+: { quobyte+: { readOnly: readOnly } } }, - '#withRegistry':: d.fn(help='"Registry represents a single or multiple Quobyte Registry services specified as a string as host:port pair (multiple entries are separated with commas) which acts as the central registry for volumes"', args=[d.arg(name='registry', type=d.T.string)]), - withRegistry(registry): { inlineVolumeSpec+: { quobyte+: { registry: registry } } }, - '#withTenant':: d.fn(help='"Tenant owning the given Quobyte volume in the Backend Used with dynamically provisioned Quobyte volumes, value is set by the plugin"', args=[d.arg(name='tenant', type=d.T.string)]), - withTenant(tenant): { inlineVolumeSpec+: { quobyte+: { tenant: tenant } } }, - '#withUser':: d.fn(help='"User to map volume access to Defaults to serivceaccount user"', args=[d.arg(name='user', type=d.T.string)]), - withUser(user): { inlineVolumeSpec+: { quobyte+: { user: user } } }, - '#withVolume':: d.fn(help='"Volume is a string that references an already created Quobyte volume by name."', args=[d.arg(name='volume', type=d.T.string)]), - withVolume(volume): { inlineVolumeSpec+: { quobyte+: { volume: volume } } }, - }, - '#rbd':: d.obj(help='"Represents a Rados Block Device mount that lasts the lifetime of a pod. RBD volumes support ownership management and SELinux relabeling."'), - rbd: { - '#secretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), - secretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { inlineVolumeSpec+: { rbd+: { secretRef+: { name: name } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { inlineVolumeSpec+: { rbd+: { secretRef+: { namespace: namespace } } } }, - }, - '#withFsType':: d.fn(help='"Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd"', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { inlineVolumeSpec+: { rbd+: { fsType: fsType } } }, - '#withImage':: d.fn(help='"The rados image name. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='image', type=d.T.string)]), - withImage(image): { inlineVolumeSpec+: { rbd+: { image: image } } }, - '#withKeyring':: d.fn(help='"Keyring is the path to key ring for RBDUser. Default is /etc/ceph/keyring. 
More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='keyring', type=d.T.string)]), - withKeyring(keyring): { inlineVolumeSpec+: { rbd+: { keyring: keyring } } }, - '#withMonitors':: d.fn(help='"A collection of Ceph monitors. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='monitors', type=d.T.array)]), - withMonitors(monitors): { inlineVolumeSpec+: { rbd+: { monitors: if std.isArray(v=monitors) then monitors else [monitors] } } }, - '#withMonitorsMixin':: d.fn(help='"A collection of Ceph monitors. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='monitors', type=d.T.array)]), - withMonitorsMixin(monitors): { inlineVolumeSpec+: { rbd+: { monitors+: if std.isArray(v=monitors) then monitors else [monitors] } } }, - '#withPool':: d.fn(help='"The rados pool name. Default is rbd. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='pool', type=d.T.string)]), - withPool(pool): { inlineVolumeSpec+: { rbd+: { pool: pool } } }, - '#withReadOnly':: d.fn(help='"ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { inlineVolumeSpec+: { rbd+: { readOnly: readOnly } } }, - '#withUser':: d.fn(help='"The rados user name. Default is admin. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='user', type=d.T.string)]), - withUser(user): { inlineVolumeSpec+: { rbd+: { user: user } } }, - }, - '#scaleIO':: d.obj(help='"ScaleIOPersistentVolumeSource represents a persistent ScaleIO volume"'), - scaleIO: { - '#secretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), - secretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { inlineVolumeSpec+: { scaleIO+: { secretRef+: { name: name } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { inlineVolumeSpec+: { scaleIO+: { secretRef+: { namespace: namespace } } } }, - }, - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Default is \\"xfs\\', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { inlineVolumeSpec+: { scaleIO+: { fsType: fsType } } }, - '#withGateway':: d.fn(help='"The host address of the ScaleIO API Gateway."', args=[d.arg(name='gateway', type=d.T.string)]), - withGateway(gateway): { inlineVolumeSpec+: { scaleIO+: { gateway: gateway } } }, - '#withProtectionDomain':: d.fn(help='"The name of the ScaleIO Protection Domain for the configured storage."', args=[d.arg(name='protectionDomain', type=d.T.string)]), - withProtectionDomain(protectionDomain): { inlineVolumeSpec+: { scaleIO+: { protectionDomain: protectionDomain } } }, - '#withReadOnly':: d.fn(help='"Defaults to false (read/write). 
ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { inlineVolumeSpec+: { scaleIO+: { readOnly: readOnly } } }, - '#withSslEnabled':: d.fn(help='"Flag to enable/disable SSL communication with Gateway, default false"', args=[d.arg(name='sslEnabled', type=d.T.boolean)]), - withSslEnabled(sslEnabled): { inlineVolumeSpec+: { scaleIO+: { sslEnabled: sslEnabled } } }, - '#withStorageMode':: d.fn(help='"Indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. Default is ThinProvisioned."', args=[d.arg(name='storageMode', type=d.T.string)]), - withStorageMode(storageMode): { inlineVolumeSpec+: { scaleIO+: { storageMode: storageMode } } }, - '#withStoragePool':: d.fn(help='"The ScaleIO Storage Pool associated with the protection domain."', args=[d.arg(name='storagePool', type=d.T.string)]), - withStoragePool(storagePool): { inlineVolumeSpec+: { scaleIO+: { storagePool: storagePool } } }, - '#withSystem':: d.fn(help='"The name of the storage system as configured in ScaleIO."', args=[d.arg(name='system', type=d.T.string)]), - withSystem(system): { inlineVolumeSpec+: { scaleIO+: { system: system } } }, - '#withVolumeName':: d.fn(help='"The name of a volume already created in the ScaleIO system that is associated with this volume source."', args=[d.arg(name='volumeName', type=d.T.string)]), - withVolumeName(volumeName): { inlineVolumeSpec+: { scaleIO+: { volumeName: volumeName } } }, - }, - '#storageos':: d.obj(help='"Represents a StorageOS persistent volume resource."'), - storageos: { - '#secretRef':: d.obj(help='"ObjectReference contains enough information to let you inspect or modify the referred object."'), - secretRef: { - '#withApiVersion':: d.fn(help='"API version of the referent."', args=[d.arg(name='apiVersion', type=d.T.string)]), - withApiVersion(apiVersion): { inlineVolumeSpec+: { storageos+: { secretRef+: { apiVersion: apiVersion } } } }, - '#withFieldPath':: d.fn(help='"If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. For example, if the object reference is to a container within a pod, this would take on a value like: \\"spec.containers{name}\\" (where \\"name\\" refers to the name of the container that triggered the event) or if no container name is specified \\"spec.containers[2]\\" (container with index 2 in this pod). This syntax is chosen only to have some well-defined way of referencing a part of an object."', args=[d.arg(name='fieldPath', type=d.T.string)]), - withFieldPath(fieldPath): { inlineVolumeSpec+: { storageos+: { secretRef+: { fieldPath: fieldPath } } } }, - '#withKind':: d.fn(help='"Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds"', args=[d.arg(name='kind', type=d.T.string)]), - withKind(kind): { inlineVolumeSpec+: { storageos+: { secretRef+: { kind: kind } } } }, - '#withName':: d.fn(help='"Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { inlineVolumeSpec+: { storageos+: { secretRef+: { name: name } } } }, - '#withNamespace':: d.fn(help='"Namespace of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/"', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { inlineVolumeSpec+: { storageos+: { secretRef+: { namespace: namespace } } } }, - '#withResourceVersion':: d.fn(help='"Specific resourceVersion to which this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), - withResourceVersion(resourceVersion): { inlineVolumeSpec+: { storageos+: { secretRef+: { resourceVersion: resourceVersion } } } }, - '#withUid':: d.fn(help='"UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids"', args=[d.arg(name='uid', type=d.T.string)]), - withUid(uid): { inlineVolumeSpec+: { storageos+: { secretRef+: { uid: uid } } } }, - }, - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { inlineVolumeSpec+: { storageos+: { fsType: fsType } } }, - '#withReadOnly':: d.fn(help='"Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { inlineVolumeSpec+: { storageos+: { readOnly: readOnly } } }, - '#withVolumeName':: d.fn(help='"VolumeName is the human-readable name of the StorageOS volume. Volume names are only unique within a namespace."', args=[d.arg(name='volumeName', type=d.T.string)]), - withVolumeName(volumeName): { inlineVolumeSpec+: { storageos+: { volumeName: volumeName } } }, - '#withVolumeNamespace':: d.fn(help="\"VolumeNamespace specifies the scope of the volume within StorageOS. If no namespace is specified then the Pod's namespace will be used. This allows the Kubernetes name scoping to be mirrored within StorageOS for tighter integration. Set VolumeName to any name to override the default behaviour. Set to \\\"default\\\" if you are not using namespaces within StorageOS. Namespaces that do not pre-exist within StorageOS will be created.\"", args=[d.arg(name='volumeNamespace', type=d.T.string)]), - withVolumeNamespace(volumeNamespace): { inlineVolumeSpec+: { storageos+: { volumeNamespace: volumeNamespace } } }, - }, - '#vsphereVolume':: d.obj(help='"Represents a vSphere volume resource."'), - vsphereVolume: { - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". 
Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { inlineVolumeSpec+: { vsphereVolume+: { fsType: fsType } } }, - '#withStoragePolicyID':: d.fn(help='"Storage Policy Based Management (SPBM) profile ID associated with the StoragePolicyName."', args=[d.arg(name='storagePolicyID', type=d.T.string)]), - withStoragePolicyID(storagePolicyID): { inlineVolumeSpec+: { vsphereVolume+: { storagePolicyID: storagePolicyID } } }, - '#withStoragePolicyName':: d.fn(help='"Storage Policy Based Management (SPBM) profile name."', args=[d.arg(name='storagePolicyName', type=d.T.string)]), - withStoragePolicyName(storagePolicyName): { inlineVolumeSpec+: { vsphereVolume+: { storagePolicyName: storagePolicyName } } }, - '#withVolumePath':: d.fn(help='"Path that identifies vSphere volume vmdk"', args=[d.arg(name='volumePath', type=d.T.string)]), - withVolumePath(volumePath): { inlineVolumeSpec+: { vsphereVolume+: { volumePath: volumePath } } }, - }, - '#withAccessModes':: d.fn(help='"AccessModes contains all ways the volume can be mounted. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes"', args=[d.arg(name='accessModes', type=d.T.array)]), - withAccessModes(accessModes): { inlineVolumeSpec+: { accessModes: if std.isArray(v=accessModes) then accessModes else [accessModes] } }, - '#withAccessModesMixin':: d.fn(help='"AccessModes contains all ways the volume can be mounted. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='accessModes', type=d.T.array)]), - withAccessModesMixin(accessModes): { inlineVolumeSpec+: { accessModes+: if std.isArray(v=accessModes) then accessModes else [accessModes] } }, - '#withCapacity':: d.fn(help="\"A description of the persistent volume's resources and capacity. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity\"", args=[d.arg(name='capacity', type=d.T.object)]), - withCapacity(capacity): { inlineVolumeSpec+: { capacity: capacity } }, - '#withCapacityMixin':: d.fn(help="\"A description of the persistent volume's resources and capacity. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='capacity', type=d.T.object)]), - withCapacityMixin(capacity): { inlineVolumeSpec+: { capacity+: capacity } }, - '#withMountOptions':: d.fn(help='"A list of mount options, e.g. [\\"ro\\", \\"soft\\"]. Not validated - mount will simply fail if one is invalid. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#mount-options"', args=[d.arg(name='mountOptions', type=d.T.array)]), - withMountOptions(mountOptions): { inlineVolumeSpec+: { mountOptions: if std.isArray(v=mountOptions) then mountOptions else [mountOptions] } }, - '#withMountOptionsMixin':: d.fn(help='"A list of mount options, e.g. [\\"ro\\", \\"soft\\"]. Not validated - mount will simply fail if one is invalid. 
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#mount-options"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='mountOptions', type=d.T.array)]), - withMountOptionsMixin(mountOptions): { inlineVolumeSpec+: { mountOptions+: if std.isArray(v=mountOptions) then mountOptions else [mountOptions] } }, - '#withPersistentVolumeReclaimPolicy':: d.fn(help='"What happens to a persistent volume when released from its claim. Valid options are Retain (default for manually created PersistentVolumes), Delete (default for dynamically provisioned PersistentVolumes), and Recycle (deprecated). Recycle must be supported by the volume plugin underlying this PersistentVolume. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#reclaiming"', args=[d.arg(name='persistentVolumeReclaimPolicy', type=d.T.string)]), - withPersistentVolumeReclaimPolicy(persistentVolumeReclaimPolicy): { inlineVolumeSpec+: { persistentVolumeReclaimPolicy: persistentVolumeReclaimPolicy } }, - '#withStorageClassName':: d.fn(help='"Name of StorageClass to which this persistent volume belongs. Empty value means that this volume does not belong to any StorageClass."', args=[d.arg(name='storageClassName', type=d.T.string)]), - withStorageClassName(storageClassName): { inlineVolumeSpec+: { storageClassName: storageClassName } }, - '#withVolumeMode':: d.fn(help='"volumeMode defines if a volume is intended to be used with a formatted filesystem or to remain in raw block state. Value of Filesystem is implied when not included in spec."', args=[d.arg(name='volumeMode', type=d.T.string)]), - withVolumeMode(volumeMode): { inlineVolumeSpec+: { volumeMode: volumeMode } }, - }, - '#withPersistentVolumeName':: d.fn(help='"Name of the persistent volume to attach."', args=[d.arg(name='persistentVolumeName', type=d.T.string)]), - withPersistentVolumeName(persistentVolumeName): { persistentVolumeName: persistentVolumeName }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1/volumeAttachmentStatus.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1/volumeAttachmentStatus.libsonnet deleted file mode 100644 index 638334f0b9d..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1/volumeAttachmentStatus.libsonnet +++ /dev/null @@ -1,26 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='volumeAttachmentStatus', url='', help='"VolumeAttachmentStatus is the status of a VolumeAttachment request."'), - '#attachError':: d.obj(help='"VolumeError captures an error encountered during a volume operation."'), - attachError: { - '#withMessage':: d.fn(help='"String detailing the error encountered during Attach or Detach operation. This string may be logged, so it should not contain sensitive information."', args=[d.arg(name='message', type=d.T.string)]), - withMessage(message): { attachError+: { message: message } }, - '#withTime':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. 
Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='time', type=d.T.string)]), - withTime(time): { attachError+: { time: time } }, - }, - '#detachError':: d.obj(help='"VolumeError captures an error encountered during a volume operation."'), - detachError: { - '#withMessage':: d.fn(help='"String detailing the error encountered during Attach or Detach operation. This string may be logged, so it should not contain sensitive information."', args=[d.arg(name='message', type=d.T.string)]), - withMessage(message): { detachError+: { message: message } }, - '#withTime':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='time', type=d.T.string)]), - withTime(time): { detachError+: { time: time } }, - }, - '#withAttached':: d.fn(help='"Indicates the volume is successfully attached. This field must only be set by the entity completing the attach operation, i.e. the external-attacher."', args=[d.arg(name='attached', type=d.T.boolean)]), - withAttached(attached): { attached: attached }, - '#withAttachmentMetadata':: d.fn(help='"Upon successful attach, this field is populated with any information returned by the attach operation that must be passed into subsequent WaitForAttach or Mount calls. This field must only be set by the entity completing the attach operation, i.e. the external-attacher."', args=[d.arg(name='attachmentMetadata', type=d.T.object)]), - withAttachmentMetadata(attachmentMetadata): { attachmentMetadata: attachmentMetadata }, - '#withAttachmentMetadataMixin':: d.fn(help='"Upon successful attach, this field is populated with any information returned by the attach operation that must be passed into subsequent WaitForAttach or Mount calls. This field must only be set by the entity completing the attach operation, i.e. the external-attacher."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='attachmentMetadata', type=d.T.object)]), - withAttachmentMetadataMixin(attachmentMetadata): { attachmentMetadata+: attachmentMetadata }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1/volumeNodeResources.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1/volumeNodeResources.libsonnet deleted file mode 100644 index d9894f93089..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1/volumeNodeResources.libsonnet +++ /dev/null @@ -1,8 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='volumeNodeResources', url='', help='"VolumeNodeResources is a set of resource limits for scheduling of volumes."'), - '#withCount':: d.fn(help='"Maximum number of unique volumes managed by the CSI driver that can be used on a node. A volume that is both attached and mounted on a node is considered to be used once, not twice. The same rule applies for a unique volume that is shared among multiple pods on the same node. 
If this field is not specified, then the supported number of volumes on this node is unbounded."', args=[d.arg(name='count', type=d.T.integer)]), - withCount(count): { count: count }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1alpha1/csiStorageCapacity.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1alpha1/csiStorageCapacity.libsonnet deleted file mode 100644 index e5f0a9d679a..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1alpha1/csiStorageCapacity.libsonnet +++ /dev/null @@ -1,73 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='csiStorageCapacity', url='', help='"CSIStorageCapacity stores the result of one CSI GetCapacity call. For a given StorageClass, this describes the available capacity in a particular topology segment. This can be used when considering where to instantiate new PersistentVolumes.\\n\\nFor example this can express things like: - StorageClass \\"standard\\" has \\"1234 GiB\\" available in \\"topology.kubernetes.io/zone=us-east1\\" - StorageClass \\"localssd\\" has \\"10 GiB\\" available in \\"kubernetes.io/hostname=knode-abc123\\"\\n\\nThe following three cases all imply that no capacity is available for a certain combination: - no object exists with suitable topology and storage class name - such an object exists, but the capacity is unset - such an object exists, but the capacity is zero\\n\\nThe producer of these objects can decide which approach is more suitable.\\n\\nThey are consumed by the kube-scheduler if the CSIStorageCapacity beta feature gate is enabled there and a CSI driver opts into capacity-aware scheduling with CSIDriver.StorageCapacity."'), - '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), - metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), - withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), - withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, - '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. 
Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), - withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, - '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), - withDeletionGracePeriodSeconds(deletionGracePeriodSeconds): { metadata+: { deletionGracePeriodSeconds: deletionGracePeriodSeconds } }, - '#withDeletionTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='deletionTimestamp', type=d.T.string)]), - withDeletionTimestamp(deletionTimestamp): { metadata+: { deletionTimestamp: deletionTimestamp } }, - '#withFinalizers':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. 
If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), - withGenerateName(generateName): { metadata+: { generateName: generateName } }, - '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), - withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), - withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), - withLabelsMixin(labels): { metadata+: { labels+: labels } }, - '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. 
Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { metadata+: { namespace: namespace } }, - '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, - '#withOwnerReferencesMixin':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, - '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), - withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), - withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. 
It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), - withUid(uid): { metadata+: { uid: uid } }, - }, - '#new':: d.fn(help='new returns an instance of CSIStorageCapacity', args=[d.arg(name='name', type=d.T.string)]), - new(name): { - apiVersion: 'storage.k8s.io/v1alpha1', - kind: 'CSIStorageCapacity', - } + self.metadata.withName(name=name), - '#nodeTopology':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), - nodeTopology: { - '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), - withMatchExpressions(matchExpressions): { nodeTopology+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } }, - '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), - withMatchExpressionsMixin(matchExpressions): { nodeTopology+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } }, - '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), - withMatchLabels(matchLabels): { nodeTopology+: { matchLabels: matchLabels } }, - '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), - withMatchLabelsMixin(matchLabels): { nodeTopology+: { matchLabels+: matchLabels } }, - }, - '#withCapacity':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. 
| .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='capacity', type=d.T.string)]), - withCapacity(capacity): { capacity: capacity }, - '#withMaximumVolumeSize':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. 
| .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='maximumVolumeSize', type=d.T.string)]), - withMaximumVolumeSize(maximumVolumeSize): { maximumVolumeSize: maximumVolumeSize }, - '#withStorageClassName':: d.fn(help='"The name of the StorageClass that the reported capacity applies to. It must meet the same requirements as the name of a StorageClass object (non-empty, DNS subdomain). If that object no longer exists, the CSIStorageCapacity object is obsolete and should be removed by its creator. 
This field is immutable."', args=[d.arg(name='storageClassName', type=d.T.string)]),
- withStorageClassName(storageClassName): { storageClassName: storageClassName },
- '#mixin': 'ignore',
- mixin: self,
-}
diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1alpha1/main.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1alpha1/main.libsonnet
deleted file mode 100644
index d84b7da6f7f..00000000000
--- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1alpha1/main.libsonnet
+++ /dev/null
@@ -1,10 +0,0 @@
-{
- local d = (import 'doc-util/main.libsonnet'),
- '#':: d.pkg(name='v1alpha1', url='', help=''),
- csiStorageCapacity: (import 'csiStorageCapacity.libsonnet'),
- volumeAttachment: (import 'volumeAttachment.libsonnet'),
- volumeAttachmentSource: (import 'volumeAttachmentSource.libsonnet'),
- volumeAttachmentSpec: (import 'volumeAttachmentSpec.libsonnet'),
- volumeAttachmentStatus: (import 'volumeAttachmentStatus.libsonnet'),
- volumeError: (import 'volumeError.libsonnet'),
-}
diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1alpha1/volumeAttachment.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1alpha1/volumeAttachment.libsonnet
deleted file mode 100644
index 1ca30669fb4..00000000000
--- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1alpha1/volumeAttachment.libsonnet
+++ /dev/null
@@ -1,479 +0,0 @@
-{
- local d = (import 'doc-util/main.libsonnet'),
- '#':: d.pkg(name='volumeAttachment', url='', help='"VolumeAttachment captures the intent to attach or detach the specified volume to/from the specified node.\\n\\nVolumeAttachment objects are non-namespaced."'),
- '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'),
- metadata: {
- '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]),
- withAnnotations(annotations): { metadata+: { annotations: annotations } },
- '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]),
- withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } },
- '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters.
This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, - '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), - withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, - '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), - withDeletionGracePeriodSeconds(deletionGracePeriodSeconds): { metadata+: { deletionGracePeriodSeconds: deletionGracePeriodSeconds } }, - '#withDeletionTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='deletionTimestamp', type=d.T.string)]), - withDeletionTimestamp(deletionTimestamp): { metadata+: { deletionTimestamp: deletionTimestamp } }, - '#withFinalizers':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. 
Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), - withGenerateName(generateName): { metadata+: { generateName: generateName } }, - '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), - withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), - withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), - withLabelsMixin(labels): { metadata+: { labels+: labels } }, - '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. 
A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { metadata+: { namespace: namespace } }, - '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, - '#withOwnerReferencesMixin':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, - '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), - withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), - withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), - withUid(uid): { metadata+: { uid: uid } }, - }, - '#new':: d.fn(help='new returns an instance of VolumeAttachment', args=[d.arg(name='name', type=d.T.string)]), - new(name): { - apiVersion: 'storage.k8s.io/v1alpha1', - kind: 'VolumeAttachment', - } + self.metadata.withName(name=name), - '#spec':: d.obj(help='"VolumeAttachmentSpec is the specification of a VolumeAttachment request."'), - spec: { - '#source':: d.obj(help='"VolumeAttachmentSource represents a volume that should be attached. Right now only PersistenVolumes can be attached via external attacher, in future we may allow also inline volumes in pods. Exactly one member can be set."'), - source: { - '#inlineVolumeSpec':: d.obj(help='"PersistentVolumeSpec is the specification of a persistent volume."'), - inlineVolumeSpec: { - '#awsElasticBlockStore':: d.obj(help='"Represents a Persistent Disk resource in AWS.\\n\\nAn AWS EBS disk must exist before mounting to a container. The disk must also be in the same AWS zone as the kubelet. An AWS EBS disk can only be mounted as read/write once. AWS EBS volumes support ownership management and SELinux relabeling."'), - awsElasticBlockStore: { - '#withFsType':: d.fn(help='"Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore"', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { spec+: { source+: { inlineVolumeSpec+: { awsElasticBlockStore+: { fsType: fsType } } } } }, - '#withPartition':: d.fn(help='"The partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \\"1\\". Similarly, the volume partition for /dev/sda is \\"0\\" (or you can leave the property empty)."', args=[d.arg(name='partition', type=d.T.integer)]), - withPartition(partition): { spec+: { source+: { inlineVolumeSpec+: { awsElasticBlockStore+: { partition: partition } } } } }, - '#withReadOnly':: d.fn(help='"Specify \\"true\\" to force and set the ReadOnly property in VolumeMounts to \\"true\\". If omitted, the default is \\"false\\". 
More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore"', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { spec+: { source+: { inlineVolumeSpec+: { awsElasticBlockStore+: { readOnly: readOnly } } } } }, - '#withVolumeID':: d.fn(help='"Unique ID of the persistent disk resource in AWS (Amazon EBS volume). More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore"', args=[d.arg(name='volumeID', type=d.T.string)]), - withVolumeID(volumeID): { spec+: { source+: { inlineVolumeSpec+: { awsElasticBlockStore+: { volumeID: volumeID } } } } }, - }, - '#azureDisk':: d.obj(help='"AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod."'), - azureDisk: { - '#withCachingMode':: d.fn(help='"Host Caching mode: None, Read Only, Read Write."', args=[d.arg(name='cachingMode', type=d.T.string)]), - withCachingMode(cachingMode): { spec+: { source+: { inlineVolumeSpec+: { azureDisk+: { cachingMode: cachingMode } } } } }, - '#withDiskName':: d.fn(help='"The Name of the data disk in the blob storage"', args=[d.arg(name='diskName', type=d.T.string)]), - withDiskName(diskName): { spec+: { source+: { inlineVolumeSpec+: { azureDisk+: { diskName: diskName } } } } }, - '#withDiskURI':: d.fn(help='"The URI the data disk in the blob storage"', args=[d.arg(name='diskURI', type=d.T.string)]), - withDiskURI(diskURI): { spec+: { source+: { inlineVolumeSpec+: { azureDisk+: { diskURI: diskURI } } } } }, - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { spec+: { source+: { inlineVolumeSpec+: { azureDisk+: { fsType: fsType } } } } }, - '#withKind':: d.fn(help='"Expected values Shared: multiple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared"', args=[d.arg(name='kind', type=d.T.string)]), - withKind(kind): { spec+: { source+: { inlineVolumeSpec+: { azureDisk+: { kind: kind } } } } }, - '#withReadOnly':: d.fn(help='"Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { spec+: { source+: { inlineVolumeSpec+: { azureDisk+: { readOnly: readOnly } } } } }, - }, - '#azureFile':: d.obj(help='"AzureFile represents an Azure File Service mount on the host and bind mount to the pod."'), - azureFile: { - '#withReadOnly':: d.fn(help='"Defaults to false (read/write). 
ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { spec+: { source+: { inlineVolumeSpec+: { azureFile+: { readOnly: readOnly } } } } }, - '#withSecretName':: d.fn(help='"the name of secret that contains Azure Storage Account Name and Key"', args=[d.arg(name='secretName', type=d.T.string)]), - withSecretName(secretName): { spec+: { source+: { inlineVolumeSpec+: { azureFile+: { secretName: secretName } } } } }, - '#withSecretNamespace':: d.fn(help='"the namespace of the secret that contains Azure Storage Account Name and Key default is the same as the Pod"', args=[d.arg(name='secretNamespace', type=d.T.string)]), - withSecretNamespace(secretNamespace): { spec+: { source+: { inlineVolumeSpec+: { azureFile+: { secretNamespace: secretNamespace } } } } }, - '#withShareName':: d.fn(help='"Share Name"', args=[d.arg(name='shareName', type=d.T.string)]), - withShareName(shareName): { spec+: { source+: { inlineVolumeSpec+: { azureFile+: { shareName: shareName } } } } }, - }, - '#cephfs':: d.obj(help='"Represents a Ceph Filesystem mount that lasts the lifetime of a pod Cephfs volumes do not support ownership management or SELinux relabeling."'), - cephfs: { - '#secretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), - secretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { spec+: { source+: { inlineVolumeSpec+: { cephfs+: { secretRef+: { name: name } } } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { spec+: { source+: { inlineVolumeSpec+: { cephfs+: { secretRef+: { namespace: namespace } } } } } }, - }, - '#withMonitors':: d.fn(help='"Required: Monitors is a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='monitors', type=d.T.array)]), - withMonitors(monitors): { spec+: { source+: { inlineVolumeSpec+: { cephfs+: { monitors: if std.isArray(v=monitors) then monitors else [monitors] } } } } }, - '#withMonitorsMixin':: d.fn(help='"Required: Monitors is a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='monitors', type=d.T.array)]), - withMonitorsMixin(monitors): { spec+: { source+: { inlineVolumeSpec+: { cephfs+: { monitors+: if std.isArray(v=monitors) then monitors else [monitors] } } } } }, - '#withPath':: d.fn(help='"Optional: Used as the mounted root, rather than the full Ceph tree, default is /"', args=[d.arg(name='path', type=d.T.string)]), - withPath(path): { spec+: { source+: { inlineVolumeSpec+: { cephfs+: { path: path } } } } }, - '#withReadOnly':: d.fn(help='"Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. 
More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { spec+: { source+: { inlineVolumeSpec+: { cephfs+: { readOnly: readOnly } } } } }, - '#withSecretFile':: d.fn(help='"Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='secretFile', type=d.T.string)]), - withSecretFile(secretFile): { spec+: { source+: { inlineVolumeSpec+: { cephfs+: { secretFile: secretFile } } } } }, - '#withUser':: d.fn(help='"Optional: User is the rados user name, default is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='user', type=d.T.string)]), - withUser(user): { spec+: { source+: { inlineVolumeSpec+: { cephfs+: { user: user } } } } }, - }, - '#cinder':: d.obj(help='"Represents a cinder volume resource in Openstack. A Cinder volume must exist before mounting to a container. The volume must also be in the same region as the kubelet. Cinder volumes support ownership management and SELinux relabeling."'), - cinder: { - '#secretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), - secretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { spec+: { source+: { inlineVolumeSpec+: { cinder+: { secretRef+: { name: name } } } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { spec+: { source+: { inlineVolumeSpec+: { cinder+: { secretRef+: { namespace: namespace } } } } } }, - }, - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://examples.k8s.io/mysql-cinder-pd/README.md"', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { spec+: { source+: { inlineVolumeSpec+: { cinder+: { fsType: fsType } } } } }, - '#withReadOnly':: d.fn(help='"Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/mysql-cinder-pd/README.md"', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { spec+: { source+: { inlineVolumeSpec+: { cinder+: { readOnly: readOnly } } } } }, - '#withVolumeID':: d.fn(help='"volume id used to identify the volume in cinder. 
More info: https://examples.k8s.io/mysql-cinder-pd/README.md"', args=[d.arg(name='volumeID', type=d.T.string)]), - withVolumeID(volumeID): { spec+: { source+: { inlineVolumeSpec+: { cinder+: { volumeID: volumeID } } } } }, - }, - '#claimRef':: d.obj(help='"ObjectReference contains enough information to let you inspect or modify the referred object."'), - claimRef: { - '#withApiVersion':: d.fn(help='"API version of the referent."', args=[d.arg(name='apiVersion', type=d.T.string)]), - withApiVersion(apiVersion): { spec+: { source+: { inlineVolumeSpec+: { claimRef+: { apiVersion: apiVersion } } } } }, - '#withFieldPath':: d.fn(help='"If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. For example, if the object reference is to a container within a pod, this would take on a value like: \\"spec.containers{name}\\" (where \\"name\\" refers to the name of the container that triggered the event) or if no container name is specified \\"spec.containers[2]\\" (container with index 2 in this pod). This syntax is chosen only to have some well-defined way of referencing a part of an object."', args=[d.arg(name='fieldPath', type=d.T.string)]), - withFieldPath(fieldPath): { spec+: { source+: { inlineVolumeSpec+: { claimRef+: { fieldPath: fieldPath } } } } }, - '#withKind':: d.fn(help='"Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds"', args=[d.arg(name='kind', type=d.T.string)]), - withKind(kind): { spec+: { source+: { inlineVolumeSpec+: { claimRef+: { kind: kind } } } } }, - '#withName':: d.fn(help='"Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { spec+: { source+: { inlineVolumeSpec+: { claimRef+: { name: name } } } } }, - '#withNamespace':: d.fn(help='"Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/"', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { spec+: { source+: { inlineVolumeSpec+: { claimRef+: { namespace: namespace } } } } }, - '#withResourceVersion':: d.fn(help='"Specific resourceVersion to which this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), - withResourceVersion(resourceVersion): { spec+: { source+: { inlineVolumeSpec+: { claimRef+: { resourceVersion: resourceVersion } } } } }, - '#withUid':: d.fn(help='"UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids"', args=[d.arg(name='uid', type=d.T.string)]), - withUid(uid): { spec+: { source+: { inlineVolumeSpec+: { claimRef+: { uid: uid } } } } }, - }, - '#csi':: d.obj(help='"Represents storage that is managed by an external CSI volume driver (Beta feature)"'), - csi: { - '#controllerExpandSecretRef':: d.obj(help='"SecretReference represents a Secret Reference. 
It has enough information to retrieve secret in any namespace"'), - controllerExpandSecretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { spec+: { source+: { inlineVolumeSpec+: { csi+: { controllerExpandSecretRef+: { name: name } } } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { spec+: { source+: { inlineVolumeSpec+: { csi+: { controllerExpandSecretRef+: { namespace: namespace } } } } } }, - }, - '#controllerPublishSecretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), - controllerPublishSecretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { spec+: { source+: { inlineVolumeSpec+: { csi+: { controllerPublishSecretRef+: { name: name } } } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { spec+: { source+: { inlineVolumeSpec+: { csi+: { controllerPublishSecretRef+: { namespace: namespace } } } } } }, - }, - '#nodePublishSecretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), - nodePublishSecretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { spec+: { source+: { inlineVolumeSpec+: { csi+: { nodePublishSecretRef+: { name: name } } } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { spec+: { source+: { inlineVolumeSpec+: { csi+: { nodePublishSecretRef+: { namespace: namespace } } } } } }, - }, - '#nodeStageSecretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), - nodeStageSecretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { spec+: { source+: { inlineVolumeSpec+: { csi+: { nodeStageSecretRef+: { name: name } } } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { spec+: { source+: { inlineVolumeSpec+: { csi+: { nodeStageSecretRef+: { namespace: namespace } } } } } }, - }, - '#withDriver':: d.fn(help='"Driver is the name of the driver to use for this volume. Required."', args=[d.arg(name='driver', type=d.T.string)]), - withDriver(driver): { spec+: { source+: { inlineVolumeSpec+: { csi+: { driver: driver } } } } }, - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. 
\\"ext4\\", \\"xfs\\", \\"ntfs\\"."', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { spec+: { source+: { inlineVolumeSpec+: { csi+: { fsType: fsType } } } } }, - '#withReadOnly':: d.fn(help='"Optional: The value to pass to ControllerPublishVolumeRequest. Defaults to false (read/write)."', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { spec+: { source+: { inlineVolumeSpec+: { csi+: { readOnly: readOnly } } } } }, - '#withVolumeAttributes':: d.fn(help='"Attributes of the volume to publish."', args=[d.arg(name='volumeAttributes', type=d.T.object)]), - withVolumeAttributes(volumeAttributes): { spec+: { source+: { inlineVolumeSpec+: { csi+: { volumeAttributes: volumeAttributes } } } } }, - '#withVolumeAttributesMixin':: d.fn(help='"Attributes of the volume to publish."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='volumeAttributes', type=d.T.object)]), - withVolumeAttributesMixin(volumeAttributes): { spec+: { source+: { inlineVolumeSpec+: { csi+: { volumeAttributes+: volumeAttributes } } } } }, - '#withVolumeHandle':: d.fn(help='"VolumeHandle is the unique volume name returned by the CSI volume plugin’s CreateVolume to refer to the volume on all subsequent calls. Required."', args=[d.arg(name='volumeHandle', type=d.T.string)]), - withVolumeHandle(volumeHandle): { spec+: { source+: { inlineVolumeSpec+: { csi+: { volumeHandle: volumeHandle } } } } }, - }, - '#fc':: d.obj(help='"Represents a Fibre Channel volume. Fibre Channel volumes can only be mounted as read/write once. Fibre Channel volumes support ownership management and SELinux relabeling."'), - fc: { - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { spec+: { source+: { inlineVolumeSpec+: { fc+: { fsType: fsType } } } } }, - '#withLun':: d.fn(help='"Optional: FC target lun number"', args=[d.arg(name='lun', type=d.T.integer)]), - withLun(lun): { spec+: { source+: { inlineVolumeSpec+: { fc+: { lun: lun } } } } }, - '#withReadOnly':: d.fn(help='"Optional: Defaults to false (read/write). 
ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { spec+: { source+: { inlineVolumeSpec+: { fc+: { readOnly: readOnly } } } } }, - '#withTargetWWNs':: d.fn(help='"Optional: FC target worldwide names (WWNs)"', args=[d.arg(name='targetWWNs', type=d.T.array)]), - withTargetWWNs(targetWWNs): { spec+: { source+: { inlineVolumeSpec+: { fc+: { targetWWNs: if std.isArray(v=targetWWNs) then targetWWNs else [targetWWNs] } } } } }, - '#withTargetWWNsMixin':: d.fn(help='"Optional: FC target worldwide names (WWNs)"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='targetWWNs', type=d.T.array)]), - withTargetWWNsMixin(targetWWNs): { spec+: { source+: { inlineVolumeSpec+: { fc+: { targetWWNs+: if std.isArray(v=targetWWNs) then targetWWNs else [targetWWNs] } } } } }, - '#withWwids':: d.fn(help='"Optional: FC volume world wide identifiers (wwids) Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously."', args=[d.arg(name='wwids', type=d.T.array)]), - withWwids(wwids): { spec+: { source+: { inlineVolumeSpec+: { fc+: { wwids: if std.isArray(v=wwids) then wwids else [wwids] } } } } }, - '#withWwidsMixin':: d.fn(help='"Optional: FC volume world wide identifiers (wwids) Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='wwids', type=d.T.array)]), - withWwidsMixin(wwids): { spec+: { source+: { inlineVolumeSpec+: { fc+: { wwids+: if std.isArray(v=wwids) then wwids else [wwids] } } } } }, - }, - '#flexVolume':: d.obj(help='"FlexPersistentVolumeSource represents a generic persistent volume resource that is provisioned/attached using an exec based plugin."'), - flexVolume: { - '#secretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), - secretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { spec+: { source+: { inlineVolumeSpec+: { flexVolume+: { secretRef+: { name: name } } } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { spec+: { source+: { inlineVolumeSpec+: { flexVolume+: { secretRef+: { namespace: namespace } } } } } }, - }, - '#withDriver':: d.fn(help='"Driver is the name of the driver to use for this volume."', args=[d.arg(name='driver', type=d.T.string)]), - withDriver(driver): { spec+: { source+: { inlineVolumeSpec+: { flexVolume+: { driver: driver } } } } }, - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". 
The default filesystem depends on FlexVolume script."', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { spec+: { source+: { inlineVolumeSpec+: { flexVolume+: { fsType: fsType } } } } }, - '#withOptions':: d.fn(help='"Optional: Extra command options if any."', args=[d.arg(name='options', type=d.T.object)]), - withOptions(options): { spec+: { source+: { inlineVolumeSpec+: { flexVolume+: { options: options } } } } }, - '#withOptionsMixin':: d.fn(help='"Optional: Extra command options if any."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='options', type=d.T.object)]), - withOptionsMixin(options): { spec+: { source+: { inlineVolumeSpec+: { flexVolume+: { options+: options } } } } }, - '#withReadOnly':: d.fn(help='"Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { spec+: { source+: { inlineVolumeSpec+: { flexVolume+: { readOnly: readOnly } } } } }, - }, - '#flocker':: d.obj(help='"Represents a Flocker volume mounted by the Flocker agent. One and only one of datasetName and datasetUUID should be set. Flocker volumes do not support ownership management or SELinux relabeling."'), - flocker: { - '#withDatasetName':: d.fn(help='"Name of the dataset stored as metadata -> name on the dataset for Flocker should be considered as deprecated"', args=[d.arg(name='datasetName', type=d.T.string)]), - withDatasetName(datasetName): { spec+: { source+: { inlineVolumeSpec+: { flocker+: { datasetName: datasetName } } } } }, - '#withDatasetUUID':: d.fn(help='"UUID of the dataset. This is unique identifier of a Flocker dataset"', args=[d.arg(name='datasetUUID', type=d.T.string)]), - withDatasetUUID(datasetUUID): { spec+: { source+: { inlineVolumeSpec+: { flocker+: { datasetUUID: datasetUUID } } } } }, - }, - '#gcePersistentDisk':: d.obj(help='"Represents a Persistent Disk resource in Google Compute Engine.\\n\\nA GCE PD must exist before mounting to a container. The disk must also be in the same GCE project and zone as the kubelet. A GCE PD can only be mounted as read/write once or read-only many times. GCE PDs support ownership management and SELinux relabeling."'), - gcePersistentDisk: { - '#withFsType':: d.fn(help='"Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { spec+: { source+: { inlineVolumeSpec+: { gcePersistentDisk+: { fsType: fsType } } } } }, - '#withPartition':: d.fn(help='"The partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \\"1\\". Similarly, the volume partition for /dev/sda is \\"0\\" (or you can leave the property empty). More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='partition', type=d.T.integer)]), - withPartition(partition): { spec+: { source+: { inlineVolumeSpec+: { gcePersistentDisk+: { partition: partition } } } } }, - '#withPdName':: d.fn(help='"Unique name of the PD resource in GCE. Used to identify the disk in GCE. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='pdName', type=d.T.string)]), - withPdName(pdName): { spec+: { source+: { inlineVolumeSpec+: { gcePersistentDisk+: { pdName: pdName } } } } }, - '#withReadOnly':: d.fn(help='"ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { spec+: { source+: { inlineVolumeSpec+: { gcePersistentDisk+: { readOnly: readOnly } } } } }, - }, - '#glusterfs':: d.obj(help='"Represents a Glusterfs mount that lasts the lifetime of a pod. Glusterfs volumes do not support ownership management or SELinux relabeling."'), - glusterfs: { - '#withEndpoints':: d.fn(help='"EndpointsName is the endpoint name that details Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='endpoints', type=d.T.string)]), - withEndpoints(endpoints): { spec+: { source+: { inlineVolumeSpec+: { glusterfs+: { endpoints: endpoints } } } } }, - '#withEndpointsNamespace':: d.fn(help='"EndpointsNamespace is the namespace that contains Glusterfs endpoint. If this field is empty, the EndpointNamespace defaults to the same namespace as the bound PVC. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='endpointsNamespace', type=d.T.string)]), - withEndpointsNamespace(endpointsNamespace): { spec+: { source+: { inlineVolumeSpec+: { glusterfs+: { endpointsNamespace: endpointsNamespace } } } } }, - '#withPath':: d.fn(help='"Path is the Glusterfs volume path. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='path', type=d.T.string)]), - withPath(path): { spec+: { source+: { inlineVolumeSpec+: { glusterfs+: { path: path } } } } }, - '#withReadOnly':: d.fn(help='"ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions. Defaults to false. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { spec+: { source+: { inlineVolumeSpec+: { glusterfs+: { readOnly: readOnly } } } } }, - }, - '#hostPath':: d.obj(help='"Represents a host path mapped into a pod. Host path volumes do not support ownership management or SELinux relabeling."'), - hostPath: { - '#withPath':: d.fn(help='"Path of the directory on the host. If the path is a symlink, it will follow the link to the real path. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath"', args=[d.arg(name='path', type=d.T.string)]), - withPath(path): { spec+: { source+: { inlineVolumeSpec+: { hostPath+: { path: path } } } } }, - '#withType':: d.fn(help='"Type for HostPath Volume Defaults to \\"\\" More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath"', args=[d.arg(name='type', type=d.T.string)]), - withType(type): { spec+: { source+: { inlineVolumeSpec+: { hostPath+: { type: type } } } } }, - }, - '#iscsi':: d.obj(help='"ISCSIPersistentVolumeSource represents an ISCSI disk. ISCSI volumes can only be mounted as read/write once. ISCSI volumes support ownership management and SELinux relabeling."'), - iscsi: { - '#secretRef':: d.obj(help='"SecretReference represents a Secret Reference. 
It has enough information to retrieve secret in any namespace"'), - secretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { spec+: { source+: { inlineVolumeSpec+: { iscsi+: { secretRef+: { name: name } } } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { spec+: { source+: { inlineVolumeSpec+: { iscsi+: { secretRef+: { namespace: namespace } } } } } }, - }, - '#withChapAuthDiscovery':: d.fn(help='"whether support iSCSI Discovery CHAP authentication"', args=[d.arg(name='chapAuthDiscovery', type=d.T.boolean)]), - withChapAuthDiscovery(chapAuthDiscovery): { spec+: { source+: { inlineVolumeSpec+: { iscsi+: { chapAuthDiscovery: chapAuthDiscovery } } } } }, - '#withChapAuthSession':: d.fn(help='"whether support iSCSI Session CHAP authentication"', args=[d.arg(name='chapAuthSession', type=d.T.boolean)]), - withChapAuthSession(chapAuthSession): { spec+: { source+: { inlineVolumeSpec+: { iscsi+: { chapAuthSession: chapAuthSession } } } } }, - '#withFsType':: d.fn(help='"Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi"', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { spec+: { source+: { inlineVolumeSpec+: { iscsi+: { fsType: fsType } } } } }, - '#withInitiatorName':: d.fn(help='"Custom iSCSI Initiator Name. If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface : will be created for the connection."', args=[d.arg(name='initiatorName', type=d.T.string)]), - withInitiatorName(initiatorName): { spec+: { source+: { inlineVolumeSpec+: { iscsi+: { initiatorName: initiatorName } } } } }, - '#withIqn':: d.fn(help='"Target iSCSI Qualified Name."', args=[d.arg(name='iqn', type=d.T.string)]), - withIqn(iqn): { spec+: { source+: { inlineVolumeSpec+: { iscsi+: { iqn: iqn } } } } }, - '#withIscsiInterface':: d.fn(help="\"iSCSI Interface Name that uses an iSCSI transport. Defaults to 'default' (tcp).\"", args=[d.arg(name='iscsiInterface', type=d.T.string)]), - withIscsiInterface(iscsiInterface): { spec+: { source+: { inlineVolumeSpec+: { iscsi+: { iscsiInterface: iscsiInterface } } } } }, - '#withLun':: d.fn(help='"iSCSI Target Lun number."', args=[d.arg(name='lun', type=d.T.integer)]), - withLun(lun): { spec+: { source+: { inlineVolumeSpec+: { iscsi+: { lun: lun } } } } }, - '#withPortals':: d.fn(help='"iSCSI Target Portal List. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260)."', args=[d.arg(name='portals', type=d.T.array)]), - withPortals(portals): { spec+: { source+: { inlineVolumeSpec+: { iscsi+: { portals: if std.isArray(v=portals) then portals else [portals] } } } } }, - '#withPortalsMixin':: d.fn(help='"iSCSI Target Portal List. 
The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260)."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='portals', type=d.T.array)]), - withPortalsMixin(portals): { spec+: { source+: { inlineVolumeSpec+: { iscsi+: { portals+: if std.isArray(v=portals) then portals else [portals] } } } } }, - '#withReadOnly':: d.fn(help='"ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false."', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { spec+: { source+: { inlineVolumeSpec+: { iscsi+: { readOnly: readOnly } } } } }, - '#withTargetPortal':: d.fn(help='"iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260)."', args=[d.arg(name='targetPortal', type=d.T.string)]), - withTargetPortal(targetPortal): { spec+: { source+: { inlineVolumeSpec+: { iscsi+: { targetPortal: targetPortal } } } } }, - }, - '#local':: d.obj(help='"Local represents directly-attached storage with node affinity (Beta feature)"'), - 'local': { - '#withFsType':: d.fn(help='"Filesystem type to mount. It applies only when the Path is a block device. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". The default value is to auto-select a fileystem if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { spec+: { source+: { inlineVolumeSpec+: { 'local'+: { fsType: fsType } } } } }, - '#withPath':: d.fn(help='"The full path to the volume on the node. It can be either a directory or block device (disk, partition, ...)."', args=[d.arg(name='path', type=d.T.string)]), - withPath(path): { spec+: { source+: { inlineVolumeSpec+: { 'local'+: { path: path } } } } }, - }, - '#nfs':: d.obj(help='"Represents an NFS mount that lasts the lifetime of a pod. NFS volumes do not support ownership management or SELinux relabeling."'), - nfs: { - '#withPath':: d.fn(help='"Path that is exported by the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs"', args=[d.arg(name='path', type=d.T.string)]), - withPath(path): { spec+: { source+: { inlineVolumeSpec+: { nfs+: { path: path } } } } }, - '#withReadOnly':: d.fn(help='"ReadOnly here will force the NFS export to be mounted with read-only permissions. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs"', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { spec+: { source+: { inlineVolumeSpec+: { nfs+: { readOnly: readOnly } } } } }, - '#withServer':: d.fn(help='"Server is the hostname or IP address of the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs"', args=[d.arg(name='server', type=d.T.string)]), - withServer(server): { spec+: { source+: { inlineVolumeSpec+: { nfs+: { server: server } } } } }, - }, - '#nodeAffinity':: d.obj(help='"VolumeNodeAffinity defines constraints that limit what nodes this volume can be accessed from."'), - nodeAffinity: { - '#required':: d.obj(help='"A node selector represents the union of the results of one or more label queries over a set of nodes; that is, it represents the OR of the selectors represented by the node selector terms."'), - required: { - '#withNodeSelectorTerms':: d.fn(help='"Required. A list of node selector terms. 
The terms are ORed."', args=[d.arg(name='nodeSelectorTerms', type=d.T.array)]), - withNodeSelectorTerms(nodeSelectorTerms): { spec+: { source+: { inlineVolumeSpec+: { nodeAffinity+: { required+: { nodeSelectorTerms: if std.isArray(v=nodeSelectorTerms) then nodeSelectorTerms else [nodeSelectorTerms] } } } } } }, - '#withNodeSelectorTermsMixin':: d.fn(help='"Required. A list of node selector terms. The terms are ORed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='nodeSelectorTerms', type=d.T.array)]), - withNodeSelectorTermsMixin(nodeSelectorTerms): { spec+: { source+: { inlineVolumeSpec+: { nodeAffinity+: { required+: { nodeSelectorTerms+: if std.isArray(v=nodeSelectorTerms) then nodeSelectorTerms else [nodeSelectorTerms] } } } } } }, - }, - }, - '#photonPersistentDisk':: d.obj(help='"Represents a Photon Controller persistent disk resource."'), - photonPersistentDisk: { - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { spec+: { source+: { inlineVolumeSpec+: { photonPersistentDisk+: { fsType: fsType } } } } }, - '#withPdID':: d.fn(help='"ID that identifies Photon Controller persistent disk"', args=[d.arg(name='pdID', type=d.T.string)]), - withPdID(pdID): { spec+: { source+: { inlineVolumeSpec+: { photonPersistentDisk+: { pdID: pdID } } } } }, - }, - '#portworxVolume':: d.obj(help='"PortworxVolumeSource represents a Portworx volume resource."'), - portworxVolume: { - '#withFsType':: d.fn(help='"FSType represents the filesystem type to mount Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { spec+: { source+: { inlineVolumeSpec+: { portworxVolume+: { fsType: fsType } } } } }, - '#withReadOnly':: d.fn(help='"Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { spec+: { source+: { inlineVolumeSpec+: { portworxVolume+: { readOnly: readOnly } } } } }, - '#withVolumeID':: d.fn(help='"VolumeID uniquely identifies a Portworx volume"', args=[d.arg(name='volumeID', type=d.T.string)]), - withVolumeID(volumeID): { spec+: { source+: { inlineVolumeSpec+: { portworxVolume+: { volumeID: volumeID } } } } }, - }, - '#quobyte':: d.obj(help='"Represents a Quobyte mount that lasts the lifetime of a pod. Quobyte volumes do not support ownership management or SELinux relabeling."'), - quobyte: { - '#withGroup':: d.fn(help='"Group to map volume access to Default is no group"', args=[d.arg(name='group', type=d.T.string)]), - withGroup(group): { spec+: { source+: { inlineVolumeSpec+: { quobyte+: { group: group } } } } }, - '#withReadOnly':: d.fn(help='"ReadOnly here will force the Quobyte volume to be mounted with read-only permissions. 
Defaults to false."', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { spec+: { source+: { inlineVolumeSpec+: { quobyte+: { readOnly: readOnly } } } } }, - '#withRegistry':: d.fn(help='"Registry represents a single or multiple Quobyte Registry services specified as a string as host:port pair (multiple entries are separated with commas) which acts as the central registry for volumes"', args=[d.arg(name='registry', type=d.T.string)]), - withRegistry(registry): { spec+: { source+: { inlineVolumeSpec+: { quobyte+: { registry: registry } } } } }, - '#withTenant':: d.fn(help='"Tenant owning the given Quobyte volume in the Backend Used with dynamically provisioned Quobyte volumes, value is set by the plugin"', args=[d.arg(name='tenant', type=d.T.string)]), - withTenant(tenant): { spec+: { source+: { inlineVolumeSpec+: { quobyte+: { tenant: tenant } } } } }, - '#withUser':: d.fn(help='"User to map volume access to Defaults to serivceaccount user"', args=[d.arg(name='user', type=d.T.string)]), - withUser(user): { spec+: { source+: { inlineVolumeSpec+: { quobyte+: { user: user } } } } }, - '#withVolume':: d.fn(help='"Volume is a string that references an already created Quobyte volume by name."', args=[d.arg(name='volume', type=d.T.string)]), - withVolume(volume): { spec+: { source+: { inlineVolumeSpec+: { quobyte+: { volume: volume } } } } }, - }, - '#rbd':: d.obj(help='"Represents a Rados Block Device mount that lasts the lifetime of a pod. RBD volumes support ownership management and SELinux relabeling."'), - rbd: { - '#secretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), - secretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { spec+: { source+: { inlineVolumeSpec+: { rbd+: { secretRef+: { name: name } } } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { spec+: { source+: { inlineVolumeSpec+: { rbd+: { secretRef+: { namespace: namespace } } } } } }, - }, - '#withFsType':: d.fn(help='"Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd"', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { spec+: { source+: { inlineVolumeSpec+: { rbd+: { fsType: fsType } } } } }, - '#withImage':: d.fn(help='"The rados image name. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='image', type=d.T.string)]), - withImage(image): { spec+: { source+: { inlineVolumeSpec+: { rbd+: { image: image } } } } }, - '#withKeyring':: d.fn(help='"Keyring is the path to key ring for RBDUser. Default is /etc/ceph/keyring. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='keyring', type=d.T.string)]), - withKeyring(keyring): { spec+: { source+: { inlineVolumeSpec+: { rbd+: { keyring: keyring } } } } }, - '#withMonitors':: d.fn(help='"A collection of Ceph monitors. 
More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='monitors', type=d.T.array)]), - withMonitors(monitors): { spec+: { source+: { inlineVolumeSpec+: { rbd+: { monitors: if std.isArray(v=monitors) then monitors else [monitors] } } } } }, - '#withMonitorsMixin':: d.fn(help='"A collection of Ceph monitors. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='monitors', type=d.T.array)]), - withMonitorsMixin(monitors): { spec+: { source+: { inlineVolumeSpec+: { rbd+: { monitors+: if std.isArray(v=monitors) then monitors else [monitors] } } } } }, - '#withPool':: d.fn(help='"The rados pool name. Default is rbd. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='pool', type=d.T.string)]), - withPool(pool): { spec+: { source+: { inlineVolumeSpec+: { rbd+: { pool: pool } } } } }, - '#withReadOnly':: d.fn(help='"ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { spec+: { source+: { inlineVolumeSpec+: { rbd+: { readOnly: readOnly } } } } }, - '#withUser':: d.fn(help='"The rados user name. Default is admin. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='user', type=d.T.string)]), - withUser(user): { spec+: { source+: { inlineVolumeSpec+: { rbd+: { user: user } } } } }, - }, - '#scaleIO':: d.obj(help='"ScaleIOPersistentVolumeSource represents a persistent ScaleIO volume"'), - scaleIO: { - '#secretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), - secretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { spec+: { source+: { inlineVolumeSpec+: { scaleIO+: { secretRef+: { name: name } } } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { spec+: { source+: { inlineVolumeSpec+: { scaleIO+: { secretRef+: { namespace: namespace } } } } } }, - }, - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Default is \\"xfs\\', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { spec+: { source+: { inlineVolumeSpec+: { scaleIO+: { fsType: fsType } } } } }, - '#withGateway':: d.fn(help='"The host address of the ScaleIO API Gateway."', args=[d.arg(name='gateway', type=d.T.string)]), - withGateway(gateway): { spec+: { source+: { inlineVolumeSpec+: { scaleIO+: { gateway: gateway } } } } }, - '#withProtectionDomain':: d.fn(help='"The name of the ScaleIO Protection Domain for the configured storage."', args=[d.arg(name='protectionDomain', type=d.T.string)]), - withProtectionDomain(protectionDomain): { spec+: { source+: { inlineVolumeSpec+: { scaleIO+: { protectionDomain: protectionDomain } } } } }, - '#withReadOnly':: d.fn(help='"Defaults to false (read/write). 
ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { spec+: { source+: { inlineVolumeSpec+: { scaleIO+: { readOnly: readOnly } } } } }, - '#withSslEnabled':: d.fn(help='"Flag to enable/disable SSL communication with Gateway, default false"', args=[d.arg(name='sslEnabled', type=d.T.boolean)]), - withSslEnabled(sslEnabled): { spec+: { source+: { inlineVolumeSpec+: { scaleIO+: { sslEnabled: sslEnabled } } } } }, - '#withStorageMode':: d.fn(help='"Indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. Default is ThinProvisioned."', args=[d.arg(name='storageMode', type=d.T.string)]), - withStorageMode(storageMode): { spec+: { source+: { inlineVolumeSpec+: { scaleIO+: { storageMode: storageMode } } } } }, - '#withStoragePool':: d.fn(help='"The ScaleIO Storage Pool associated with the protection domain."', args=[d.arg(name='storagePool', type=d.T.string)]), - withStoragePool(storagePool): { spec+: { source+: { inlineVolumeSpec+: { scaleIO+: { storagePool: storagePool } } } } }, - '#withSystem':: d.fn(help='"The name of the storage system as configured in ScaleIO."', args=[d.arg(name='system', type=d.T.string)]), - withSystem(system): { spec+: { source+: { inlineVolumeSpec+: { scaleIO+: { system: system } } } } }, - '#withVolumeName':: d.fn(help='"The name of a volume already created in the ScaleIO system that is associated with this volume source."', args=[d.arg(name='volumeName', type=d.T.string)]), - withVolumeName(volumeName): { spec+: { source+: { inlineVolumeSpec+: { scaleIO+: { volumeName: volumeName } } } } }, - }, - '#storageos':: d.obj(help='"Represents a StorageOS persistent volume resource."'), - storageos: { - '#secretRef':: d.obj(help='"ObjectReference contains enough information to let you inspect or modify the referred object."'), - secretRef: { - '#withApiVersion':: d.fn(help='"API version of the referent."', args=[d.arg(name='apiVersion', type=d.T.string)]), - withApiVersion(apiVersion): { spec+: { source+: { inlineVolumeSpec+: { storageos+: { secretRef+: { apiVersion: apiVersion } } } } } }, - '#withFieldPath':: d.fn(help='"If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. For example, if the object reference is to a container within a pod, this would take on a value like: \\"spec.containers{name}\\" (where \\"name\\" refers to the name of the container that triggered the event) or if no container name is specified \\"spec.containers[2]\\" (container with index 2 in this pod). This syntax is chosen only to have some well-defined way of referencing a part of an object."', args=[d.arg(name='fieldPath', type=d.T.string)]), - withFieldPath(fieldPath): { spec+: { source+: { inlineVolumeSpec+: { storageos+: { secretRef+: { fieldPath: fieldPath } } } } } }, - '#withKind':: d.fn(help='"Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds"', args=[d.arg(name='kind', type=d.T.string)]), - withKind(kind): { spec+: { source+: { inlineVolumeSpec+: { storageos+: { secretRef+: { kind: kind } } } } } }, - '#withName':: d.fn(help='"Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { spec+: { source+: { inlineVolumeSpec+: { storageos+: { secretRef+: { name: name } } } } } }, - '#withNamespace':: d.fn(help='"Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/"', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { spec+: { source+: { inlineVolumeSpec+: { storageos+: { secretRef+: { namespace: namespace } } } } } }, - '#withResourceVersion':: d.fn(help='"Specific resourceVersion to which this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), - withResourceVersion(resourceVersion): { spec+: { source+: { inlineVolumeSpec+: { storageos+: { secretRef+: { resourceVersion: resourceVersion } } } } } }, - '#withUid':: d.fn(help='"UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids"', args=[d.arg(name='uid', type=d.T.string)]), - withUid(uid): { spec+: { source+: { inlineVolumeSpec+: { storageos+: { secretRef+: { uid: uid } } } } } }, - }, - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { spec+: { source+: { inlineVolumeSpec+: { storageos+: { fsType: fsType } } } } }, - '#withReadOnly':: d.fn(help='"Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { spec+: { source+: { inlineVolumeSpec+: { storageos+: { readOnly: readOnly } } } } }, - '#withVolumeName':: d.fn(help='"VolumeName is the human-readable name of the StorageOS volume. Volume names are only unique within a namespace."', args=[d.arg(name='volumeName', type=d.T.string)]), - withVolumeName(volumeName): { spec+: { source+: { inlineVolumeSpec+: { storageos+: { volumeName: volumeName } } } } }, - '#withVolumeNamespace':: d.fn(help="\"VolumeNamespace specifies the scope of the volume within StorageOS. If no namespace is specified then the Pod's namespace will be used. This allows the Kubernetes name scoping to be mirrored within StorageOS for tighter integration. Set VolumeName to any name to override the default behaviour. Set to \\\"default\\\" if you are not using namespaces within StorageOS. Namespaces that do not pre-exist within StorageOS will be created.\"", args=[d.arg(name='volumeNamespace', type=d.T.string)]), - withVolumeNamespace(volumeNamespace): { spec+: { source+: { inlineVolumeSpec+: { storageos+: { volumeNamespace: volumeNamespace } } } } }, - }, - '#vsphereVolume':: d.obj(help='"Represents a vSphere volume resource."'), - vsphereVolume: { - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". 
Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { spec+: { source+: { inlineVolumeSpec+: { vsphereVolume+: { fsType: fsType } } } } }, - '#withStoragePolicyID':: d.fn(help='"Storage Policy Based Management (SPBM) profile ID associated with the StoragePolicyName."', args=[d.arg(name='storagePolicyID', type=d.T.string)]), - withStoragePolicyID(storagePolicyID): { spec+: { source+: { inlineVolumeSpec+: { vsphereVolume+: { storagePolicyID: storagePolicyID } } } } }, - '#withStoragePolicyName':: d.fn(help='"Storage Policy Based Management (SPBM) profile name."', args=[d.arg(name='storagePolicyName', type=d.T.string)]), - withStoragePolicyName(storagePolicyName): { spec+: { source+: { inlineVolumeSpec+: { vsphereVolume+: { storagePolicyName: storagePolicyName } } } } }, - '#withVolumePath':: d.fn(help='"Path that identifies vSphere volume vmdk"', args=[d.arg(name='volumePath', type=d.T.string)]), - withVolumePath(volumePath): { spec+: { source+: { inlineVolumeSpec+: { vsphereVolume+: { volumePath: volumePath } } } } }, - }, - '#withAccessModes':: d.fn(help='"AccessModes contains all ways the volume can be mounted. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes"', args=[d.arg(name='accessModes', type=d.T.array)]), - withAccessModes(accessModes): { spec+: { source+: { inlineVolumeSpec+: { accessModes: if std.isArray(v=accessModes) then accessModes else [accessModes] } } } }, - '#withAccessModesMixin':: d.fn(help='"AccessModes contains all ways the volume can be mounted. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='accessModes', type=d.T.array)]), - withAccessModesMixin(accessModes): { spec+: { source+: { inlineVolumeSpec+: { accessModes+: if std.isArray(v=accessModes) then accessModes else [accessModes] } } } }, - '#withCapacity':: d.fn(help="\"A description of the persistent volume's resources and capacity. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity\"", args=[d.arg(name='capacity', type=d.T.object)]), - withCapacity(capacity): { spec+: { source+: { inlineVolumeSpec+: { capacity: capacity } } } }, - '#withCapacityMixin':: d.fn(help="\"A description of the persistent volume's resources and capacity. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='capacity', type=d.T.object)]), - withCapacityMixin(capacity): { spec+: { source+: { inlineVolumeSpec+: { capacity+: capacity } } } }, - '#withMountOptions':: d.fn(help='"A list of mount options, e.g. [\\"ro\\", \\"soft\\"]. Not validated - mount will simply fail if one is invalid. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#mount-options"', args=[d.arg(name='mountOptions', type=d.T.array)]), - withMountOptions(mountOptions): { spec+: { source+: { inlineVolumeSpec+: { mountOptions: if std.isArray(v=mountOptions) then mountOptions else [mountOptions] } } } }, - '#withMountOptionsMixin':: d.fn(help='"A list of mount options, e.g. [\\"ro\\", \\"soft\\"]. Not validated - mount will simply fail if one is invalid. 
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#mount-options"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='mountOptions', type=d.T.array)]), - withMountOptionsMixin(mountOptions): { spec+: { source+: { inlineVolumeSpec+: { mountOptions+: if std.isArray(v=mountOptions) then mountOptions else [mountOptions] } } } }, - '#withPersistentVolumeReclaimPolicy':: d.fn(help='"What happens to a persistent volume when released from its claim. Valid options are Retain (default for manually created PersistentVolumes), Delete (default for dynamically provisioned PersistentVolumes), and Recycle (deprecated). Recycle must be supported by the volume plugin underlying this PersistentVolume. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#reclaiming"', args=[d.arg(name='persistentVolumeReclaimPolicy', type=d.T.string)]), - withPersistentVolumeReclaimPolicy(persistentVolumeReclaimPolicy): { spec+: { source+: { inlineVolumeSpec+: { persistentVolumeReclaimPolicy: persistentVolumeReclaimPolicy } } } }, - '#withStorageClassName':: d.fn(help='"Name of StorageClass to which this persistent volume belongs. Empty value means that this volume does not belong to any StorageClass."', args=[d.arg(name='storageClassName', type=d.T.string)]), - withStorageClassName(storageClassName): { spec+: { source+: { inlineVolumeSpec+: { storageClassName: storageClassName } } } }, - '#withVolumeMode':: d.fn(help='"volumeMode defines if a volume is intended to be used with a formatted filesystem or to remain in raw block state. Value of Filesystem is implied when not included in spec."', args=[d.arg(name='volumeMode', type=d.T.string)]), - withVolumeMode(volumeMode): { spec+: { source+: { inlineVolumeSpec+: { volumeMode: volumeMode } } } }, - }, - '#withPersistentVolumeName':: d.fn(help='"Name of the persistent volume to attach."', args=[d.arg(name='persistentVolumeName', type=d.T.string)]), - withPersistentVolumeName(persistentVolumeName): { spec+: { source+: { persistentVolumeName: persistentVolumeName } } }, - }, - '#withAttacher':: d.fn(help='"Attacher indicates the name of the volume driver that MUST handle this request. This is the name returned by GetPluginName()."', args=[d.arg(name='attacher', type=d.T.string)]), - withAttacher(attacher): { spec+: { attacher: attacher } }, - '#withNodeName':: d.fn(help='"The node that the volume should be attached to."', args=[d.arg(name='nodeName', type=d.T.string)]), - withNodeName(nodeName): { spec+: { nodeName: nodeName } }, - }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1alpha1/volumeAttachmentSource.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1alpha1/volumeAttachmentSource.libsonnet deleted file mode 100644 index 47ca4b20576..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1alpha1/volumeAttachmentSource.libsonnet +++ /dev/null @@ -1,419 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='volumeAttachmentSource', url='', help='"VolumeAttachmentSource represents a volume that should be attached. Right now only PersistenVolumes can be attached via external attacher, in future we may allow also inline volumes in pods. 
Exactly one member can be set."'), - '#inlineVolumeSpec':: d.obj(help='"PersistentVolumeSpec is the specification of a persistent volume."'), - inlineVolumeSpec: { - '#awsElasticBlockStore':: d.obj(help='"Represents a Persistent Disk resource in AWS.\\n\\nAn AWS EBS disk must exist before mounting to a container. The disk must also be in the same AWS zone as the kubelet. An AWS EBS disk can only be mounted as read/write once. AWS EBS volumes support ownership management and SELinux relabeling."'), - awsElasticBlockStore: { - '#withFsType':: d.fn(help='"Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore"', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { inlineVolumeSpec+: { awsElasticBlockStore+: { fsType: fsType } } }, - '#withPartition':: d.fn(help='"The partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \\"1\\". Similarly, the volume partition for /dev/sda is \\"0\\" (or you can leave the property empty)."', args=[d.arg(name='partition', type=d.T.integer)]), - withPartition(partition): { inlineVolumeSpec+: { awsElasticBlockStore+: { partition: partition } } }, - '#withReadOnly':: d.fn(help='"Specify \\"true\\" to force and set the ReadOnly property in VolumeMounts to \\"true\\". If omitted, the default is \\"false\\". More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore"', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { inlineVolumeSpec+: { awsElasticBlockStore+: { readOnly: readOnly } } }, - '#withVolumeID':: d.fn(help='"Unique ID of the persistent disk resource in AWS (Amazon EBS volume). More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore"', args=[d.arg(name='volumeID', type=d.T.string)]), - withVolumeID(volumeID): { inlineVolumeSpec+: { awsElasticBlockStore+: { volumeID: volumeID } } }, - }, - '#azureDisk':: d.obj(help='"AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod."'), - azureDisk: { - '#withCachingMode':: d.fn(help='"Host Caching mode: None, Read Only, Read Write."', args=[d.arg(name='cachingMode', type=d.T.string)]), - withCachingMode(cachingMode): { inlineVolumeSpec+: { azureDisk+: { cachingMode: cachingMode } } }, - '#withDiskName':: d.fn(help='"The Name of the data disk in the blob storage"', args=[d.arg(name='diskName', type=d.T.string)]), - withDiskName(diskName): { inlineVolumeSpec+: { azureDisk+: { diskName: diskName } } }, - '#withDiskURI':: d.fn(help='"The URI the data disk in the blob storage"', args=[d.arg(name='diskURI', type=d.T.string)]), - withDiskURI(diskURI): { inlineVolumeSpec+: { azureDisk+: { diskURI: diskURI } } }, - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". 
Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { inlineVolumeSpec+: { azureDisk+: { fsType: fsType } } }, - '#withKind':: d.fn(help='"Expected values Shared: multiple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared"', args=[d.arg(name='kind', type=d.T.string)]), - withKind(kind): { inlineVolumeSpec+: { azureDisk+: { kind: kind } } }, - '#withReadOnly':: d.fn(help='"Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { inlineVolumeSpec+: { azureDisk+: { readOnly: readOnly } } }, - }, - '#azureFile':: d.obj(help='"AzureFile represents an Azure File Service mount on the host and bind mount to the pod."'), - azureFile: { - '#withReadOnly':: d.fn(help='"Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { inlineVolumeSpec+: { azureFile+: { readOnly: readOnly } } }, - '#withSecretName':: d.fn(help='"the name of secret that contains Azure Storage Account Name and Key"', args=[d.arg(name='secretName', type=d.T.string)]), - withSecretName(secretName): { inlineVolumeSpec+: { azureFile+: { secretName: secretName } } }, - '#withSecretNamespace':: d.fn(help='"the namespace of the secret that contains Azure Storage Account Name and Key default is the same as the Pod"', args=[d.arg(name='secretNamespace', type=d.T.string)]), - withSecretNamespace(secretNamespace): { inlineVolumeSpec+: { azureFile+: { secretNamespace: secretNamespace } } }, - '#withShareName':: d.fn(help='"Share Name"', args=[d.arg(name='shareName', type=d.T.string)]), - withShareName(shareName): { inlineVolumeSpec+: { azureFile+: { shareName: shareName } } }, - }, - '#cephfs':: d.obj(help='"Represents a Ceph Filesystem mount that lasts the lifetime of a pod Cephfs volumes do not support ownership management or SELinux relabeling."'), - cephfs: { - '#secretRef':: d.obj(help='"SecretReference represents a Secret Reference. 
It has enough information to retrieve secret in any namespace"'), - secretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { inlineVolumeSpec+: { cephfs+: { secretRef+: { name: name } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { inlineVolumeSpec+: { cephfs+: { secretRef+: { namespace: namespace } } } }, - }, - '#withMonitors':: d.fn(help='"Required: Monitors is a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='monitors', type=d.T.array)]), - withMonitors(monitors): { inlineVolumeSpec+: { cephfs+: { monitors: if std.isArray(v=monitors) then monitors else [monitors] } } }, - '#withMonitorsMixin':: d.fn(help='"Required: Monitors is a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='monitors', type=d.T.array)]), - withMonitorsMixin(monitors): { inlineVolumeSpec+: { cephfs+: { monitors+: if std.isArray(v=monitors) then monitors else [monitors] } } }, - '#withPath':: d.fn(help='"Optional: Used as the mounted root, rather than the full Ceph tree, default is /"', args=[d.arg(name='path', type=d.T.string)]), - withPath(path): { inlineVolumeSpec+: { cephfs+: { path: path } } }, - '#withReadOnly':: d.fn(help='"Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { inlineVolumeSpec+: { cephfs+: { readOnly: readOnly } } }, - '#withSecretFile':: d.fn(help='"Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='secretFile', type=d.T.string)]), - withSecretFile(secretFile): { inlineVolumeSpec+: { cephfs+: { secretFile: secretFile } } }, - '#withUser':: d.fn(help='"Optional: User is the rados user name, default is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='user', type=d.T.string)]), - withUser(user): { inlineVolumeSpec+: { cephfs+: { user: user } } }, - }, - '#cinder':: d.obj(help='"Represents a cinder volume resource in Openstack. A Cinder volume must exist before mounting to a container. The volume must also be in the same region as the kubelet. Cinder volumes support ownership management and SELinux relabeling."'), - cinder: { - '#secretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), - secretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { inlineVolumeSpec+: { cinder+: { secretRef+: { name: name } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { inlineVolumeSpec+: { cinder+: { secretRef+: { namespace: namespace } } } }, - }, - '#withFsType':: d.fn(help='"Filesystem type to mount. 
Must be a filesystem type supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://examples.k8s.io/mysql-cinder-pd/README.md"', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { inlineVolumeSpec+: { cinder+: { fsType: fsType } } }, - '#withReadOnly':: d.fn(help='"Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/mysql-cinder-pd/README.md"', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { inlineVolumeSpec+: { cinder+: { readOnly: readOnly } } }, - '#withVolumeID':: d.fn(help='"volume id used to identify the volume in cinder. More info: https://examples.k8s.io/mysql-cinder-pd/README.md"', args=[d.arg(name='volumeID', type=d.T.string)]), - withVolumeID(volumeID): { inlineVolumeSpec+: { cinder+: { volumeID: volumeID } } }, - }, - '#claimRef':: d.obj(help='"ObjectReference contains enough information to let you inspect or modify the referred object."'), - claimRef: { - '#withApiVersion':: d.fn(help='"API version of the referent."', args=[d.arg(name='apiVersion', type=d.T.string)]), - withApiVersion(apiVersion): { inlineVolumeSpec+: { claimRef+: { apiVersion: apiVersion } } }, - '#withFieldPath':: d.fn(help='"If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. For example, if the object reference is to a container within a pod, this would take on a value like: \\"spec.containers{name}\\" (where \\"name\\" refers to the name of the container that triggered the event) or if no container name is specified \\"spec.containers[2]\\" (container with index 2 in this pod). This syntax is chosen only to have some well-defined way of referencing a part of an object."', args=[d.arg(name='fieldPath', type=d.T.string)]), - withFieldPath(fieldPath): { inlineVolumeSpec+: { claimRef+: { fieldPath: fieldPath } } }, - '#withKind':: d.fn(help='"Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds"', args=[d.arg(name='kind', type=d.T.string)]), - withKind(kind): { inlineVolumeSpec+: { claimRef+: { kind: kind } } }, - '#withName':: d.fn(help='"Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { inlineVolumeSpec+: { claimRef+: { name: name } } }, - '#withNamespace':: d.fn(help='"Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/"', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { inlineVolumeSpec+: { claimRef+: { namespace: namespace } } }, - '#withResourceVersion':: d.fn(help='"Specific resourceVersion to which this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), - withResourceVersion(resourceVersion): { inlineVolumeSpec+: { claimRef+: { resourceVersion: resourceVersion } } }, - '#withUid':: d.fn(help='"UID of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids"', args=[d.arg(name='uid', type=d.T.string)]), - withUid(uid): { inlineVolumeSpec+: { claimRef+: { uid: uid } } }, - }, - '#csi':: d.obj(help='"Represents storage that is managed by an external CSI volume driver (Beta feature)"'), - csi: { - '#controllerExpandSecretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), - controllerExpandSecretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { inlineVolumeSpec+: { csi+: { controllerExpandSecretRef+: { name: name } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { inlineVolumeSpec+: { csi+: { controllerExpandSecretRef+: { namespace: namespace } } } }, - }, - '#controllerPublishSecretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), - controllerPublishSecretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { inlineVolumeSpec+: { csi+: { controllerPublishSecretRef+: { name: name } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { inlineVolumeSpec+: { csi+: { controllerPublishSecretRef+: { namespace: namespace } } } }, - }, - '#nodePublishSecretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), - nodePublishSecretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { inlineVolumeSpec+: { csi+: { nodePublishSecretRef+: { name: name } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { inlineVolumeSpec+: { csi+: { nodePublishSecretRef+: { namespace: namespace } } } }, - }, - '#nodeStageSecretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), - nodeStageSecretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { inlineVolumeSpec+: { csi+: { nodeStageSecretRef+: { name: name } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { inlineVolumeSpec+: { csi+: { nodeStageSecretRef+: { namespace: namespace } } } }, - }, - '#withDriver':: d.fn(help='"Driver is the name of the driver to use for this volume. Required."', args=[d.arg(name='driver', type=d.T.string)]), - withDriver(driver): { inlineVolumeSpec+: { csi+: { driver: driver } } }, - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. 
\\"ext4\\", \\"xfs\\", \\"ntfs\\"."', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { inlineVolumeSpec+: { csi+: { fsType: fsType } } }, - '#withReadOnly':: d.fn(help='"Optional: The value to pass to ControllerPublishVolumeRequest. Defaults to false (read/write)."', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { inlineVolumeSpec+: { csi+: { readOnly: readOnly } } }, - '#withVolumeAttributes':: d.fn(help='"Attributes of the volume to publish."', args=[d.arg(name='volumeAttributes', type=d.T.object)]), - withVolumeAttributes(volumeAttributes): { inlineVolumeSpec+: { csi+: { volumeAttributes: volumeAttributes } } }, - '#withVolumeAttributesMixin':: d.fn(help='"Attributes of the volume to publish."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='volumeAttributes', type=d.T.object)]), - withVolumeAttributesMixin(volumeAttributes): { inlineVolumeSpec+: { csi+: { volumeAttributes+: volumeAttributes } } }, - '#withVolumeHandle':: d.fn(help='"VolumeHandle is the unique volume name returned by the CSI volume plugin’s CreateVolume to refer to the volume on all subsequent calls. Required."', args=[d.arg(name='volumeHandle', type=d.T.string)]), - withVolumeHandle(volumeHandle): { inlineVolumeSpec+: { csi+: { volumeHandle: volumeHandle } } }, - }, - '#fc':: d.obj(help='"Represents a Fibre Channel volume. Fibre Channel volumes can only be mounted as read/write once. Fibre Channel volumes support ownership management and SELinux relabeling."'), - fc: { - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { inlineVolumeSpec+: { fc+: { fsType: fsType } } }, - '#withLun':: d.fn(help='"Optional: FC target lun number"', args=[d.arg(name='lun', type=d.T.integer)]), - withLun(lun): { inlineVolumeSpec+: { fc+: { lun: lun } } }, - '#withReadOnly':: d.fn(help='"Optional: Defaults to false (read/write). 
ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { inlineVolumeSpec+: { fc+: { readOnly: readOnly } } }, - '#withTargetWWNs':: d.fn(help='"Optional: FC target worldwide names (WWNs)"', args=[d.arg(name='targetWWNs', type=d.T.array)]), - withTargetWWNs(targetWWNs): { inlineVolumeSpec+: { fc+: { targetWWNs: if std.isArray(v=targetWWNs) then targetWWNs else [targetWWNs] } } }, - '#withTargetWWNsMixin':: d.fn(help='"Optional: FC target worldwide names (WWNs)"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='targetWWNs', type=d.T.array)]), - withTargetWWNsMixin(targetWWNs): { inlineVolumeSpec+: { fc+: { targetWWNs+: if std.isArray(v=targetWWNs) then targetWWNs else [targetWWNs] } } }, - '#withWwids':: d.fn(help='"Optional: FC volume world wide identifiers (wwids) Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously."', args=[d.arg(name='wwids', type=d.T.array)]), - withWwids(wwids): { inlineVolumeSpec+: { fc+: { wwids: if std.isArray(v=wwids) then wwids else [wwids] } } }, - '#withWwidsMixin':: d.fn(help='"Optional: FC volume world wide identifiers (wwids) Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='wwids', type=d.T.array)]), - withWwidsMixin(wwids): { inlineVolumeSpec+: { fc+: { wwids+: if std.isArray(v=wwids) then wwids else [wwids] } } }, - }, - '#flexVolume':: d.obj(help='"FlexPersistentVolumeSource represents a generic persistent volume resource that is provisioned/attached using an exec based plugin."'), - flexVolume: { - '#secretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), - secretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { inlineVolumeSpec+: { flexVolume+: { secretRef+: { name: name } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { inlineVolumeSpec+: { flexVolume+: { secretRef+: { namespace: namespace } } } }, - }, - '#withDriver':: d.fn(help='"Driver is the name of the driver to use for this volume."', args=[d.arg(name='driver', type=d.T.string)]), - withDriver(driver): { inlineVolumeSpec+: { flexVolume+: { driver: driver } } }, - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". The default filesystem depends on FlexVolume script."', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { inlineVolumeSpec+: { flexVolume+: { fsType: fsType } } }, - '#withOptions':: d.fn(help='"Optional: Extra command options if any."', args=[d.arg(name='options', type=d.T.object)]), - withOptions(options): { inlineVolumeSpec+: { flexVolume+: { options: options } } }, - '#withOptionsMixin':: d.fn(help='"Optional: Extra command options if any."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='options', type=d.T.object)]), - withOptionsMixin(options): { inlineVolumeSpec+: { flexVolume+: { options+: options } } }, - '#withReadOnly':: d.fn(help='"Optional: Defaults to false (read/write). 
ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { inlineVolumeSpec+: { flexVolume+: { readOnly: readOnly } } }, - }, - '#flocker':: d.obj(help='"Represents a Flocker volume mounted by the Flocker agent. One and only one of datasetName and datasetUUID should be set. Flocker volumes do not support ownership management or SELinux relabeling."'), - flocker: { - '#withDatasetName':: d.fn(help='"Name of the dataset stored as metadata -> name on the dataset for Flocker should be considered as deprecated"', args=[d.arg(name='datasetName', type=d.T.string)]), - withDatasetName(datasetName): { inlineVolumeSpec+: { flocker+: { datasetName: datasetName } } }, - '#withDatasetUUID':: d.fn(help='"UUID of the dataset. This is unique identifier of a Flocker dataset"', args=[d.arg(name='datasetUUID', type=d.T.string)]), - withDatasetUUID(datasetUUID): { inlineVolumeSpec+: { flocker+: { datasetUUID: datasetUUID } } }, - }, - '#gcePersistentDisk':: d.obj(help='"Represents a Persistent Disk resource in Google Compute Engine.\\n\\nA GCE PD must exist before mounting to a container. The disk must also be in the same GCE project and zone as the kubelet. A GCE PD can only be mounted as read/write once or read-only many times. GCE PDs support ownership management and SELinux relabeling."'), - gcePersistentDisk: { - '#withFsType':: d.fn(help='"Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { inlineVolumeSpec+: { gcePersistentDisk+: { fsType: fsType } } }, - '#withPartition':: d.fn(help='"The partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \\"1\\". Similarly, the volume partition for /dev/sda is \\"0\\" (or you can leave the property empty). More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='partition', type=d.T.integer)]), - withPartition(partition): { inlineVolumeSpec+: { gcePersistentDisk+: { partition: partition } } }, - '#withPdName':: d.fn(help='"Unique name of the PD resource in GCE. Used to identify the disk in GCE. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='pdName', type=d.T.string)]), - withPdName(pdName): { inlineVolumeSpec+: { gcePersistentDisk+: { pdName: pdName } } }, - '#withReadOnly':: d.fn(help='"ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { inlineVolumeSpec+: { gcePersistentDisk+: { readOnly: readOnly } } }, - }, - '#glusterfs':: d.obj(help='"Represents a Glusterfs mount that lasts the lifetime of a pod. Glusterfs volumes do not support ownership management or SELinux relabeling."'), - glusterfs: { - '#withEndpoints':: d.fn(help='"EndpointsName is the endpoint name that details Glusterfs topology. 
More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='endpoints', type=d.T.string)]), - withEndpoints(endpoints): { inlineVolumeSpec+: { glusterfs+: { endpoints: endpoints } } }, - '#withEndpointsNamespace':: d.fn(help='"EndpointsNamespace is the namespace that contains Glusterfs endpoint. If this field is empty, the EndpointNamespace defaults to the same namespace as the bound PVC. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='endpointsNamespace', type=d.T.string)]), - withEndpointsNamespace(endpointsNamespace): { inlineVolumeSpec+: { glusterfs+: { endpointsNamespace: endpointsNamespace } } }, - '#withPath':: d.fn(help='"Path is the Glusterfs volume path. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='path', type=d.T.string)]), - withPath(path): { inlineVolumeSpec+: { glusterfs+: { path: path } } }, - '#withReadOnly':: d.fn(help='"ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions. Defaults to false. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { inlineVolumeSpec+: { glusterfs+: { readOnly: readOnly } } }, - }, - '#hostPath':: d.obj(help='"Represents a host path mapped into a pod. Host path volumes do not support ownership management or SELinux relabeling."'), - hostPath: { - '#withPath':: d.fn(help='"Path of the directory on the host. If the path is a symlink, it will follow the link to the real path. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath"', args=[d.arg(name='path', type=d.T.string)]), - withPath(path): { inlineVolumeSpec+: { hostPath+: { path: path } } }, - '#withType':: d.fn(help='"Type for HostPath Volume Defaults to \\"\\" More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath"', args=[d.arg(name='type', type=d.T.string)]), - withType(type): { inlineVolumeSpec+: { hostPath+: { type: type } } }, - }, - '#iscsi':: d.obj(help='"ISCSIPersistentVolumeSource represents an ISCSI disk. ISCSI volumes can only be mounted as read/write once. ISCSI volumes support ownership management and SELinux relabeling."'), - iscsi: { - '#secretRef':: d.obj(help='"SecretReference represents a Secret Reference. 
It has enough information to retrieve secret in any namespace"'), - secretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { inlineVolumeSpec+: { iscsi+: { secretRef+: { name: name } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { inlineVolumeSpec+: { iscsi+: { secretRef+: { namespace: namespace } } } }, - }, - '#withChapAuthDiscovery':: d.fn(help='"whether support iSCSI Discovery CHAP authentication"', args=[d.arg(name='chapAuthDiscovery', type=d.T.boolean)]), - withChapAuthDiscovery(chapAuthDiscovery): { inlineVolumeSpec+: { iscsi+: { chapAuthDiscovery: chapAuthDiscovery } } }, - '#withChapAuthSession':: d.fn(help='"whether support iSCSI Session CHAP authentication"', args=[d.arg(name='chapAuthSession', type=d.T.boolean)]), - withChapAuthSession(chapAuthSession): { inlineVolumeSpec+: { iscsi+: { chapAuthSession: chapAuthSession } } }, - '#withFsType':: d.fn(help='"Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi"', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { inlineVolumeSpec+: { iscsi+: { fsType: fsType } } }, - '#withInitiatorName':: d.fn(help='"Custom iSCSI Initiator Name. If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface : will be created for the connection."', args=[d.arg(name='initiatorName', type=d.T.string)]), - withInitiatorName(initiatorName): { inlineVolumeSpec+: { iscsi+: { initiatorName: initiatorName } } }, - '#withIqn':: d.fn(help='"Target iSCSI Qualified Name."', args=[d.arg(name='iqn', type=d.T.string)]), - withIqn(iqn): { inlineVolumeSpec+: { iscsi+: { iqn: iqn } } }, - '#withIscsiInterface':: d.fn(help="\"iSCSI Interface Name that uses an iSCSI transport. Defaults to 'default' (tcp).\"", args=[d.arg(name='iscsiInterface', type=d.T.string)]), - withIscsiInterface(iscsiInterface): { inlineVolumeSpec+: { iscsi+: { iscsiInterface: iscsiInterface } } }, - '#withLun':: d.fn(help='"iSCSI Target Lun number."', args=[d.arg(name='lun', type=d.T.integer)]), - withLun(lun): { inlineVolumeSpec+: { iscsi+: { lun: lun } } }, - '#withPortals':: d.fn(help='"iSCSI Target Portal List. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260)."', args=[d.arg(name='portals', type=d.T.array)]), - withPortals(portals): { inlineVolumeSpec+: { iscsi+: { portals: if std.isArray(v=portals) then portals else [portals] } } }, - '#withPortalsMixin':: d.fn(help='"iSCSI Target Portal List. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260)."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='portals', type=d.T.array)]), - withPortalsMixin(portals): { inlineVolumeSpec+: { iscsi+: { portals+: if std.isArray(v=portals) then portals else [portals] } } }, - '#withReadOnly':: d.fn(help='"ReadOnly here will force the ReadOnly setting in VolumeMounts. 
Defaults to false."', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { inlineVolumeSpec+: { iscsi+: { readOnly: readOnly } } }, - '#withTargetPortal':: d.fn(help='"iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260)."', args=[d.arg(name='targetPortal', type=d.T.string)]), - withTargetPortal(targetPortal): { inlineVolumeSpec+: { iscsi+: { targetPortal: targetPortal } } }, - }, - '#local':: d.obj(help='"Local represents directly-attached storage with node affinity (Beta feature)"'), - 'local': { - '#withFsType':: d.fn(help='"Filesystem type to mount. It applies only when the Path is a block device. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". The default value is to auto-select a fileystem if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { inlineVolumeSpec+: { 'local'+: { fsType: fsType } } }, - '#withPath':: d.fn(help='"The full path to the volume on the node. It can be either a directory or block device (disk, partition, ...)."', args=[d.arg(name='path', type=d.T.string)]), - withPath(path): { inlineVolumeSpec+: { 'local'+: { path: path } } }, - }, - '#nfs':: d.obj(help='"Represents an NFS mount that lasts the lifetime of a pod. NFS volumes do not support ownership management or SELinux relabeling."'), - nfs: { - '#withPath':: d.fn(help='"Path that is exported by the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs"', args=[d.arg(name='path', type=d.T.string)]), - withPath(path): { inlineVolumeSpec+: { nfs+: { path: path } } }, - '#withReadOnly':: d.fn(help='"ReadOnly here will force the NFS export to be mounted with read-only permissions. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs"', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { inlineVolumeSpec+: { nfs+: { readOnly: readOnly } } }, - '#withServer':: d.fn(help='"Server is the hostname or IP address of the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs"', args=[d.arg(name='server', type=d.T.string)]), - withServer(server): { inlineVolumeSpec+: { nfs+: { server: server } } }, - }, - '#nodeAffinity':: d.obj(help='"VolumeNodeAffinity defines constraints that limit what nodes this volume can be accessed from."'), - nodeAffinity: { - '#required':: d.obj(help='"A node selector represents the union of the results of one or more label queries over a set of nodes; that is, it represents the OR of the selectors represented by the node selector terms."'), - required: { - '#withNodeSelectorTerms':: d.fn(help='"Required. A list of node selector terms. The terms are ORed."', args=[d.arg(name='nodeSelectorTerms', type=d.T.array)]), - withNodeSelectorTerms(nodeSelectorTerms): { inlineVolumeSpec+: { nodeAffinity+: { required+: { nodeSelectorTerms: if std.isArray(v=nodeSelectorTerms) then nodeSelectorTerms else [nodeSelectorTerms] } } } }, - '#withNodeSelectorTermsMixin':: d.fn(help='"Required. A list of node selector terms. 
The terms are ORed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='nodeSelectorTerms', type=d.T.array)]), - withNodeSelectorTermsMixin(nodeSelectorTerms): { inlineVolumeSpec+: { nodeAffinity+: { required+: { nodeSelectorTerms+: if std.isArray(v=nodeSelectorTerms) then nodeSelectorTerms else [nodeSelectorTerms] } } } }, - }, - }, - '#photonPersistentDisk':: d.obj(help='"Represents a Photon Controller persistent disk resource."'), - photonPersistentDisk: { - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { inlineVolumeSpec+: { photonPersistentDisk+: { fsType: fsType } } }, - '#withPdID':: d.fn(help='"ID that identifies Photon Controller persistent disk"', args=[d.arg(name='pdID', type=d.T.string)]), - withPdID(pdID): { inlineVolumeSpec+: { photonPersistentDisk+: { pdID: pdID } } }, - }, - '#portworxVolume':: d.obj(help='"PortworxVolumeSource represents a Portworx volume resource."'), - portworxVolume: { - '#withFsType':: d.fn(help='"FSType represents the filesystem type to mount Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { inlineVolumeSpec+: { portworxVolume+: { fsType: fsType } } }, - '#withReadOnly':: d.fn(help='"Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { inlineVolumeSpec+: { portworxVolume+: { readOnly: readOnly } } }, - '#withVolumeID':: d.fn(help='"VolumeID uniquely identifies a Portworx volume"', args=[d.arg(name='volumeID', type=d.T.string)]), - withVolumeID(volumeID): { inlineVolumeSpec+: { portworxVolume+: { volumeID: volumeID } } }, - }, - '#quobyte':: d.obj(help='"Represents a Quobyte mount that lasts the lifetime of a pod. Quobyte volumes do not support ownership management or SELinux relabeling."'), - quobyte: { - '#withGroup':: d.fn(help='"Group to map volume access to Default is no group"', args=[d.arg(name='group', type=d.T.string)]), - withGroup(group): { inlineVolumeSpec+: { quobyte+: { group: group } } }, - '#withReadOnly':: d.fn(help='"ReadOnly here will force the Quobyte volume to be mounted with read-only permissions. 
Defaults to false."', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { inlineVolumeSpec+: { quobyte+: { readOnly: readOnly } } }, - '#withRegistry':: d.fn(help='"Registry represents a single or multiple Quobyte Registry services specified as a string as host:port pair (multiple entries are separated with commas) which acts as the central registry for volumes"', args=[d.arg(name='registry', type=d.T.string)]), - withRegistry(registry): { inlineVolumeSpec+: { quobyte+: { registry: registry } } }, - '#withTenant':: d.fn(help='"Tenant owning the given Quobyte volume in the Backend Used with dynamically provisioned Quobyte volumes, value is set by the plugin"', args=[d.arg(name='tenant', type=d.T.string)]), - withTenant(tenant): { inlineVolumeSpec+: { quobyte+: { tenant: tenant } } }, - '#withUser':: d.fn(help='"User to map volume access to Defaults to serivceaccount user"', args=[d.arg(name='user', type=d.T.string)]), - withUser(user): { inlineVolumeSpec+: { quobyte+: { user: user } } }, - '#withVolume':: d.fn(help='"Volume is a string that references an already created Quobyte volume by name."', args=[d.arg(name='volume', type=d.T.string)]), - withVolume(volume): { inlineVolumeSpec+: { quobyte+: { volume: volume } } }, - }, - '#rbd':: d.obj(help='"Represents a Rados Block Device mount that lasts the lifetime of a pod. RBD volumes support ownership management and SELinux relabeling."'), - rbd: { - '#secretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), - secretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { inlineVolumeSpec+: { rbd+: { secretRef+: { name: name } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { inlineVolumeSpec+: { rbd+: { secretRef+: { namespace: namespace } } } }, - }, - '#withFsType':: d.fn(help='"Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd"', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { inlineVolumeSpec+: { rbd+: { fsType: fsType } } }, - '#withImage':: d.fn(help='"The rados image name. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='image', type=d.T.string)]), - withImage(image): { inlineVolumeSpec+: { rbd+: { image: image } } }, - '#withKeyring':: d.fn(help='"Keyring is the path to key ring for RBDUser. Default is /etc/ceph/keyring. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='keyring', type=d.T.string)]), - withKeyring(keyring): { inlineVolumeSpec+: { rbd+: { keyring: keyring } } }, - '#withMonitors':: d.fn(help='"A collection of Ceph monitors. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='monitors', type=d.T.array)]), - withMonitors(monitors): { inlineVolumeSpec+: { rbd+: { monitors: if std.isArray(v=monitors) then monitors else [monitors] } } }, - '#withMonitorsMixin':: d.fn(help='"A collection of Ceph monitors. 
More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='monitors', type=d.T.array)]), - withMonitorsMixin(monitors): { inlineVolumeSpec+: { rbd+: { monitors+: if std.isArray(v=monitors) then monitors else [monitors] } } }, - '#withPool':: d.fn(help='"The rados pool name. Default is rbd. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='pool', type=d.T.string)]), - withPool(pool): { inlineVolumeSpec+: { rbd+: { pool: pool } } }, - '#withReadOnly':: d.fn(help='"ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { inlineVolumeSpec+: { rbd+: { readOnly: readOnly } } }, - '#withUser':: d.fn(help='"The rados user name. Default is admin. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='user', type=d.T.string)]), - withUser(user): { inlineVolumeSpec+: { rbd+: { user: user } } }, - }, - '#scaleIO':: d.obj(help='"ScaleIOPersistentVolumeSource represents a persistent ScaleIO volume"'), - scaleIO: { - '#secretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), - secretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { inlineVolumeSpec+: { scaleIO+: { secretRef+: { name: name } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { inlineVolumeSpec+: { scaleIO+: { secretRef+: { namespace: namespace } } } }, - }, - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Default is \\"xfs\\', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { inlineVolumeSpec+: { scaleIO+: { fsType: fsType } } }, - '#withGateway':: d.fn(help='"The host address of the ScaleIO API Gateway."', args=[d.arg(name='gateway', type=d.T.string)]), - withGateway(gateway): { inlineVolumeSpec+: { scaleIO+: { gateway: gateway } } }, - '#withProtectionDomain':: d.fn(help='"The name of the ScaleIO Protection Domain for the configured storage."', args=[d.arg(name='protectionDomain', type=d.T.string)]), - withProtectionDomain(protectionDomain): { inlineVolumeSpec+: { scaleIO+: { protectionDomain: protectionDomain } } }, - '#withReadOnly':: d.fn(help='"Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { inlineVolumeSpec+: { scaleIO+: { readOnly: readOnly } } }, - '#withSslEnabled':: d.fn(help='"Flag to enable/disable SSL communication with Gateway, default false"', args=[d.arg(name='sslEnabled', type=d.T.boolean)]), - withSslEnabled(sslEnabled): { inlineVolumeSpec+: { scaleIO+: { sslEnabled: sslEnabled } } }, - '#withStorageMode':: d.fn(help='"Indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. 
Default is ThinProvisioned."', args=[d.arg(name='storageMode', type=d.T.string)]), - withStorageMode(storageMode): { inlineVolumeSpec+: { scaleIO+: { storageMode: storageMode } } }, - '#withStoragePool':: d.fn(help='"The ScaleIO Storage Pool associated with the protection domain."', args=[d.arg(name='storagePool', type=d.T.string)]), - withStoragePool(storagePool): { inlineVolumeSpec+: { scaleIO+: { storagePool: storagePool } } }, - '#withSystem':: d.fn(help='"The name of the storage system as configured in ScaleIO."', args=[d.arg(name='system', type=d.T.string)]), - withSystem(system): { inlineVolumeSpec+: { scaleIO+: { system: system } } }, - '#withVolumeName':: d.fn(help='"The name of a volume already created in the ScaleIO system that is associated with this volume source."', args=[d.arg(name='volumeName', type=d.T.string)]), - withVolumeName(volumeName): { inlineVolumeSpec+: { scaleIO+: { volumeName: volumeName } } }, - }, - '#storageos':: d.obj(help='"Represents a StorageOS persistent volume resource."'), - storageos: { - '#secretRef':: d.obj(help='"ObjectReference contains enough information to let you inspect or modify the referred object."'), - secretRef: { - '#withApiVersion':: d.fn(help='"API version of the referent."', args=[d.arg(name='apiVersion', type=d.T.string)]), - withApiVersion(apiVersion): { inlineVolumeSpec+: { storageos+: { secretRef+: { apiVersion: apiVersion } } } }, - '#withFieldPath':: d.fn(help='"If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. For example, if the object reference is to a container within a pod, this would take on a value like: \\"spec.containers{name}\\" (where \\"name\\" refers to the name of the container that triggered the event) or if no container name is specified \\"spec.containers[2]\\" (container with index 2 in this pod). This syntax is chosen only to have some well-defined way of referencing a part of an object."', args=[d.arg(name='fieldPath', type=d.T.string)]), - withFieldPath(fieldPath): { inlineVolumeSpec+: { storageos+: { secretRef+: { fieldPath: fieldPath } } } }, - '#withKind':: d.fn(help='"Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds"', args=[d.arg(name='kind', type=d.T.string)]), - withKind(kind): { inlineVolumeSpec+: { storageos+: { secretRef+: { kind: kind } } } }, - '#withName':: d.fn(help='"Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { inlineVolumeSpec+: { storageos+: { secretRef+: { name: name } } } }, - '#withNamespace':: d.fn(help='"Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/"', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { inlineVolumeSpec+: { storageos+: { secretRef+: { namespace: namespace } } } }, - '#withResourceVersion':: d.fn(help='"Specific resourceVersion to which this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), - withResourceVersion(resourceVersion): { inlineVolumeSpec+: { storageos+: { secretRef+: { resourceVersion: resourceVersion } } } }, - '#withUid':: d.fn(help='"UID of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids"', args=[d.arg(name='uid', type=d.T.string)]), - withUid(uid): { inlineVolumeSpec+: { storageos+: { secretRef+: { uid: uid } } } }, - }, - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { inlineVolumeSpec+: { storageos+: { fsType: fsType } } }, - '#withReadOnly':: d.fn(help='"Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { inlineVolumeSpec+: { storageos+: { readOnly: readOnly } } }, - '#withVolumeName':: d.fn(help='"VolumeName is the human-readable name of the StorageOS volume. Volume names are only unique within a namespace."', args=[d.arg(name='volumeName', type=d.T.string)]), - withVolumeName(volumeName): { inlineVolumeSpec+: { storageos+: { volumeName: volumeName } } }, - '#withVolumeNamespace':: d.fn(help="\"VolumeNamespace specifies the scope of the volume within StorageOS. If no namespace is specified then the Pod's namespace will be used. This allows the Kubernetes name scoping to be mirrored within StorageOS for tighter integration. Set VolumeName to any name to override the default behaviour. Set to \\\"default\\\" if you are not using namespaces within StorageOS. Namespaces that do not pre-exist within StorageOS will be created.\"", args=[d.arg(name='volumeNamespace', type=d.T.string)]), - withVolumeNamespace(volumeNamespace): { inlineVolumeSpec+: { storageos+: { volumeNamespace: volumeNamespace } } }, - }, - '#vsphereVolume':: d.obj(help='"Represents a vSphere volume resource."'), - vsphereVolume: { - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { inlineVolumeSpec+: { vsphereVolume+: { fsType: fsType } } }, - '#withStoragePolicyID':: d.fn(help='"Storage Policy Based Management (SPBM) profile ID associated with the StoragePolicyName."', args=[d.arg(name='storagePolicyID', type=d.T.string)]), - withStoragePolicyID(storagePolicyID): { inlineVolumeSpec+: { vsphereVolume+: { storagePolicyID: storagePolicyID } } }, - '#withStoragePolicyName':: d.fn(help='"Storage Policy Based Management (SPBM) profile name."', args=[d.arg(name='storagePolicyName', type=d.T.string)]), - withStoragePolicyName(storagePolicyName): { inlineVolumeSpec+: { vsphereVolume+: { storagePolicyName: storagePolicyName } } }, - '#withVolumePath':: d.fn(help='"Path that identifies vSphere volume vmdk"', args=[d.arg(name='volumePath', type=d.T.string)]), - withVolumePath(volumePath): { inlineVolumeSpec+: { vsphereVolume+: { volumePath: volumePath } } }, - }, - '#withAccessModes':: d.fn(help='"AccessModes contains all ways the volume can be mounted. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes"', args=[d.arg(name='accessModes', type=d.T.array)]), - withAccessModes(accessModes): { inlineVolumeSpec+: { accessModes: if std.isArray(v=accessModes) then accessModes else [accessModes] } }, - '#withAccessModesMixin':: d.fn(help='"AccessModes contains all ways the volume can be mounted. 
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='accessModes', type=d.T.array)]), - withAccessModesMixin(accessModes): { inlineVolumeSpec+: { accessModes+: if std.isArray(v=accessModes) then accessModes else [accessModes] } }, - '#withCapacity':: d.fn(help="\"A description of the persistent volume's resources and capacity. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity\"", args=[d.arg(name='capacity', type=d.T.object)]), - withCapacity(capacity): { inlineVolumeSpec+: { capacity: capacity } }, - '#withCapacityMixin':: d.fn(help="\"A description of the persistent volume's resources and capacity. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='capacity', type=d.T.object)]), - withCapacityMixin(capacity): { inlineVolumeSpec+: { capacity+: capacity } }, - '#withMountOptions':: d.fn(help='"A list of mount options, e.g. [\\"ro\\", \\"soft\\"]. Not validated - mount will simply fail if one is invalid. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#mount-options"', args=[d.arg(name='mountOptions', type=d.T.array)]), - withMountOptions(mountOptions): { inlineVolumeSpec+: { mountOptions: if std.isArray(v=mountOptions) then mountOptions else [mountOptions] } }, - '#withMountOptionsMixin':: d.fn(help='"A list of mount options, e.g. [\\"ro\\", \\"soft\\"]. Not validated - mount will simply fail if one is invalid. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#mount-options"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='mountOptions', type=d.T.array)]), - withMountOptionsMixin(mountOptions): { inlineVolumeSpec+: { mountOptions+: if std.isArray(v=mountOptions) then mountOptions else [mountOptions] } }, - '#withPersistentVolumeReclaimPolicy':: d.fn(help='"What happens to a persistent volume when released from its claim. Valid options are Retain (default for manually created PersistentVolumes), Delete (default for dynamically provisioned PersistentVolumes), and Recycle (deprecated). Recycle must be supported by the volume plugin underlying this PersistentVolume. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#reclaiming"', args=[d.arg(name='persistentVolumeReclaimPolicy', type=d.T.string)]), - withPersistentVolumeReclaimPolicy(persistentVolumeReclaimPolicy): { inlineVolumeSpec+: { persistentVolumeReclaimPolicy: persistentVolumeReclaimPolicy } }, - '#withStorageClassName':: d.fn(help='"Name of StorageClass to which this persistent volume belongs. Empty value means that this volume does not belong to any StorageClass."', args=[d.arg(name='storageClassName', type=d.T.string)]), - withStorageClassName(storageClassName): { inlineVolumeSpec+: { storageClassName: storageClassName } }, - '#withVolumeMode':: d.fn(help='"volumeMode defines if a volume is intended to be used with a formatted filesystem or to remain in raw block state. 
Value of Filesystem is implied when not included in spec."', args=[d.arg(name='volumeMode', type=d.T.string)]), - withVolumeMode(volumeMode): { inlineVolumeSpec+: { volumeMode: volumeMode } }, - }, - '#withPersistentVolumeName':: d.fn(help='"Name of the persistent volume to attach."', args=[d.arg(name='persistentVolumeName', type=d.T.string)]), - withPersistentVolumeName(persistentVolumeName): { persistentVolumeName: persistentVolumeName }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1alpha1/volumeAttachmentSpec.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1alpha1/volumeAttachmentSpec.libsonnet deleted file mode 100644 index 8b092052a56..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1alpha1/volumeAttachmentSpec.libsonnet +++ /dev/null @@ -1,426 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='volumeAttachmentSpec', url='', help='"VolumeAttachmentSpec is the specification of a VolumeAttachment request."'), - '#source':: d.obj(help='"VolumeAttachmentSource represents a volume that should be attached. Right now only PersistenVolumes can be attached via external attacher, in future we may allow also inline volumes in pods. Exactly one member can be set."'), - source: { - '#inlineVolumeSpec':: d.obj(help='"PersistentVolumeSpec is the specification of a persistent volume."'), - inlineVolumeSpec: { - '#awsElasticBlockStore':: d.obj(help='"Represents a Persistent Disk resource in AWS.\\n\\nAn AWS EBS disk must exist before mounting to a container. The disk must also be in the same AWS zone as the kubelet. An AWS EBS disk can only be mounted as read/write once. AWS EBS volumes support ownership management and SELinux relabeling."'), - awsElasticBlockStore: { - '#withFsType':: d.fn(help='"Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore"', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { source+: { inlineVolumeSpec+: { awsElasticBlockStore+: { fsType: fsType } } } }, - '#withPartition':: d.fn(help='"The partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \\"1\\". Similarly, the volume partition for /dev/sda is \\"0\\" (or you can leave the property empty)."', args=[d.arg(name='partition', type=d.T.integer)]), - withPartition(partition): { source+: { inlineVolumeSpec+: { awsElasticBlockStore+: { partition: partition } } } }, - '#withReadOnly':: d.fn(help='"Specify \\"true\\" to force and set the ReadOnly property in VolumeMounts to \\"true\\". If omitted, the default is \\"false\\". More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore"', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { source+: { inlineVolumeSpec+: { awsElasticBlockStore+: { readOnly: readOnly } } } }, - '#withVolumeID':: d.fn(help='"Unique ID of the persistent disk resource in AWS (Amazon EBS volume). 
More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore"', args=[d.arg(name='volumeID', type=d.T.string)]), - withVolumeID(volumeID): { source+: { inlineVolumeSpec+: { awsElasticBlockStore+: { volumeID: volumeID } } } }, - }, - '#azureDisk':: d.obj(help='"AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod."'), - azureDisk: { - '#withCachingMode':: d.fn(help='"Host Caching mode: None, Read Only, Read Write."', args=[d.arg(name='cachingMode', type=d.T.string)]), - withCachingMode(cachingMode): { source+: { inlineVolumeSpec+: { azureDisk+: { cachingMode: cachingMode } } } }, - '#withDiskName':: d.fn(help='"The Name of the data disk in the blob storage"', args=[d.arg(name='diskName', type=d.T.string)]), - withDiskName(diskName): { source+: { inlineVolumeSpec+: { azureDisk+: { diskName: diskName } } } }, - '#withDiskURI':: d.fn(help='"The URI the data disk in the blob storage"', args=[d.arg(name='diskURI', type=d.T.string)]), - withDiskURI(diskURI): { source+: { inlineVolumeSpec+: { azureDisk+: { diskURI: diskURI } } } }, - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { source+: { inlineVolumeSpec+: { azureDisk+: { fsType: fsType } } } }, - '#withKind':: d.fn(help='"Expected values Shared: multiple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared"', args=[d.arg(name='kind', type=d.T.string)]), - withKind(kind): { source+: { inlineVolumeSpec+: { azureDisk+: { kind: kind } } } }, - '#withReadOnly':: d.fn(help='"Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { source+: { inlineVolumeSpec+: { azureDisk+: { readOnly: readOnly } } } }, - }, - '#azureFile':: d.obj(help='"AzureFile represents an Azure File Service mount on the host and bind mount to the pod."'), - azureFile: { - '#withReadOnly':: d.fn(help='"Defaults to false (read/write). 
ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { source+: { inlineVolumeSpec+: { azureFile+: { readOnly: readOnly } } } }, - '#withSecretName':: d.fn(help='"the name of secret that contains Azure Storage Account Name and Key"', args=[d.arg(name='secretName', type=d.T.string)]), - withSecretName(secretName): { source+: { inlineVolumeSpec+: { azureFile+: { secretName: secretName } } } }, - '#withSecretNamespace':: d.fn(help='"the namespace of the secret that contains Azure Storage Account Name and Key default is the same as the Pod"', args=[d.arg(name='secretNamespace', type=d.T.string)]), - withSecretNamespace(secretNamespace): { source+: { inlineVolumeSpec+: { azureFile+: { secretNamespace: secretNamespace } } } }, - '#withShareName':: d.fn(help='"Share Name"', args=[d.arg(name='shareName', type=d.T.string)]), - withShareName(shareName): { source+: { inlineVolumeSpec+: { azureFile+: { shareName: shareName } } } }, - }, - '#cephfs':: d.obj(help='"Represents a Ceph Filesystem mount that lasts the lifetime of a pod Cephfs volumes do not support ownership management or SELinux relabeling."'), - cephfs: { - '#secretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), - secretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { source+: { inlineVolumeSpec+: { cephfs+: { secretRef+: { name: name } } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { source+: { inlineVolumeSpec+: { cephfs+: { secretRef+: { namespace: namespace } } } } }, - }, - '#withMonitors':: d.fn(help='"Required: Monitors is a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='monitors', type=d.T.array)]), - withMonitors(monitors): { source+: { inlineVolumeSpec+: { cephfs+: { monitors: if std.isArray(v=monitors) then monitors else [monitors] } } } }, - '#withMonitorsMixin':: d.fn(help='"Required: Monitors is a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='monitors', type=d.T.array)]), - withMonitorsMixin(monitors): { source+: { inlineVolumeSpec+: { cephfs+: { monitors+: if std.isArray(v=monitors) then monitors else [monitors] } } } }, - '#withPath':: d.fn(help='"Optional: Used as the mounted root, rather than the full Ceph tree, default is /"', args=[d.arg(name='path', type=d.T.string)]), - withPath(path): { source+: { inlineVolumeSpec+: { cephfs+: { path: path } } } }, - '#withReadOnly':: d.fn(help='"Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. 
More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { source+: { inlineVolumeSpec+: { cephfs+: { readOnly: readOnly } } } }, - '#withSecretFile':: d.fn(help='"Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='secretFile', type=d.T.string)]), - withSecretFile(secretFile): { source+: { inlineVolumeSpec+: { cephfs+: { secretFile: secretFile } } } }, - '#withUser':: d.fn(help='"Optional: User is the rados user name, default is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='user', type=d.T.string)]), - withUser(user): { source+: { inlineVolumeSpec+: { cephfs+: { user: user } } } }, - }, - '#cinder':: d.obj(help='"Represents a cinder volume resource in Openstack. A Cinder volume must exist before mounting to a container. The volume must also be in the same region as the kubelet. Cinder volumes support ownership management and SELinux relabeling."'), - cinder: { - '#secretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), - secretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { source+: { inlineVolumeSpec+: { cinder+: { secretRef+: { name: name } } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { source+: { inlineVolumeSpec+: { cinder+: { secretRef+: { namespace: namespace } } } } }, - }, - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://examples.k8s.io/mysql-cinder-pd/README.md"', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { source+: { inlineVolumeSpec+: { cinder+: { fsType: fsType } } } }, - '#withReadOnly':: d.fn(help='"Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/mysql-cinder-pd/README.md"', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { source+: { inlineVolumeSpec+: { cinder+: { readOnly: readOnly } } } }, - '#withVolumeID':: d.fn(help='"volume id used to identify the volume in cinder. More info: https://examples.k8s.io/mysql-cinder-pd/README.md"', args=[d.arg(name='volumeID', type=d.T.string)]), - withVolumeID(volumeID): { source+: { inlineVolumeSpec+: { cinder+: { volumeID: volumeID } } } }, - }, - '#claimRef':: d.obj(help='"ObjectReference contains enough information to let you inspect or modify the referred object."'), - claimRef: { - '#withApiVersion':: d.fn(help='"API version of the referent."', args=[d.arg(name='apiVersion', type=d.T.string)]), - withApiVersion(apiVersion): { source+: { inlineVolumeSpec+: { claimRef+: { apiVersion: apiVersion } } } }, - '#withFieldPath':: d.fn(help='"If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. 
For example, if the object reference is to a container within a pod, this would take on a value like: \\"spec.containers{name}\\" (where \\"name\\" refers to the name of the container that triggered the event) or if no container name is specified \\"spec.containers[2]\\" (container with index 2 in this pod). This syntax is chosen only to have some well-defined way of referencing a part of an object."', args=[d.arg(name='fieldPath', type=d.T.string)]), - withFieldPath(fieldPath): { source+: { inlineVolumeSpec+: { claimRef+: { fieldPath: fieldPath } } } }, - '#withKind':: d.fn(help='"Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds"', args=[d.arg(name='kind', type=d.T.string)]), - withKind(kind): { source+: { inlineVolumeSpec+: { claimRef+: { kind: kind } } } }, - '#withName':: d.fn(help='"Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { source+: { inlineVolumeSpec+: { claimRef+: { name: name } } } }, - '#withNamespace':: d.fn(help='"Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/"', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { source+: { inlineVolumeSpec+: { claimRef+: { namespace: namespace } } } }, - '#withResourceVersion':: d.fn(help='"Specific resourceVersion to which this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), - withResourceVersion(resourceVersion): { source+: { inlineVolumeSpec+: { claimRef+: { resourceVersion: resourceVersion } } } }, - '#withUid':: d.fn(help='"UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids"', args=[d.arg(name='uid', type=d.T.string)]), - withUid(uid): { source+: { inlineVolumeSpec+: { claimRef+: { uid: uid } } } }, - }, - '#csi':: d.obj(help='"Represents storage that is managed by an external CSI volume driver (Beta feature)"'), - csi: { - '#controllerExpandSecretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), - controllerExpandSecretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { source+: { inlineVolumeSpec+: { csi+: { controllerExpandSecretRef+: { name: name } } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { source+: { inlineVolumeSpec+: { csi+: { controllerExpandSecretRef+: { namespace: namespace } } } } }, - }, - '#controllerPublishSecretRef':: d.obj(help='"SecretReference represents a Secret Reference. 
It has enough information to retrieve secret in any namespace"'), - controllerPublishSecretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { source+: { inlineVolumeSpec+: { csi+: { controllerPublishSecretRef+: { name: name } } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { source+: { inlineVolumeSpec+: { csi+: { controllerPublishSecretRef+: { namespace: namespace } } } } }, - }, - '#nodePublishSecretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), - nodePublishSecretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { source+: { inlineVolumeSpec+: { csi+: { nodePublishSecretRef+: { name: name } } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { source+: { inlineVolumeSpec+: { csi+: { nodePublishSecretRef+: { namespace: namespace } } } } }, - }, - '#nodeStageSecretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), - nodeStageSecretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { source+: { inlineVolumeSpec+: { csi+: { nodeStageSecretRef+: { name: name } } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { source+: { inlineVolumeSpec+: { csi+: { nodeStageSecretRef+: { namespace: namespace } } } } }, - }, - '#withDriver':: d.fn(help='"Driver is the name of the driver to use for this volume. Required."', args=[d.arg(name='driver', type=d.T.string)]), - withDriver(driver): { source+: { inlineVolumeSpec+: { csi+: { driver: driver } } } }, - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\"."', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { source+: { inlineVolumeSpec+: { csi+: { fsType: fsType } } } }, - '#withReadOnly':: d.fn(help='"Optional: The value to pass to ControllerPublishVolumeRequest. 
Defaults to false (read/write)."', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { source+: { inlineVolumeSpec+: { csi+: { readOnly: readOnly } } } }, - '#withVolumeAttributes':: d.fn(help='"Attributes of the volume to publish."', args=[d.arg(name='volumeAttributes', type=d.T.object)]), - withVolumeAttributes(volumeAttributes): { source+: { inlineVolumeSpec+: { csi+: { volumeAttributes: volumeAttributes } } } }, - '#withVolumeAttributesMixin':: d.fn(help='"Attributes of the volume to publish."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='volumeAttributes', type=d.T.object)]), - withVolumeAttributesMixin(volumeAttributes): { source+: { inlineVolumeSpec+: { csi+: { volumeAttributes+: volumeAttributes } } } }, - '#withVolumeHandle':: d.fn(help='"VolumeHandle is the unique volume name returned by the CSI volume plugin’s CreateVolume to refer to the volume on all subsequent calls. Required."', args=[d.arg(name='volumeHandle', type=d.T.string)]), - withVolumeHandle(volumeHandle): { source+: { inlineVolumeSpec+: { csi+: { volumeHandle: volumeHandle } } } }, - }, - '#fc':: d.obj(help='"Represents a Fibre Channel volume. Fibre Channel volumes can only be mounted as read/write once. Fibre Channel volumes support ownership management and SELinux relabeling."'), - fc: { - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { source+: { inlineVolumeSpec+: { fc+: { fsType: fsType } } } }, - '#withLun':: d.fn(help='"Optional: FC target lun number"', args=[d.arg(name='lun', type=d.T.integer)]), - withLun(lun): { source+: { inlineVolumeSpec+: { fc+: { lun: lun } } } }, - '#withReadOnly':: d.fn(help='"Optional: Defaults to false (read/write). 
ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { source+: { inlineVolumeSpec+: { fc+: { readOnly: readOnly } } } }, - '#withTargetWWNs':: d.fn(help='"Optional: FC target worldwide names (WWNs)"', args=[d.arg(name='targetWWNs', type=d.T.array)]), - withTargetWWNs(targetWWNs): { source+: { inlineVolumeSpec+: { fc+: { targetWWNs: if std.isArray(v=targetWWNs) then targetWWNs else [targetWWNs] } } } }, - '#withTargetWWNsMixin':: d.fn(help='"Optional: FC target worldwide names (WWNs)"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='targetWWNs', type=d.T.array)]), - withTargetWWNsMixin(targetWWNs): { source+: { inlineVolumeSpec+: { fc+: { targetWWNs+: if std.isArray(v=targetWWNs) then targetWWNs else [targetWWNs] } } } }, - '#withWwids':: d.fn(help='"Optional: FC volume world wide identifiers (wwids) Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously."', args=[d.arg(name='wwids', type=d.T.array)]), - withWwids(wwids): { source+: { inlineVolumeSpec+: { fc+: { wwids: if std.isArray(v=wwids) then wwids else [wwids] } } } }, - '#withWwidsMixin':: d.fn(help='"Optional: FC volume world wide identifiers (wwids) Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='wwids', type=d.T.array)]), - withWwidsMixin(wwids): { source+: { inlineVolumeSpec+: { fc+: { wwids+: if std.isArray(v=wwids) then wwids else [wwids] } } } }, - }, - '#flexVolume':: d.obj(help='"FlexPersistentVolumeSource represents a generic persistent volume resource that is provisioned/attached using an exec based plugin."'), - flexVolume: { - '#secretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), - secretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { source+: { inlineVolumeSpec+: { flexVolume+: { secretRef+: { name: name } } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { source+: { inlineVolumeSpec+: { flexVolume+: { secretRef+: { namespace: namespace } } } } }, - }, - '#withDriver':: d.fn(help='"Driver is the name of the driver to use for this volume."', args=[d.arg(name='driver', type=d.T.string)]), - withDriver(driver): { source+: { inlineVolumeSpec+: { flexVolume+: { driver: driver } } } }, - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". 
The default filesystem depends on FlexVolume script."', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { source+: { inlineVolumeSpec+: { flexVolume+: { fsType: fsType } } } }, - '#withOptions':: d.fn(help='"Optional: Extra command options if any."', args=[d.arg(name='options', type=d.T.object)]), - withOptions(options): { source+: { inlineVolumeSpec+: { flexVolume+: { options: options } } } }, - '#withOptionsMixin':: d.fn(help='"Optional: Extra command options if any."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='options', type=d.T.object)]), - withOptionsMixin(options): { source+: { inlineVolumeSpec+: { flexVolume+: { options+: options } } } }, - '#withReadOnly':: d.fn(help='"Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { source+: { inlineVolumeSpec+: { flexVolume+: { readOnly: readOnly } } } }, - }, - '#flocker':: d.obj(help='"Represents a Flocker volume mounted by the Flocker agent. One and only one of datasetName and datasetUUID should be set. Flocker volumes do not support ownership management or SELinux relabeling."'), - flocker: { - '#withDatasetName':: d.fn(help='"Name of the dataset stored as metadata -> name on the dataset for Flocker should be considered as deprecated"', args=[d.arg(name='datasetName', type=d.T.string)]), - withDatasetName(datasetName): { source+: { inlineVolumeSpec+: { flocker+: { datasetName: datasetName } } } }, - '#withDatasetUUID':: d.fn(help='"UUID of the dataset. This is unique identifier of a Flocker dataset"', args=[d.arg(name='datasetUUID', type=d.T.string)]), - withDatasetUUID(datasetUUID): { source+: { inlineVolumeSpec+: { flocker+: { datasetUUID: datasetUUID } } } }, - }, - '#gcePersistentDisk':: d.obj(help='"Represents a Persistent Disk resource in Google Compute Engine.\\n\\nA GCE PD must exist before mounting to a container. The disk must also be in the same GCE project and zone as the kubelet. A GCE PD can only be mounted as read/write once or read-only many times. GCE PDs support ownership management and SELinux relabeling."'), - gcePersistentDisk: { - '#withFsType':: d.fn(help='"Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { source+: { inlineVolumeSpec+: { gcePersistentDisk+: { fsType: fsType } } } }, - '#withPartition':: d.fn(help='"The partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \\"1\\". Similarly, the volume partition for /dev/sda is \\"0\\" (or you can leave the property empty). More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='partition', type=d.T.integer)]), - withPartition(partition): { source+: { inlineVolumeSpec+: { gcePersistentDisk+: { partition: partition } } } }, - '#withPdName':: d.fn(help='"Unique name of the PD resource in GCE. Used to identify the disk in GCE. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='pdName', type=d.T.string)]), - withPdName(pdName): { source+: { inlineVolumeSpec+: { gcePersistentDisk+: { pdName: pdName } } } }, - '#withReadOnly':: d.fn(help='"ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { source+: { inlineVolumeSpec+: { gcePersistentDisk+: { readOnly: readOnly } } } }, - }, - '#glusterfs':: d.obj(help='"Represents a Glusterfs mount that lasts the lifetime of a pod. Glusterfs volumes do not support ownership management or SELinux relabeling."'), - glusterfs: { - '#withEndpoints':: d.fn(help='"EndpointsName is the endpoint name that details Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='endpoints', type=d.T.string)]), - withEndpoints(endpoints): { source+: { inlineVolumeSpec+: { glusterfs+: { endpoints: endpoints } } } }, - '#withEndpointsNamespace':: d.fn(help='"EndpointsNamespace is the namespace that contains Glusterfs endpoint. If this field is empty, the EndpointNamespace defaults to the same namespace as the bound PVC. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='endpointsNamespace', type=d.T.string)]), - withEndpointsNamespace(endpointsNamespace): { source+: { inlineVolumeSpec+: { glusterfs+: { endpointsNamespace: endpointsNamespace } } } }, - '#withPath':: d.fn(help='"Path is the Glusterfs volume path. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='path', type=d.T.string)]), - withPath(path): { source+: { inlineVolumeSpec+: { glusterfs+: { path: path } } } }, - '#withReadOnly':: d.fn(help='"ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions. Defaults to false. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { source+: { inlineVolumeSpec+: { glusterfs+: { readOnly: readOnly } } } }, - }, - '#hostPath':: d.obj(help='"Represents a host path mapped into a pod. Host path volumes do not support ownership management or SELinux relabeling."'), - hostPath: { - '#withPath':: d.fn(help='"Path of the directory on the host. If the path is a symlink, it will follow the link to the real path. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath"', args=[d.arg(name='path', type=d.T.string)]), - withPath(path): { source+: { inlineVolumeSpec+: { hostPath+: { path: path } } } }, - '#withType':: d.fn(help='"Type for HostPath Volume Defaults to \\"\\" More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath"', args=[d.arg(name='type', type=d.T.string)]), - withType(type): { source+: { inlineVolumeSpec+: { hostPath+: { type: type } } } }, - }, - '#iscsi':: d.obj(help='"ISCSIPersistentVolumeSource represents an ISCSI disk. ISCSI volumes can only be mounted as read/write once. ISCSI volumes support ownership management and SELinux relabeling."'), - iscsi: { - '#secretRef':: d.obj(help='"SecretReference represents a Secret Reference. 
It has enough information to retrieve secret in any namespace"'), - secretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { source+: { inlineVolumeSpec+: { iscsi+: { secretRef+: { name: name } } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { source+: { inlineVolumeSpec+: { iscsi+: { secretRef+: { namespace: namespace } } } } }, - }, - '#withChapAuthDiscovery':: d.fn(help='"whether support iSCSI Discovery CHAP authentication"', args=[d.arg(name='chapAuthDiscovery', type=d.T.boolean)]), - withChapAuthDiscovery(chapAuthDiscovery): { source+: { inlineVolumeSpec+: { iscsi+: { chapAuthDiscovery: chapAuthDiscovery } } } }, - '#withChapAuthSession':: d.fn(help='"whether support iSCSI Session CHAP authentication"', args=[d.arg(name='chapAuthSession', type=d.T.boolean)]), - withChapAuthSession(chapAuthSession): { source+: { inlineVolumeSpec+: { iscsi+: { chapAuthSession: chapAuthSession } } } }, - '#withFsType':: d.fn(help='"Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi"', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { source+: { inlineVolumeSpec+: { iscsi+: { fsType: fsType } } } }, - '#withInitiatorName':: d.fn(help='"Custom iSCSI Initiator Name. If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface : will be created for the connection."', args=[d.arg(name='initiatorName', type=d.T.string)]), - withInitiatorName(initiatorName): { source+: { inlineVolumeSpec+: { iscsi+: { initiatorName: initiatorName } } } }, - '#withIqn':: d.fn(help='"Target iSCSI Qualified Name."', args=[d.arg(name='iqn', type=d.T.string)]), - withIqn(iqn): { source+: { inlineVolumeSpec+: { iscsi+: { iqn: iqn } } } }, - '#withIscsiInterface':: d.fn(help="\"iSCSI Interface Name that uses an iSCSI transport. Defaults to 'default' (tcp).\"", args=[d.arg(name='iscsiInterface', type=d.T.string)]), - withIscsiInterface(iscsiInterface): { source+: { inlineVolumeSpec+: { iscsi+: { iscsiInterface: iscsiInterface } } } }, - '#withLun':: d.fn(help='"iSCSI Target Lun number."', args=[d.arg(name='lun', type=d.T.integer)]), - withLun(lun): { source+: { inlineVolumeSpec+: { iscsi+: { lun: lun } } } }, - '#withPortals':: d.fn(help='"iSCSI Target Portal List. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260)."', args=[d.arg(name='portals', type=d.T.array)]), - withPortals(portals): { source+: { inlineVolumeSpec+: { iscsi+: { portals: if std.isArray(v=portals) then portals else [portals] } } } }, - '#withPortalsMixin':: d.fn(help='"iSCSI Target Portal List. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260)."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='portals', type=d.T.array)]), - withPortalsMixin(portals): { source+: { inlineVolumeSpec+: { iscsi+: { portals+: if std.isArray(v=portals) then portals else [portals] } } } }, - '#withReadOnly':: d.fn(help='"ReadOnly here will force the ReadOnly setting in VolumeMounts. 
Defaults to false."', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { source+: { inlineVolumeSpec+: { iscsi+: { readOnly: readOnly } } } }, - '#withTargetPortal':: d.fn(help='"iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260)."', args=[d.arg(name='targetPortal', type=d.T.string)]), - withTargetPortal(targetPortal): { source+: { inlineVolumeSpec+: { iscsi+: { targetPortal: targetPortal } } } }, - }, - '#local':: d.obj(help='"Local represents directly-attached storage with node affinity (Beta feature)"'), - 'local': { - '#withFsType':: d.fn(help='"Filesystem type to mount. It applies only when the Path is a block device. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". The default value is to auto-select a fileystem if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { source+: { inlineVolumeSpec+: { 'local'+: { fsType: fsType } } } }, - '#withPath':: d.fn(help='"The full path to the volume on the node. It can be either a directory or block device (disk, partition, ...)."', args=[d.arg(name='path', type=d.T.string)]), - withPath(path): { source+: { inlineVolumeSpec+: { 'local'+: { path: path } } } }, - }, - '#nfs':: d.obj(help='"Represents an NFS mount that lasts the lifetime of a pod. NFS volumes do not support ownership management or SELinux relabeling."'), - nfs: { - '#withPath':: d.fn(help='"Path that is exported by the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs"', args=[d.arg(name='path', type=d.T.string)]), - withPath(path): { source+: { inlineVolumeSpec+: { nfs+: { path: path } } } }, - '#withReadOnly':: d.fn(help='"ReadOnly here will force the NFS export to be mounted with read-only permissions. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs"', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { source+: { inlineVolumeSpec+: { nfs+: { readOnly: readOnly } } } }, - '#withServer':: d.fn(help='"Server is the hostname or IP address of the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs"', args=[d.arg(name='server', type=d.T.string)]), - withServer(server): { source+: { inlineVolumeSpec+: { nfs+: { server: server } } } }, - }, - '#nodeAffinity':: d.obj(help='"VolumeNodeAffinity defines constraints that limit what nodes this volume can be accessed from."'), - nodeAffinity: { - '#required':: d.obj(help='"A node selector represents the union of the results of one or more label queries over a set of nodes; that is, it represents the OR of the selectors represented by the node selector terms."'), - required: { - '#withNodeSelectorTerms':: d.fn(help='"Required. A list of node selector terms. The terms are ORed."', args=[d.arg(name='nodeSelectorTerms', type=d.T.array)]), - withNodeSelectorTerms(nodeSelectorTerms): { source+: { inlineVolumeSpec+: { nodeAffinity+: { required+: { nodeSelectorTerms: if std.isArray(v=nodeSelectorTerms) then nodeSelectorTerms else [nodeSelectorTerms] } } } } }, - '#withNodeSelectorTermsMixin':: d.fn(help='"Required. A list of node selector terms. 
The terms are ORed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='nodeSelectorTerms', type=d.T.array)]), - withNodeSelectorTermsMixin(nodeSelectorTerms): { source+: { inlineVolumeSpec+: { nodeAffinity+: { required+: { nodeSelectorTerms+: if std.isArray(v=nodeSelectorTerms) then nodeSelectorTerms else [nodeSelectorTerms] } } } } }, - }, - }, - '#photonPersistentDisk':: d.obj(help='"Represents a Photon Controller persistent disk resource."'), - photonPersistentDisk: { - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { source+: { inlineVolumeSpec+: { photonPersistentDisk+: { fsType: fsType } } } }, - '#withPdID':: d.fn(help='"ID that identifies Photon Controller persistent disk"', args=[d.arg(name='pdID', type=d.T.string)]), - withPdID(pdID): { source+: { inlineVolumeSpec+: { photonPersistentDisk+: { pdID: pdID } } } }, - }, - '#portworxVolume':: d.obj(help='"PortworxVolumeSource represents a Portworx volume resource."'), - portworxVolume: { - '#withFsType':: d.fn(help='"FSType represents the filesystem type to mount Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { source+: { inlineVolumeSpec+: { portworxVolume+: { fsType: fsType } } } }, - '#withReadOnly':: d.fn(help='"Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { source+: { inlineVolumeSpec+: { portworxVolume+: { readOnly: readOnly } } } }, - '#withVolumeID':: d.fn(help='"VolumeID uniquely identifies a Portworx volume"', args=[d.arg(name='volumeID', type=d.T.string)]), - withVolumeID(volumeID): { source+: { inlineVolumeSpec+: { portworxVolume+: { volumeID: volumeID } } } }, - }, - '#quobyte':: d.obj(help='"Represents a Quobyte mount that lasts the lifetime of a pod. Quobyte volumes do not support ownership management or SELinux relabeling."'), - quobyte: { - '#withGroup':: d.fn(help='"Group to map volume access to Default is no group"', args=[d.arg(name='group', type=d.T.string)]), - withGroup(group): { source+: { inlineVolumeSpec+: { quobyte+: { group: group } } } }, - '#withReadOnly':: d.fn(help='"ReadOnly here will force the Quobyte volume to be mounted with read-only permissions. 
Defaults to false."', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { source+: { inlineVolumeSpec+: { quobyte+: { readOnly: readOnly } } } }, - '#withRegistry':: d.fn(help='"Registry represents a single or multiple Quobyte Registry services specified as a string as host:port pair (multiple entries are separated with commas) which acts as the central registry for volumes"', args=[d.arg(name='registry', type=d.T.string)]), - withRegistry(registry): { source+: { inlineVolumeSpec+: { quobyte+: { registry: registry } } } }, - '#withTenant':: d.fn(help='"Tenant owning the given Quobyte volume in the Backend Used with dynamically provisioned Quobyte volumes, value is set by the plugin"', args=[d.arg(name='tenant', type=d.T.string)]), - withTenant(tenant): { source+: { inlineVolumeSpec+: { quobyte+: { tenant: tenant } } } }, - '#withUser':: d.fn(help='"User to map volume access to Defaults to serivceaccount user"', args=[d.arg(name='user', type=d.T.string)]), - withUser(user): { source+: { inlineVolumeSpec+: { quobyte+: { user: user } } } }, - '#withVolume':: d.fn(help='"Volume is a string that references an already created Quobyte volume by name."', args=[d.arg(name='volume', type=d.T.string)]), - withVolume(volume): { source+: { inlineVolumeSpec+: { quobyte+: { volume: volume } } } }, - }, - '#rbd':: d.obj(help='"Represents a Rados Block Device mount that lasts the lifetime of a pod. RBD volumes support ownership management and SELinux relabeling."'), - rbd: { - '#secretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), - secretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { source+: { inlineVolumeSpec+: { rbd+: { secretRef+: { name: name } } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { source+: { inlineVolumeSpec+: { rbd+: { secretRef+: { namespace: namespace } } } } }, - }, - '#withFsType':: d.fn(help='"Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd"', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { source+: { inlineVolumeSpec+: { rbd+: { fsType: fsType } } } }, - '#withImage':: d.fn(help='"The rados image name. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='image', type=d.T.string)]), - withImage(image): { source+: { inlineVolumeSpec+: { rbd+: { image: image } } } }, - '#withKeyring':: d.fn(help='"Keyring is the path to key ring for RBDUser. Default is /etc/ceph/keyring. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='keyring', type=d.T.string)]), - withKeyring(keyring): { source+: { inlineVolumeSpec+: { rbd+: { keyring: keyring } } } }, - '#withMonitors':: d.fn(help='"A collection of Ceph monitors. 
More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='monitors', type=d.T.array)]), - withMonitors(monitors): { source+: { inlineVolumeSpec+: { rbd+: { monitors: if std.isArray(v=monitors) then monitors else [monitors] } } } }, - '#withMonitorsMixin':: d.fn(help='"A collection of Ceph monitors. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='monitors', type=d.T.array)]), - withMonitorsMixin(monitors): { source+: { inlineVolumeSpec+: { rbd+: { monitors+: if std.isArray(v=monitors) then monitors else [monitors] } } } }, - '#withPool':: d.fn(help='"The rados pool name. Default is rbd. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='pool', type=d.T.string)]), - withPool(pool): { source+: { inlineVolumeSpec+: { rbd+: { pool: pool } } } }, - '#withReadOnly':: d.fn(help='"ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { source+: { inlineVolumeSpec+: { rbd+: { readOnly: readOnly } } } }, - '#withUser':: d.fn(help='"The rados user name. Default is admin. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='user', type=d.T.string)]), - withUser(user): { source+: { inlineVolumeSpec+: { rbd+: { user: user } } } }, - }, - '#scaleIO':: d.obj(help='"ScaleIOPersistentVolumeSource represents a persistent ScaleIO volume"'), - scaleIO: { - '#secretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), - secretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { source+: { inlineVolumeSpec+: { scaleIO+: { secretRef+: { name: name } } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { source+: { inlineVolumeSpec+: { scaleIO+: { secretRef+: { namespace: namespace } } } } }, - }, - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Default is \\"xfs\\', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { source+: { inlineVolumeSpec+: { scaleIO+: { fsType: fsType } } } }, - '#withGateway':: d.fn(help='"The host address of the ScaleIO API Gateway."', args=[d.arg(name='gateway', type=d.T.string)]), - withGateway(gateway): { source+: { inlineVolumeSpec+: { scaleIO+: { gateway: gateway } } } }, - '#withProtectionDomain':: d.fn(help='"The name of the ScaleIO Protection Domain for the configured storage."', args=[d.arg(name='protectionDomain', type=d.T.string)]), - withProtectionDomain(protectionDomain): { source+: { inlineVolumeSpec+: { scaleIO+: { protectionDomain: protectionDomain } } } }, - '#withReadOnly':: d.fn(help='"Defaults to false (read/write). 
ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { source+: { inlineVolumeSpec+: { scaleIO+: { readOnly: readOnly } } } }, - '#withSslEnabled':: d.fn(help='"Flag to enable/disable SSL communication with Gateway, default false"', args=[d.arg(name='sslEnabled', type=d.T.boolean)]), - withSslEnabled(sslEnabled): { source+: { inlineVolumeSpec+: { scaleIO+: { sslEnabled: sslEnabled } } } }, - '#withStorageMode':: d.fn(help='"Indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. Default is ThinProvisioned."', args=[d.arg(name='storageMode', type=d.T.string)]), - withStorageMode(storageMode): { source+: { inlineVolumeSpec+: { scaleIO+: { storageMode: storageMode } } } }, - '#withStoragePool':: d.fn(help='"The ScaleIO Storage Pool associated with the protection domain."', args=[d.arg(name='storagePool', type=d.T.string)]), - withStoragePool(storagePool): { source+: { inlineVolumeSpec+: { scaleIO+: { storagePool: storagePool } } } }, - '#withSystem':: d.fn(help='"The name of the storage system as configured in ScaleIO."', args=[d.arg(name='system', type=d.T.string)]), - withSystem(system): { source+: { inlineVolumeSpec+: { scaleIO+: { system: system } } } }, - '#withVolumeName':: d.fn(help='"The name of a volume already created in the ScaleIO system that is associated with this volume source."', args=[d.arg(name='volumeName', type=d.T.string)]), - withVolumeName(volumeName): { source+: { inlineVolumeSpec+: { scaleIO+: { volumeName: volumeName } } } }, - }, - '#storageos':: d.obj(help='"Represents a StorageOS persistent volume resource."'), - storageos: { - '#secretRef':: d.obj(help='"ObjectReference contains enough information to let you inspect or modify the referred object."'), - secretRef: { - '#withApiVersion':: d.fn(help='"API version of the referent."', args=[d.arg(name='apiVersion', type=d.T.string)]), - withApiVersion(apiVersion): { source+: { inlineVolumeSpec+: { storageos+: { secretRef+: { apiVersion: apiVersion } } } } }, - '#withFieldPath':: d.fn(help='"If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. For example, if the object reference is to a container within a pod, this would take on a value like: \\"spec.containers{name}\\" (where \\"name\\" refers to the name of the container that triggered the event) or if no container name is specified \\"spec.containers[2]\\" (container with index 2 in this pod). This syntax is chosen only to have some well-defined way of referencing a part of an object."', args=[d.arg(name='fieldPath', type=d.T.string)]), - withFieldPath(fieldPath): { source+: { inlineVolumeSpec+: { storageos+: { secretRef+: { fieldPath: fieldPath } } } } }, - '#withKind':: d.fn(help='"Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds"', args=[d.arg(name='kind', type=d.T.string)]), - withKind(kind): { source+: { inlineVolumeSpec+: { storageos+: { secretRef+: { kind: kind } } } } }, - '#withName':: d.fn(help='"Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { source+: { inlineVolumeSpec+: { storageos+: { secretRef+: { name: name } } } } }, - '#withNamespace':: d.fn(help='"Namespace of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/"', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { source+: { inlineVolumeSpec+: { storageos+: { secretRef+: { namespace: namespace } } } } }, - '#withResourceVersion':: d.fn(help='"Specific resourceVersion to which this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), - withResourceVersion(resourceVersion): { source+: { inlineVolumeSpec+: { storageos+: { secretRef+: { resourceVersion: resourceVersion } } } } }, - '#withUid':: d.fn(help='"UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids"', args=[d.arg(name='uid', type=d.T.string)]), - withUid(uid): { source+: { inlineVolumeSpec+: { storageos+: { secretRef+: { uid: uid } } } } }, - }, - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { source+: { inlineVolumeSpec+: { storageos+: { fsType: fsType } } } }, - '#withReadOnly':: d.fn(help='"Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { source+: { inlineVolumeSpec+: { storageos+: { readOnly: readOnly } } } }, - '#withVolumeName':: d.fn(help='"VolumeName is the human-readable name of the StorageOS volume. Volume names are only unique within a namespace."', args=[d.arg(name='volumeName', type=d.T.string)]), - withVolumeName(volumeName): { source+: { inlineVolumeSpec+: { storageos+: { volumeName: volumeName } } } }, - '#withVolumeNamespace':: d.fn(help="\"VolumeNamespace specifies the scope of the volume within StorageOS. If no namespace is specified then the Pod's namespace will be used. This allows the Kubernetes name scoping to be mirrored within StorageOS for tighter integration. Set VolumeName to any name to override the default behaviour. Set to \\\"default\\\" if you are not using namespaces within StorageOS. Namespaces that do not pre-exist within StorageOS will be created.\"", args=[d.arg(name='volumeNamespace', type=d.T.string)]), - withVolumeNamespace(volumeNamespace): { source+: { inlineVolumeSpec+: { storageos+: { volumeNamespace: volumeNamespace } } } }, - }, - '#vsphereVolume':: d.obj(help='"Represents a vSphere volume resource."'), - vsphereVolume: { - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". 
Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { source+: { inlineVolumeSpec+: { vsphereVolume+: { fsType: fsType } } } }, - '#withStoragePolicyID':: d.fn(help='"Storage Policy Based Management (SPBM) profile ID associated with the StoragePolicyName."', args=[d.arg(name='storagePolicyID', type=d.T.string)]), - withStoragePolicyID(storagePolicyID): { source+: { inlineVolumeSpec+: { vsphereVolume+: { storagePolicyID: storagePolicyID } } } }, - '#withStoragePolicyName':: d.fn(help='"Storage Policy Based Management (SPBM) profile name."', args=[d.arg(name='storagePolicyName', type=d.T.string)]), - withStoragePolicyName(storagePolicyName): { source+: { inlineVolumeSpec+: { vsphereVolume+: { storagePolicyName: storagePolicyName } } } }, - '#withVolumePath':: d.fn(help='"Path that identifies vSphere volume vmdk"', args=[d.arg(name='volumePath', type=d.T.string)]), - withVolumePath(volumePath): { source+: { inlineVolumeSpec+: { vsphereVolume+: { volumePath: volumePath } } } }, - }, - '#withAccessModes':: d.fn(help='"AccessModes contains all ways the volume can be mounted. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes"', args=[d.arg(name='accessModes', type=d.T.array)]), - withAccessModes(accessModes): { source+: { inlineVolumeSpec+: { accessModes: if std.isArray(v=accessModes) then accessModes else [accessModes] } } }, - '#withAccessModesMixin':: d.fn(help='"AccessModes contains all ways the volume can be mounted. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='accessModes', type=d.T.array)]), - withAccessModesMixin(accessModes): { source+: { inlineVolumeSpec+: { accessModes+: if std.isArray(v=accessModes) then accessModes else [accessModes] } } }, - '#withCapacity':: d.fn(help="\"A description of the persistent volume's resources and capacity. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity\"", args=[d.arg(name='capacity', type=d.T.object)]), - withCapacity(capacity): { source+: { inlineVolumeSpec+: { capacity: capacity } } }, - '#withCapacityMixin':: d.fn(help="\"A description of the persistent volume's resources and capacity. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='capacity', type=d.T.object)]), - withCapacityMixin(capacity): { source+: { inlineVolumeSpec+: { capacity+: capacity } } }, - '#withMountOptions':: d.fn(help='"A list of mount options, e.g. [\\"ro\\", \\"soft\\"]. Not validated - mount will simply fail if one is invalid. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#mount-options"', args=[d.arg(name='mountOptions', type=d.T.array)]), - withMountOptions(mountOptions): { source+: { inlineVolumeSpec+: { mountOptions: if std.isArray(v=mountOptions) then mountOptions else [mountOptions] } } }, - '#withMountOptionsMixin':: d.fn(help='"A list of mount options, e.g. [\\"ro\\", \\"soft\\"]. Not validated - mount will simply fail if one is invalid. 
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#mount-options"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='mountOptions', type=d.T.array)]), - withMountOptionsMixin(mountOptions): { source+: { inlineVolumeSpec+: { mountOptions+: if std.isArray(v=mountOptions) then mountOptions else [mountOptions] } } }, - '#withPersistentVolumeReclaimPolicy':: d.fn(help='"What happens to a persistent volume when released from its claim. Valid options are Retain (default for manually created PersistentVolumes), Delete (default for dynamically provisioned PersistentVolumes), and Recycle (deprecated). Recycle must be supported by the volume plugin underlying this PersistentVolume. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#reclaiming"', args=[d.arg(name='persistentVolumeReclaimPolicy', type=d.T.string)]), - withPersistentVolumeReclaimPolicy(persistentVolumeReclaimPolicy): { source+: { inlineVolumeSpec+: { persistentVolumeReclaimPolicy: persistentVolumeReclaimPolicy } } }, - '#withStorageClassName':: d.fn(help='"Name of StorageClass to which this persistent volume belongs. Empty value means that this volume does not belong to any StorageClass."', args=[d.arg(name='storageClassName', type=d.T.string)]), - withStorageClassName(storageClassName): { source+: { inlineVolumeSpec+: { storageClassName: storageClassName } } }, - '#withVolumeMode':: d.fn(help='"volumeMode defines if a volume is intended to be used with a formatted filesystem or to remain in raw block state. Value of Filesystem is implied when not included in spec."', args=[d.arg(name='volumeMode', type=d.T.string)]), - withVolumeMode(volumeMode): { source+: { inlineVolumeSpec+: { volumeMode: volumeMode } } }, - }, - '#withPersistentVolumeName':: d.fn(help='"Name of the persistent volume to attach."', args=[d.arg(name='persistentVolumeName', type=d.T.string)]), - withPersistentVolumeName(persistentVolumeName): { source+: { persistentVolumeName: persistentVolumeName } }, - }, - '#withAttacher':: d.fn(help='"Attacher indicates the name of the volume driver that MUST handle this request. This is the name returned by GetPluginName()."', args=[d.arg(name='attacher', type=d.T.string)]), - withAttacher(attacher): { attacher: attacher }, - '#withNodeName':: d.fn(help='"The node that the volume should be attached to."', args=[d.arg(name='nodeName', type=d.T.string)]), - withNodeName(nodeName): { nodeName: nodeName }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1alpha1/volumeAttachmentStatus.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1alpha1/volumeAttachmentStatus.libsonnet deleted file mode 100644 index 155e537a9c8..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1alpha1/volumeAttachmentStatus.libsonnet +++ /dev/null @@ -1,26 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='volumeAttachmentStatus', url='', help='"VolumeAttachmentStatus is the status of a VolumeAttachment request."'), - '#attachError':: d.obj(help='"VolumeError captures an error encountered during a volume operation."'), - attachError: { - '#withMessage':: d.fn(help='"String detailing the error encountered during Attach or Detach operation. 
This string maybe logged, so it should not contain sensitive information."', args=[d.arg(name='message', type=d.T.string)]), - withMessage(message): { attachError+: { message: message } }, - '#withTime':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='time', type=d.T.string)]), - withTime(time): { attachError+: { time: time } }, - }, - '#detachError':: d.obj(help='"VolumeError captures an error encountered during a volume operation."'), - detachError: { - '#withMessage':: d.fn(help='"String detailing the error encountered during Attach or Detach operation. This string maybe logged, so it should not contain sensitive information."', args=[d.arg(name='message', type=d.T.string)]), - withMessage(message): { detachError+: { message: message } }, - '#withTime':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='time', type=d.T.string)]), - withTime(time): { detachError+: { time: time } }, - }, - '#withAttached':: d.fn(help='"Indicates the volume is successfully attached. This field must only be set by the entity completing the attach operation, i.e. the external-attacher."', args=[d.arg(name='attached', type=d.T.boolean)]), - withAttached(attached): { attached: attached }, - '#withAttachmentMetadata':: d.fn(help='"Upon successful attach, this field is populated with any information returned by the attach operation that must be passed into subsequent WaitForAttach or Mount calls. This field must only be set by the entity completing the attach operation, i.e. the external-attacher."', args=[d.arg(name='attachmentMetadata', type=d.T.object)]), - withAttachmentMetadata(attachmentMetadata): { attachmentMetadata: attachmentMetadata }, - '#withAttachmentMetadataMixin':: d.fn(help='"Upon successful attach, this field is populated with any information returned by the attach operation that must be passed into subsequent WaitForAttach or Mount calls. This field must only be set by the entity completing the attach operation, i.e. the external-attacher."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='attachmentMetadata', type=d.T.object)]), - withAttachmentMetadataMixin(attachmentMetadata): { attachmentMetadata+: attachmentMetadata }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1alpha1/volumeError.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1alpha1/volumeError.libsonnet deleted file mode 100644 index f77069c446f..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1alpha1/volumeError.libsonnet +++ /dev/null @@ -1,10 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='volumeError', url='', help='"VolumeError captures an error encountered during a volume operation."'), - '#withMessage':: d.fn(help='"String detailing the error encountered during Attach or Detach operation. 
This string maybe logged, so it should not contain sensitive information."', args=[d.arg(name='message', type=d.T.string)]), - withMessage(message): { message: message }, - '#withTime':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='time', type=d.T.string)]), - withTime(time): { time: time }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1beta1/csiDriver.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1beta1/csiDriver.libsonnet deleted file mode 100644 index 34a6fb0a69b..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1beta1/csiDriver.libsonnet +++ /dev/null @@ -1,77 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='csiDriver', url='', help='"CSIDriver captures information about a Container Storage Interface (CSI) volume driver deployed on the cluster. CSI drivers do not need to create the CSIDriver object directly. Instead they may use the cluster-driver-registrar sidecar container. When deployed with a CSI driver it automatically creates a CSIDriver object representing the driver. Kubernetes attach detach controller uses this object to determine whether attach is required. Kubelet uses this object to determine whether pod information needs to be passed on mount. CSIDriver objects are non-namespaced."'), - '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), - metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), - withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), - withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, - '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. 
Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), - withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, - '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), - withDeletionGracePeriodSeconds(deletionGracePeriodSeconds): { metadata+: { deletionGracePeriodSeconds: deletionGracePeriodSeconds } }, - '#withDeletionTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='deletionTimestamp', type=d.T.string)]), - withDeletionTimestamp(deletionTimestamp): { metadata+: { deletionTimestamp: deletionTimestamp } }, - '#withFinalizers':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. 
If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), - withGenerateName(generateName): { metadata+: { generateName: generateName } }, - '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), - withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), - withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), - withLabelsMixin(labels): { metadata+: { labels+: labels } }, - '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. 
Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { metadata+: { namespace: namespace } }, - '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, - '#withOwnerReferencesMixin':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, - '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), - withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), - withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. 
It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), - withUid(uid): { metadata+: { uid: uid } }, - }, - '#new':: d.fn(help='new returns an instance of CSIDriver', args=[d.arg(name='name', type=d.T.string)]), - new(name): { - apiVersion: 'storage.k8s.io/v1beta1', - kind: 'CSIDriver', - } + self.metadata.withName(name=name), - '#spec':: d.obj(help='"CSIDriverSpec is the specification of a CSIDriver."'), - spec: { - '#withAttachRequired':: d.fn(help='"attachRequired indicates this CSI volume driver requires an attach operation (because it implements the CSI ControllerPublishVolume() method), and that the Kubernetes attach detach controller should call the attach volume interface which checks the volumeattachment status and waits until the volume is attached before proceeding to mounting. The CSI external-attacher coordinates with CSI volume driver and updates the volumeattachment status when the attach operation is complete. If the CSIDriverRegistry feature gate is enabled and the value is specified to false, the attach operation will be skipped. Otherwise the attach operation will be called.\\n\\nThis field is immutable."', args=[d.arg(name='attachRequired', type=d.T.boolean)]), - withAttachRequired(attachRequired): { spec+: { attachRequired: attachRequired } }, - '#withFsGroupPolicy':: d.fn(help='"Defines if the underlying volume supports changing ownership and permission of the volume before being mounted. Refer to the specific FSGroupPolicy values for additional details. This field is alpha-level, and is only honored by servers that enable the CSIVolumeFSGroupPolicy feature gate.\\n\\nThis field is immutable."', args=[d.arg(name='fsGroupPolicy', type=d.T.string)]), - withFsGroupPolicy(fsGroupPolicy): { spec+: { fsGroupPolicy: fsGroupPolicy } }, - '#withPodInfoOnMount':: d.fn(help="\"If set to true, podInfoOnMount indicates this CSI volume driver requires additional pod information (like podName, podUID, etc.) during mount operations. If set to false, pod information will not be passed on mount. Default is false. The CSI driver specifies podInfoOnMount as part of driver deployment. If true, Kubelet will pass pod information as VolumeContext in the CSI NodePublishVolume() calls. The CSI driver is responsible for parsing and validating the information passed in as VolumeContext. The following VolumeConext will be passed if podInfoOnMount is set to true. This list might grow, but the prefix will be used. \\\"csi.storage.k8s.io/pod.name\\\": pod.Name \\\"csi.storage.k8s.io/pod.namespace\\\": pod.Namespace \\\"csi.storage.k8s.io/pod.uid\\\": string(pod.UID) \\\"csi.storage.k8s.io/ephemeral\\\": \\\"true\\\" if the volume is an ephemeral inline volume\\n defined by a CSIVolumeSource, otherwise \\\"false\\\"\\n\\n\\\"csi.storage.k8s.io/ephemeral\\\" is a new feature in Kubernetes 1.16. It is only required for drivers which support both the \\\"Persistent\\\" and \\\"Ephemeral\\\" VolumeLifecycleMode. Other drivers can leave pod info disabled and/or ignore this field. 
As Kubernetes 1.15 doesn't support this field, drivers can only support one mode when deployed on such a cluster and the deployment determines which mode that is, for example via a command line parameter of the driver.\\n\\nThis field is immutable.\"", args=[d.arg(name='podInfoOnMount', type=d.T.boolean)]), - withPodInfoOnMount(podInfoOnMount): { spec+: { podInfoOnMount: podInfoOnMount } }, - '#withRequiresRepublish':: d.fn(help='"RequiresRepublish indicates the CSI driver wants `NodePublishVolume` being periodically called to reflect any possible change in the mounted volume. This field defaults to false.\\n\\nNote: After a successful initial NodePublishVolume call, subsequent calls to NodePublishVolume should only update the contents of the volume. New mount points will not be seen by a running container.\\n\\nThis is a beta feature and only available when the CSIServiceAccountToken feature is enabled."', args=[d.arg(name='requiresRepublish', type=d.T.boolean)]), - withRequiresRepublish(requiresRepublish): { spec+: { requiresRepublish: requiresRepublish } }, - '#withStorageCapacity':: d.fn(help='"If set to true, storageCapacity indicates that the CSI volume driver wants pod scheduling to consider the storage capacity that the driver deployment will report by creating CSIStorageCapacity objects with capacity information.\\n\\nThe check can be enabled immediately when deploying a driver. In that case, provisioning new volumes with late binding will pause until the driver deployment has published some suitable CSIStorageCapacity object.\\n\\nAlternatively, the driver can be deployed with the field unset or false and it can be flipped later when storage capacity information has been published.\\n\\nThis field is immutable.\\n\\nThis is a beta field and only available when the CSIStorageCapacity feature is enabled. The default is false."', args=[d.arg(name='storageCapacity', type=d.T.boolean)]), - withStorageCapacity(storageCapacity): { spec+: { storageCapacity: storageCapacity } }, - '#withTokenRequests':: d.fn(help="\"TokenRequests indicates the CSI driver needs pods' service account tokens it is mounting volume for to do necessary authentication. Kubelet will pass the tokens in VolumeContext in the CSI NodePublishVolume calls. The CSI driver should parse and validate the following VolumeContext: \\\"csi.storage.k8s.io/serviceAccount.tokens\\\": {\\n \\\"\u003caudience\u003e\\\": {\\n \\\"token\\\": \u003ctoken\u003e,\\n \\\"expirationTimestamp\\\": \u003cexpiration timestamp in RFC3339\u003e,\\n },\\n ...\\n}\\n\\nNote: Audience in each TokenRequest should be different and at most one token is empty string. To receive a new token after expiry, RequiresRepublish can be used to trigger NodePublishVolume periodically.\\n\\nThis is a beta feature and only available when the CSIServiceAccountToken feature is enabled.\"", args=[d.arg(name='tokenRequests', type=d.T.array)]), - withTokenRequests(tokenRequests): { spec+: { tokenRequests: if std.isArray(v=tokenRequests) then tokenRequests else [tokenRequests] } }, - '#withTokenRequestsMixin':: d.fn(help="\"TokenRequests indicates the CSI driver needs pods' service account tokens it is mounting volume for to do necessary authentication. Kubelet will pass the tokens in VolumeContext in the CSI NodePublishVolume calls. 
The CSI driver should parse and validate the following VolumeContext: \\\"csi.storage.k8s.io/serviceAccount.tokens\\\": {\\n \\\"\u003caudience\u003e\\\": {\\n \\\"token\\\": \u003ctoken\u003e,\\n \\\"expirationTimestamp\\\": \u003cexpiration timestamp in RFC3339\u003e,\\n },\\n ...\\n}\\n\\nNote: Audience in each TokenRequest should be different and at most one token is empty string. To receive a new token after expiry, RequiresRepublish can be used to trigger NodePublishVolume periodically.\\n\\nThis is a beta feature and only available when the CSIServiceAccountToken feature is enabled.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='tokenRequests', type=d.T.array)]), - withTokenRequestsMixin(tokenRequests): { spec+: { tokenRequests+: if std.isArray(v=tokenRequests) then tokenRequests else [tokenRequests] } }, - '#withVolumeLifecycleModes':: d.fn(help='"VolumeLifecycleModes defines what kind of volumes this CSI volume driver supports. The default if the list is empty is \\"Persistent\\", which is the usage defined by the CSI specification and implemented in Kubernetes via the usual PV/PVC mechanism. The other mode is \\"Ephemeral\\". In this mode, volumes are defined inline inside the pod spec with CSIVolumeSource and their lifecycle is tied to the lifecycle of that pod. A driver has to be aware of this because it is only going to get a NodePublishVolume call for such a volume. For more information about implementing this mode, see https://kubernetes-csi.github.io/docs/ephemeral-local-volumes.html A driver can support one or more of these modes and more modes may be added in the future.\\n\\nThis field is immutable."', args=[d.arg(name='volumeLifecycleModes', type=d.T.array)]), - withVolumeLifecycleModes(volumeLifecycleModes): { spec+: { volumeLifecycleModes: if std.isArray(v=volumeLifecycleModes) then volumeLifecycleModes else [volumeLifecycleModes] } }, - '#withVolumeLifecycleModesMixin':: d.fn(help='"VolumeLifecycleModes defines what kind of volumes this CSI volume driver supports. The default if the list is empty is \\"Persistent\\", which is the usage defined by the CSI specification and implemented in Kubernetes via the usual PV/PVC mechanism. The other mode is \\"Ephemeral\\". In this mode, volumes are defined inline inside the pod spec with CSIVolumeSource and their lifecycle is tied to the lifecycle of that pod. A driver has to be aware of this because it is only going to get a NodePublishVolume call for such a volume. 
For more information about implementing this mode, see https://kubernetes-csi.github.io/docs/ephemeral-local-volumes.html A driver can support one or more of these modes and more modes may be added in the future.\\n\\nThis field is immutable."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='volumeLifecycleModes', type=d.T.array)]), - withVolumeLifecycleModesMixin(volumeLifecycleModes): { spec+: { volumeLifecycleModes+: if std.isArray(v=volumeLifecycleModes) then volumeLifecycleModes else [volumeLifecycleModes] } }, - }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1beta1/csiDriverSpec.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1beta1/csiDriverSpec.libsonnet deleted file mode 100644 index 0618b0af909..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1beta1/csiDriverSpec.libsonnet +++ /dev/null @@ -1,24 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='csiDriverSpec', url='', help='"CSIDriverSpec is the specification of a CSIDriver."'), - '#withAttachRequired':: d.fn(help='"attachRequired indicates this CSI volume driver requires an attach operation (because it implements the CSI ControllerPublishVolume() method), and that the Kubernetes attach detach controller should call the attach volume interface which checks the volumeattachment status and waits until the volume is attached before proceeding to mounting. The CSI external-attacher coordinates with CSI volume driver and updates the volumeattachment status when the attach operation is complete. If the CSIDriverRegistry feature gate is enabled and the value is specified to false, the attach operation will be skipped. Otherwise the attach operation will be called.\\n\\nThis field is immutable."', args=[d.arg(name='attachRequired', type=d.T.boolean)]), - withAttachRequired(attachRequired): { attachRequired: attachRequired }, - '#withFsGroupPolicy':: d.fn(help='"Defines if the underlying volume supports changing ownership and permission of the volume before being mounted. Refer to the specific FSGroupPolicy values for additional details. This field is alpha-level, and is only honored by servers that enable the CSIVolumeFSGroupPolicy feature gate.\\n\\nThis field is immutable."', args=[d.arg(name='fsGroupPolicy', type=d.T.string)]), - withFsGroupPolicy(fsGroupPolicy): { fsGroupPolicy: fsGroupPolicy }, - '#withPodInfoOnMount':: d.fn(help="\"If set to true, podInfoOnMount indicates this CSI volume driver requires additional pod information (like podName, podUID, etc.) during mount operations. If set to false, pod information will not be passed on mount. Default is false. The CSI driver specifies podInfoOnMount as part of driver deployment. If true, Kubelet will pass pod information as VolumeContext in the CSI NodePublishVolume() calls. The CSI driver is responsible for parsing and validating the information passed in as VolumeContext. The following VolumeConext will be passed if podInfoOnMount is set to true. This list might grow, but the prefix will be used. 
\\\"csi.storage.k8s.io/pod.name\\\": pod.Name \\\"csi.storage.k8s.io/pod.namespace\\\": pod.Namespace \\\"csi.storage.k8s.io/pod.uid\\\": string(pod.UID) \\\"csi.storage.k8s.io/ephemeral\\\": \\\"true\\\" if the volume is an ephemeral inline volume\\n defined by a CSIVolumeSource, otherwise \\\"false\\\"\\n\\n\\\"csi.storage.k8s.io/ephemeral\\\" is a new feature in Kubernetes 1.16. It is only required for drivers which support both the \\\"Persistent\\\" and \\\"Ephemeral\\\" VolumeLifecycleMode. Other drivers can leave pod info disabled and/or ignore this field. As Kubernetes 1.15 doesn't support this field, drivers can only support one mode when deployed on such a cluster and the deployment determines which mode that is, for example via a command line parameter of the driver.\\n\\nThis field is immutable.\"", args=[d.arg(name='podInfoOnMount', type=d.T.boolean)]), - withPodInfoOnMount(podInfoOnMount): { podInfoOnMount: podInfoOnMount }, - '#withRequiresRepublish':: d.fn(help='"RequiresRepublish indicates the CSI driver wants `NodePublishVolume` being periodically called to reflect any possible change in the mounted volume. This field defaults to false.\\n\\nNote: After a successful initial NodePublishVolume call, subsequent calls to NodePublishVolume should only update the contents of the volume. New mount points will not be seen by a running container.\\n\\nThis is a beta feature and only available when the CSIServiceAccountToken feature is enabled."', args=[d.arg(name='requiresRepublish', type=d.T.boolean)]), - withRequiresRepublish(requiresRepublish): { requiresRepublish: requiresRepublish }, - '#withStorageCapacity':: d.fn(help='"If set to true, storageCapacity indicates that the CSI volume driver wants pod scheduling to consider the storage capacity that the driver deployment will report by creating CSIStorageCapacity objects with capacity information.\\n\\nThe check can be enabled immediately when deploying a driver. In that case, provisioning new volumes with late binding will pause until the driver deployment has published some suitable CSIStorageCapacity object.\\n\\nAlternatively, the driver can be deployed with the field unset or false and it can be flipped later when storage capacity information has been published.\\n\\nThis field is immutable.\\n\\nThis is a beta field and only available when the CSIStorageCapacity feature is enabled. The default is false."', args=[d.arg(name='storageCapacity', type=d.T.boolean)]), - withStorageCapacity(storageCapacity): { storageCapacity: storageCapacity }, - '#withTokenRequests':: d.fn(help="\"TokenRequests indicates the CSI driver needs pods' service account tokens it is mounting volume for to do necessary authentication. Kubelet will pass the tokens in VolumeContext in the CSI NodePublishVolume calls. The CSI driver should parse and validate the following VolumeContext: \\\"csi.storage.k8s.io/serviceAccount.tokens\\\": {\\n \\\"\u003caudience\u003e\\\": {\\n \\\"token\\\": \u003ctoken\u003e,\\n \\\"expirationTimestamp\\\": \u003cexpiration timestamp in RFC3339\u003e,\\n },\\n ...\\n}\\n\\nNote: Audience in each TokenRequest should be different and at most one token is empty string. 
To receive a new token after expiry, RequiresRepublish can be used to trigger NodePublishVolume periodically.\\n\\nThis is a beta feature and only available when the CSIServiceAccountToken feature is enabled.\"", args=[d.arg(name='tokenRequests', type=d.T.array)]), - withTokenRequests(tokenRequests): { tokenRequests: if std.isArray(v=tokenRequests) then tokenRequests else [tokenRequests] }, - '#withTokenRequestsMixin':: d.fn(help="\"TokenRequests indicates the CSI driver needs pods' service account tokens it is mounting volume for to do necessary authentication. Kubelet will pass the tokens in VolumeContext in the CSI NodePublishVolume calls. The CSI driver should parse and validate the following VolumeContext: \\\"csi.storage.k8s.io/serviceAccount.tokens\\\": {\\n \\\"\u003caudience\u003e\\\": {\\n \\\"token\\\": \u003ctoken\u003e,\\n \\\"expirationTimestamp\\\": \u003cexpiration timestamp in RFC3339\u003e,\\n },\\n ...\\n}\\n\\nNote: Audience in each TokenRequest should be different and at most one token is empty string. To receive a new token after expiry, RequiresRepublish can be used to trigger NodePublishVolume periodically.\\n\\nThis is a beta feature and only available when the CSIServiceAccountToken feature is enabled.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='tokenRequests', type=d.T.array)]), - withTokenRequestsMixin(tokenRequests): { tokenRequests+: if std.isArray(v=tokenRequests) then tokenRequests else [tokenRequests] }, - '#withVolumeLifecycleModes':: d.fn(help='"VolumeLifecycleModes defines what kind of volumes this CSI volume driver supports. The default if the list is empty is \\"Persistent\\", which is the usage defined by the CSI specification and implemented in Kubernetes via the usual PV/PVC mechanism. The other mode is \\"Ephemeral\\". In this mode, volumes are defined inline inside the pod spec with CSIVolumeSource and their lifecycle is tied to the lifecycle of that pod. A driver has to be aware of this because it is only going to get a NodePublishVolume call for such a volume. For more information about implementing this mode, see https://kubernetes-csi.github.io/docs/ephemeral-local-volumes.html A driver can support one or more of these modes and more modes may be added in the future.\\n\\nThis field is immutable."', args=[d.arg(name='volumeLifecycleModes', type=d.T.array)]), - withVolumeLifecycleModes(volumeLifecycleModes): { volumeLifecycleModes: if std.isArray(v=volumeLifecycleModes) then volumeLifecycleModes else [volumeLifecycleModes] }, - '#withVolumeLifecycleModesMixin':: d.fn(help='"VolumeLifecycleModes defines what kind of volumes this CSI volume driver supports. The default if the list is empty is \\"Persistent\\", which is the usage defined by the CSI specification and implemented in Kubernetes via the usual PV/PVC mechanism. The other mode is \\"Ephemeral\\". In this mode, volumes are defined inline inside the pod spec with CSIVolumeSource and their lifecycle is tied to the lifecycle of that pod. A driver has to be aware of this because it is only going to get a NodePublishVolume call for such a volume. 
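The csiDriverSpec hunk above is pure field setters, each returning a one-key object that is merged with `+`. A minimal usage sketch of the 1.21 API being removed here, assuming the vendored jsonnet-bundler import path and with illustrative values only:

    // Sketch only: not part of this patch. Import path and values are assumptions.
    local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.21/main.libsonnet';
    local csiDriverSpec = k.storage.v1beta1.csiDriverSpec;

    // Each with* setter returns a small object; '+' merges them into one CSIDriverSpec.
    csiDriverSpec.withAttachRequired(true)
    + csiDriverSpec.withPodInfoOnMount(false)
    + csiDriverSpec.withStorageCapacity(true)
    + csiDriverSpec.withTokenRequests([{ audience: 'example.local', expirationSeconds: 3600 }])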
For more information about implementing this mode, see https://kubernetes-csi.github.io/docs/ephemeral-local-volumes.html A driver can support one or more of these modes and more modes may be added in the future.\\n\\nThis field is immutable."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='volumeLifecycleModes', type=d.T.array)]), - withVolumeLifecycleModesMixin(volumeLifecycleModes): { volumeLifecycleModes+: if std.isArray(v=volumeLifecycleModes) then volumeLifecycleModes else [volumeLifecycleModes] }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1beta1/csiNode.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1beta1/csiNode.libsonnet deleted file mode 100644 index 634ba66a6da..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1beta1/csiNode.libsonnet +++ /dev/null @@ -1,63 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='csiNode', url='', help="\"DEPRECATED - This group version of CSINode is deprecated by storage/v1/CSINode. See the release notes for more information. CSINode holds information about all CSI drivers installed on a node. CSI drivers do not need to create the CSINode object directly. As long as they use the node-driver-registrar sidecar container, the kubelet will automatically populate the CSINode object for the CSI driver as part of kubelet plugin registration. CSINode has the same name as a node. If the object is missing, it means either there are no CSI Drivers available on the node, or the Kubelet version is low enough that it doesn't create this object. CSINode has an OwnerReference that points to the corresponding node object.\""), - '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), - metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), - withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), - withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, - '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. 
Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), - withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, - '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), - withDeletionGracePeriodSeconds(deletionGracePeriodSeconds): { metadata+: { deletionGracePeriodSeconds: deletionGracePeriodSeconds } }, - '#withDeletionTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='deletionTimestamp', type=d.T.string)]), - withDeletionTimestamp(deletionTimestamp): { metadata+: { deletionTimestamp: deletionTimestamp } }, - '#withFinalizers':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. 
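Across all of these removed files the generated setters come in pairs: `withX` writes the field with `:` (replace) while `withXMixin` writes it with `+:` (append/merge), as the repeated "**Note:** This function appends passed data to existing values" strings indicate. A small sketch of the practical difference, using the finalizers pair from the metadata block above (import path assumed):

    local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.21/main.libsonnet';
    local csiNode = k.storage.v1beta1.csiNode;

    {
      // withFinalizers overwrites any list set earlier (plain ':' field).
      replaced: csiNode.metadata.withFinalizers(['a'])
                + csiNode.metadata.withFinalizers(['b']),       // finalizers: ['b']

      // withFinalizersMixin appends to the existing list ('+:' field).
      appended: csiNode.metadata.withFinalizers(['a'])
                + csiNode.metadata.withFinalizersMixin(['b']),  // finalizers: ['a', 'b']
    }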
If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), - withGenerateName(generateName): { metadata+: { generateName: generateName } }, - '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), - withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), - withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), - withLabelsMixin(labels): { metadata+: { labels+: labels } }, - '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. 
Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { metadata+: { namespace: namespace } }, - '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, - '#withOwnerReferencesMixin':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, - '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), - withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), - withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. 
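Every object in this group repeats the same generated ObjectMeta helpers (withName, withNamespace, withLabels, withAnnotations, and so on), all nesting under `metadata+:` so they can be layered in any order. A sketch, with placeholder values and an assumed import path:

    local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.21/main.libsonnet';
    local csiNode = k.storage.v1beta1.csiNode;

    // Each call produces { metadata+: { ... } }, so '+' merges them into one metadata object.
    csiNode.metadata.withName('node-a')
    + csiNode.metadata.withLabels({ 'app.kubernetes.io/managed-by': 'tanka' })
    + csiNode.metadata.withAnnotationsMixin({ 'example.io/note': 'placeholder' })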
It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), - withUid(uid): { metadata+: { uid: uid } }, - }, - '#new':: d.fn(help='new returns an instance of CSINode', args=[d.arg(name='name', type=d.T.string)]), - new(name): { - apiVersion: 'storage.k8s.io/v1beta1', - kind: 'CSINode', - } + self.metadata.withName(name=name), - '#spec':: d.obj(help='"CSINodeSpec holds information about the specification of all CSI drivers installed on a node"'), - spec: { - '#withDrivers':: d.fn(help='"drivers is a list of information of all CSI Drivers existing on a node. If all drivers in the list are uninstalled, this can become empty."', args=[d.arg(name='drivers', type=d.T.array)]), - withDrivers(drivers): { spec+: { drivers: if std.isArray(v=drivers) then drivers else [drivers] } }, - '#withDriversMixin':: d.fn(help='"drivers is a list of information of all CSI Drivers existing on a node. If all drivers in the list are uninstalled, this can become empty."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='drivers', type=d.T.array)]), - withDriversMixin(drivers): { spec+: { drivers+: if std.isArray(v=drivers) then drivers else [drivers] } }, - }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1beta1/csiNodeDriver.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1beta1/csiNodeDriver.libsonnet deleted file mode 100644 index 15a1402bc51..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1beta1/csiNodeDriver.libsonnet +++ /dev/null @@ -1,19 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='csiNodeDriver', url='', help='"CSINodeDriver holds information about the specification of one CSI driver installed on a node"'), - '#allocatable':: d.obj(help='"VolumeNodeResources is a set of resource limits for scheduling of volumes."'), - allocatable: { - '#withCount':: d.fn(help='"Maximum number of unique volumes managed by the CSI driver that can be used on a node. A volume that is both attached and mounted on a node is considered to be used once, not twice. The same rule applies for a unique volume that is shared among multiple pods on the same node. If this field is nil, then the supported number of volumes on this node is unbounded."', args=[d.arg(name='count', type=d.T.integer)]), - withCount(count): { allocatable+: { count: count } }, - }, - '#withName':: d.fn(help='"This is the name of the CSI driver that this object refers to. This MUST be the same name returned by the CSI GetPluginName() call for that driver."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { name: name }, - '#withNodeID':: d.fn(help='"nodeID of the node from the driver point of view. This field enables Kubernetes to communicate with storage systems that do not share the same nomenclature for nodes. For example, Kubernetes may refer to a given node as \\"node1\\", but the storage system may refer to the same node as \\"nodeA\\". 
When Kubernetes issues a command to the storage system to attach a volume to a specific node, it can use this field to refer to the node name using the ID that the storage system will understand, e.g. \\"nodeA\\" instead of \\"node1\\". This field is required."', args=[d.arg(name='nodeID', type=d.T.string)]), - withNodeID(nodeID): { nodeID: nodeID }, - '#withTopologyKeys':: d.fn(help='"topologyKeys is the list of keys supported by the driver. When a driver is initialized on a cluster, it provides a set of topology keys that it understands (e.g. \\"company.com/zone\\", \\"company.com/region\\"). When a driver is initialized on a node, it provides the same topology keys along with values. Kubelet will expose these topology keys as labels on its own node object. When Kubernetes does topology aware provisioning, it can use this list to determine which labels it should retrieve from the node object and pass back to the driver. It is possible for different nodes to use different topology keys. This can be empty if driver does not support topology."', args=[d.arg(name='topologyKeys', type=d.T.array)]), - withTopologyKeys(topologyKeys): { topologyKeys: if std.isArray(v=topologyKeys) then topologyKeys else [topologyKeys] }, - '#withTopologyKeysMixin':: d.fn(help='"topologyKeys is the list of keys supported by the driver. When a driver is initialized on a cluster, it provides a set of topology keys that it understands (e.g. \\"company.com/zone\\", \\"company.com/region\\"). When a driver is initialized on a node, it provides the same topology keys along with values. Kubelet will expose these topology keys as labels on its own node object. When Kubernetes does topology aware provisioning, it can use this list to determine which labels it should retrieve from the node object and pass back to the driver. It is possible for different nodes to use different topology keys. This can be empty if driver does not support topology."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='topologyKeys', type=d.T.array)]), - withTopologyKeysMixin(topologyKeys): { topologyKeys+: if std.isArray(v=topologyKeys) then topologyKeys else [topologyKeys] }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1beta1/csiNodeSpec.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1beta1/csiNodeSpec.libsonnet deleted file mode 100644 index a0a47b4846e..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1beta1/csiNodeSpec.libsonnet +++ /dev/null @@ -1,10 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='csiNodeSpec', url='', help='"CSINodeSpec holds information about the specification of all CSI drivers installed on a node"'), - '#withDrivers':: d.fn(help='"drivers is a list of information of all CSI Drivers existing on a node. If all drivers in the list are uninstalled, this can become empty."', args=[d.arg(name='drivers', type=d.T.array)]), - withDrivers(drivers): { drivers: if std.isArray(v=drivers) then drivers else [drivers] }, - '#withDriversMixin':: d.fn(help='"drivers is a list of information of all CSI Drivers existing on a node. 
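For the csiNode / csiNodeDriver / csiNodeSpec trio deleted here, the constructor, spec setters and per-driver setters compose as below. This is a sketch against the removed 1.21 API; the driver name, node ID and topology key are placeholders:

    local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.21/main.libsonnet';
    local csiNode = k.storage.v1beta1.csiNode;
    local csiNodeDriver = k.storage.v1beta1.csiNodeDriver;

    // Build one CSINodeDriver entry, then attach it to the CSINode spec.
    local driver =
      csiNodeDriver.withName('ebs.csi.aws.com')
      + csiNodeDriver.withNodeID('i-0123456789abcdef0')
      + csiNodeDriver.withTopologyKeys(['topology.ebs.csi.aws.com/zone'])
      + csiNodeDriver.allocatable.withCount(25);

    csiNode.new('node-a')
    + csiNode.spec.withDrivers([driver])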
If all drivers in the list are uninstalled, this can become empty."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='drivers', type=d.T.array)]), - withDriversMixin(drivers): { drivers+: if std.isArray(v=drivers) then drivers else [drivers] }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1beta1/storageClass.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1beta1/storageClass.libsonnet deleted file mode 100644 index 3c1b88e164b..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1beta1/storageClass.libsonnet +++ /dev/null @@ -1,76 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='storageClass', url='', help='"StorageClass describes the parameters for a class of storage for which PersistentVolumes can be dynamically provisioned.\\n\\nStorageClasses are non-namespaced; the name of the storage class according to etcd is in ObjectMeta.Name."'), - '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), - metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), - withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), - withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, - '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), - withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, - '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. 
Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), - withDeletionGracePeriodSeconds(deletionGracePeriodSeconds): { metadata+: { deletionGracePeriodSeconds: deletionGracePeriodSeconds } }, - '#withDeletionTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='deletionTimestamp', type=d.T.string)]), - withDeletionTimestamp(deletionTimestamp): { metadata+: { deletionTimestamp: deletionTimestamp } }, - '#withFinalizers':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. 
The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), - withGenerateName(generateName): { metadata+: { generateName: generateName } }, - '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), - withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), - withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), - withLabelsMixin(labels): { metadata+: { labels+: labels } }, - '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. 
More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { metadata+: { namespace: namespace } }, - '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, - '#withOwnerReferencesMixin':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, - '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), - withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), - withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. 
More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), - withUid(uid): { metadata+: { uid: uid } }, - }, - '#new':: d.fn(help='new returns an instance of StorageClass', args=[d.arg(name='name', type=d.T.string)]), - new(name): { - apiVersion: 'storage.k8s.io/v1beta1', - kind: 'StorageClass', - } + self.metadata.withName(name=name), - '#withAllowVolumeExpansion':: d.fn(help='"AllowVolumeExpansion shows whether the storage class allow volume expand"', args=[d.arg(name='allowVolumeExpansion', type=d.T.boolean)]), - withAllowVolumeExpansion(allowVolumeExpansion): { allowVolumeExpansion: allowVolumeExpansion }, - '#withAllowedTopologies':: d.fn(help='"Restrict the node topologies where volumes can be dynamically provisioned. Each volume plugin defines its own supported topology specifications. An empty TopologySelectorTerm list means there is no topology restriction. This field is only honored by servers that enable the VolumeScheduling feature."', args=[d.arg(name='allowedTopologies', type=d.T.array)]), - withAllowedTopologies(allowedTopologies): { allowedTopologies: if std.isArray(v=allowedTopologies) then allowedTopologies else [allowedTopologies] }, - '#withAllowedTopologiesMixin':: d.fn(help='"Restrict the node topologies where volumes can be dynamically provisioned. Each volume plugin defines its own supported topology specifications. An empty TopologySelectorTerm list means there is no topology restriction. This field is only honored by servers that enable the VolumeScheduling feature."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='allowedTopologies', type=d.T.array)]), - withAllowedTopologiesMixin(allowedTopologies): { allowedTopologies+: if std.isArray(v=allowedTopologies) then allowedTopologies else [allowedTopologies] }, - '#withMountOptions':: d.fn(help='"Dynamically provisioned PersistentVolumes of this storage class are created with these mountOptions, e.g. [\\"ro\\", \\"soft\\"]. Not validated - mount of the PVs will simply fail if one is invalid."', args=[d.arg(name='mountOptions', type=d.T.array)]), - withMountOptions(mountOptions): { mountOptions: if std.isArray(v=mountOptions) then mountOptions else [mountOptions] }, - '#withMountOptionsMixin':: d.fn(help='"Dynamically provisioned PersistentVolumes of this storage class are created with these mountOptions, e.g. [\\"ro\\", \\"soft\\"]. 
Not validated - mount of the PVs will simply fail if one is invalid."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='mountOptions', type=d.T.array)]), - withMountOptionsMixin(mountOptions): { mountOptions+: if std.isArray(v=mountOptions) then mountOptions else [mountOptions] }, - '#withParameters':: d.fn(help='"Parameters holds the parameters for the provisioner that should create volumes of this storage class."', args=[d.arg(name='parameters', type=d.T.object)]), - withParameters(parameters): { parameters: parameters }, - '#withParametersMixin':: d.fn(help='"Parameters holds the parameters for the provisioner that should create volumes of this storage class."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='parameters', type=d.T.object)]), - withParametersMixin(parameters): { parameters+: parameters }, - '#withProvisioner':: d.fn(help='"Provisioner indicates the type of the provisioner."', args=[d.arg(name='provisioner', type=d.T.string)]), - withProvisioner(provisioner): { provisioner: provisioner }, - '#withReclaimPolicy':: d.fn(help='"Dynamically provisioned PersistentVolumes of this storage class are created with this reclaimPolicy. Defaults to Delete."', args=[d.arg(name='reclaimPolicy', type=d.T.string)]), - withReclaimPolicy(reclaimPolicy): { reclaimPolicy: reclaimPolicy }, - '#withVolumeBindingMode':: d.fn(help='"VolumeBindingMode indicates how PersistentVolumeClaims should be provisioned and bound. When unset, VolumeBindingImmediate is used. This field is only honored by servers that enable the VolumeScheduling feature."', args=[d.arg(name='volumeBindingMode', type=d.T.string)]), - withVolumeBindingMode(volumeBindingMode): { volumeBindingMode: volumeBindingMode }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1beta1/tokenRequest.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1beta1/tokenRequest.libsonnet deleted file mode 100644 index 50628cb06f6..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1beta1/tokenRequest.libsonnet +++ /dev/null @@ -1,10 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='tokenRequest', url='', help='"TokenRequest contains parameters of a service account token."'), - '#withAudience':: d.fn(help='"Audience is the intended audience of the token in \\"TokenRequestSpec\\". It will default to the audiences of kube apiserver."', args=[d.arg(name='audience', type=d.T.string)]), - withAudience(audience): { audience: audience }, - '#withExpirationSeconds':: d.fn(help='"ExpirationSeconds is the duration of validity of the token in \\"TokenRequestSpec\\". 
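storageClass is the object in this group most likely to be written by hand, so its removal from the 1.21 tree is the most visible one. A usage sketch of the setters shown above; the provisioner, parameters and mount options are illustrative, not taken from this repository:

    local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.21/main.libsonnet';
    local storageClass = k.storage.v1beta1.storageClass;

    storageClass.new('fast-ssd')
    + storageClass.withProvisioner('kubernetes.io/aws-ebs')        // illustrative provisioner
    + storageClass.withParameters({ type: 'gp3' })
    + storageClass.withReclaimPolicy('Retain')
    + storageClass.withVolumeBindingMode('WaitForFirstConsumer')
    + storageClass.withAllowVolumeExpansion(true)
    + storageClass.withMountOptionsMixin(['noatime'])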
It has the same default value of \\"ExpirationSeconds\\" in \\"TokenRequestSpec\\', args=[d.arg(name='expirationSeconds', type=d.T.integer)]), - withExpirationSeconds(expirationSeconds): { expirationSeconds: expirationSeconds }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1beta1/volumeAttachment.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1beta1/volumeAttachment.libsonnet deleted file mode 100644 index 5dbd7b70227..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1beta1/volumeAttachment.libsonnet +++ /dev/null @@ -1,479 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='volumeAttachment', url='', help='"VolumeAttachment captures the intent to attach or detach the specified volume to/from the specified node.\\n\\nVolumeAttachment objects are non-namespaced."'), - '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), - metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), - withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), - withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, - '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), - withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, - '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), - withDeletionGracePeriodSeconds(deletionGracePeriodSeconds): { metadata+: { deletionGracePeriodSeconds: deletionGracePeriodSeconds } }, - '#withDeletionTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. 
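The tokenRequest helper removed just above has only two setters; an entry built with it would typically be handed to csiDriverSpec.withTokenRequests. A sketch with a placeholder audience:

    local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.21/main.libsonnet';
    local tokenRequest = k.storage.v1beta1.tokenRequest;

    // One TokenRequest entry: intended audience plus validity in seconds.
    tokenRequest.withAudience('vault.example.internal')
    + tokenRequest.withExpirationSeconds(3600)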
Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='deletionTimestamp', type=d.T.string)]), - withDeletionTimestamp(deletionTimestamp): { metadata+: { deletionTimestamp: deletionTimestamp } }, - '#withFinalizers':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), - withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), - withGenerateName(generateName): { metadata+: { generateName: generateName } }, - '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), - withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), - withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), - withLabelsMixin(labels): { metadata+: { labels+: labels } }, - '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), - withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. 
More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { metadata+: { namespace: namespace } }, - '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, - '#withOwnerReferencesMixin':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ownerReferences', type=d.T.array)]), - withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, - '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), - withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), - withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), - withUid(uid): { metadata+: { uid: uid } }, - }, - '#new':: d.fn(help='new returns an instance of VolumeAttachment', args=[d.arg(name='name', type=d.T.string)]), - new(name): { - apiVersion: 'storage.k8s.io/v1beta1', - kind: 'VolumeAttachment', - } + self.metadata.withName(name=name), - '#spec':: d.obj(help='"VolumeAttachmentSpec is the specification of a VolumeAttachment request."'), - spec: { - '#source':: d.obj(help='"VolumeAttachmentSource represents a volume that should be attached. 
Right now only PersistenVolumes can be attached via external attacher, in future we may allow also inline volumes in pods. Exactly one member can be set."'), - source: { - '#inlineVolumeSpec':: d.obj(help='"PersistentVolumeSpec is the specification of a persistent volume."'), - inlineVolumeSpec: { - '#awsElasticBlockStore':: d.obj(help='"Represents a Persistent Disk resource in AWS.\\n\\nAn AWS EBS disk must exist before mounting to a container. The disk must also be in the same AWS zone as the kubelet. An AWS EBS disk can only be mounted as read/write once. AWS EBS volumes support ownership management and SELinux relabeling."'), - awsElasticBlockStore: { - '#withFsType':: d.fn(help='"Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore"', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { spec+: { source+: { inlineVolumeSpec+: { awsElasticBlockStore+: { fsType: fsType } } } } }, - '#withPartition':: d.fn(help='"The partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \\"1\\". Similarly, the volume partition for /dev/sda is \\"0\\" (or you can leave the property empty)."', args=[d.arg(name='partition', type=d.T.integer)]), - withPartition(partition): { spec+: { source+: { inlineVolumeSpec+: { awsElasticBlockStore+: { partition: partition } } } } }, - '#withReadOnly':: d.fn(help='"Specify \\"true\\" to force and set the ReadOnly property in VolumeMounts to \\"true\\". If omitted, the default is \\"false\\". More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore"', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { spec+: { source+: { inlineVolumeSpec+: { awsElasticBlockStore+: { readOnly: readOnly } } } } }, - '#withVolumeID':: d.fn(help='"Unique ID of the persistent disk resource in AWS (Amazon EBS volume). More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore"', args=[d.arg(name='volumeID', type=d.T.string)]), - withVolumeID(volumeID): { spec+: { source+: { inlineVolumeSpec+: { awsElasticBlockStore+: { volumeID: volumeID } } } } }, - }, - '#azureDisk':: d.obj(help='"AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod."'), - azureDisk: { - '#withCachingMode':: d.fn(help='"Host Caching mode: None, Read Only, Read Write."', args=[d.arg(name='cachingMode', type=d.T.string)]), - withCachingMode(cachingMode): { spec+: { source+: { inlineVolumeSpec+: { azureDisk+: { cachingMode: cachingMode } } } } }, - '#withDiskName':: d.fn(help='"The Name of the data disk in the blob storage"', args=[d.arg(name='diskName', type=d.T.string)]), - withDiskName(diskName): { spec+: { source+: { inlineVolumeSpec+: { azureDisk+: { diskName: diskName } } } } }, - '#withDiskURI':: d.fn(help='"The URI the data disk in the blob storage"', args=[d.arg(name='diskURI', type=d.T.string)]), - withDiskURI(diskURI): { spec+: { source+: { inlineVolumeSpec+: { azureDisk+: { diskURI: diskURI } } } } }, - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". 
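volumeAttachment is by far the largest file in this group because spec.source.inlineVolumeSpec mirrors the whole PersistentVolumeSpec, giving every inline volume source its own deeply nested setters. A sketch using the awsElasticBlockStore setters shown above (volume ID is a placeholder, import path assumed):

    local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.21/main.libsonnet';
    local volumeAttachment = k.storage.v1beta1.volumeAttachment;
    local ebs = volumeAttachment.spec.source.inlineVolumeSpec.awsElasticBlockStore;

    // Every ebs.with* call nests under spec.source.inlineVolumeSpec.awsElasticBlockStore.
    volumeAttachment.new('attach-vol-0abc')
    + ebs.withVolumeID('vol-0abc1234')
    + ebs.withFsType('ext4')
    + ebs.withReadOnly(false)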
Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { spec+: { source+: { inlineVolumeSpec+: { azureDisk+: { fsType: fsType } } } } }, - '#withKind':: d.fn(help='"Expected values Shared: multiple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared"', args=[d.arg(name='kind', type=d.T.string)]), - withKind(kind): { spec+: { source+: { inlineVolumeSpec+: { azureDisk+: { kind: kind } } } } }, - '#withReadOnly':: d.fn(help='"Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { spec+: { source+: { inlineVolumeSpec+: { azureDisk+: { readOnly: readOnly } } } } }, - }, - '#azureFile':: d.obj(help='"AzureFile represents an Azure File Service mount on the host and bind mount to the pod."'), - azureFile: { - '#withReadOnly':: d.fn(help='"Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { spec+: { source+: { inlineVolumeSpec+: { azureFile+: { readOnly: readOnly } } } } }, - '#withSecretName':: d.fn(help='"the name of secret that contains Azure Storage Account Name and Key"', args=[d.arg(name='secretName', type=d.T.string)]), - withSecretName(secretName): { spec+: { source+: { inlineVolumeSpec+: { azureFile+: { secretName: secretName } } } } }, - '#withSecretNamespace':: d.fn(help='"the namespace of the secret that contains Azure Storage Account Name and Key default is the same as the Pod"', args=[d.arg(name='secretNamespace', type=d.T.string)]), - withSecretNamespace(secretNamespace): { spec+: { source+: { inlineVolumeSpec+: { azureFile+: { secretNamespace: secretNamespace } } } } }, - '#withShareName':: d.fn(help='"Share Name"', args=[d.arg(name='shareName', type=d.T.string)]), - withShareName(shareName): { spec+: { source+: { inlineVolumeSpec+: { azureFile+: { shareName: shareName } } } } }, - }, - '#cephfs':: d.obj(help='"Represents a Ceph Filesystem mount that lasts the lifetime of a pod Cephfs volumes do not support ownership management or SELinux relabeling."'), - cephfs: { - '#secretRef':: d.obj(help='"SecretReference represents a Secret Reference. 
It has enough information to retrieve secret in any namespace"'), - secretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { spec+: { source+: { inlineVolumeSpec+: { cephfs+: { secretRef+: { name: name } } } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { spec+: { source+: { inlineVolumeSpec+: { cephfs+: { secretRef+: { namespace: namespace } } } } } }, - }, - '#withMonitors':: d.fn(help='"Required: Monitors is a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='monitors', type=d.T.array)]), - withMonitors(monitors): { spec+: { source+: { inlineVolumeSpec+: { cephfs+: { monitors: if std.isArray(v=monitors) then monitors else [monitors] } } } } }, - '#withMonitorsMixin':: d.fn(help='"Required: Monitors is a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='monitors', type=d.T.array)]), - withMonitorsMixin(monitors): { spec+: { source+: { inlineVolumeSpec+: { cephfs+: { monitors+: if std.isArray(v=monitors) then monitors else [monitors] } } } } }, - '#withPath':: d.fn(help='"Optional: Used as the mounted root, rather than the full Ceph tree, default is /"', args=[d.arg(name='path', type=d.T.string)]), - withPath(path): { spec+: { source+: { inlineVolumeSpec+: { cephfs+: { path: path } } } } }, - '#withReadOnly':: d.fn(help='"Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { spec+: { source+: { inlineVolumeSpec+: { cephfs+: { readOnly: readOnly } } } } }, - '#withSecretFile':: d.fn(help='"Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='secretFile', type=d.T.string)]), - withSecretFile(secretFile): { spec+: { source+: { inlineVolumeSpec+: { cephfs+: { secretFile: secretFile } } } } }, - '#withUser':: d.fn(help='"Optional: User is the rados user name, default is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='user', type=d.T.string)]), - withUser(user): { spec+: { source+: { inlineVolumeSpec+: { cephfs+: { user: user } } } } }, - }, - '#cinder':: d.obj(help='"Represents a cinder volume resource in Openstack. A Cinder volume must exist before mounting to a container. The volume must also be in the same region as the kubelet. Cinder volumes support ownership management and SELinux relabeling."'), - cinder: { - '#secretRef':: d.obj(help='"SecretReference represents a Secret Reference. 
It has enough information to retrieve secret in any namespace"'), - secretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { spec+: { source+: { inlineVolumeSpec+: { cinder+: { secretRef+: { name: name } } } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { spec+: { source+: { inlineVolumeSpec+: { cinder+: { secretRef+: { namespace: namespace } } } } } }, - }, - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://examples.k8s.io/mysql-cinder-pd/README.md"', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { spec+: { source+: { inlineVolumeSpec+: { cinder+: { fsType: fsType } } } } }, - '#withReadOnly':: d.fn(help='"Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/mysql-cinder-pd/README.md"', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { spec+: { source+: { inlineVolumeSpec+: { cinder+: { readOnly: readOnly } } } } }, - '#withVolumeID':: d.fn(help='"volume id used to identify the volume in cinder. More info: https://examples.k8s.io/mysql-cinder-pd/README.md"', args=[d.arg(name='volumeID', type=d.T.string)]), - withVolumeID(volumeID): { spec+: { source+: { inlineVolumeSpec+: { cinder+: { volumeID: volumeID } } } } }, - }, - '#claimRef':: d.obj(help='"ObjectReference contains enough information to let you inspect or modify the referred object."'), - claimRef: { - '#withApiVersion':: d.fn(help='"API version of the referent."', args=[d.arg(name='apiVersion', type=d.T.string)]), - withApiVersion(apiVersion): { spec+: { source+: { inlineVolumeSpec+: { claimRef+: { apiVersion: apiVersion } } } } }, - '#withFieldPath':: d.fn(help='"If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. For example, if the object reference is to a container within a pod, this would take on a value like: \\"spec.containers{name}\\" (where \\"name\\" refers to the name of the container that triggered the event) or if no container name is specified \\"spec.containers[2]\\" (container with index 2 in this pod). This syntax is chosen only to have some well-defined way of referencing a part of an object."', args=[d.arg(name='fieldPath', type=d.T.string)]), - withFieldPath(fieldPath): { spec+: { source+: { inlineVolumeSpec+: { claimRef+: { fieldPath: fieldPath } } } } }, - '#withKind':: d.fn(help='"Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds"', args=[d.arg(name='kind', type=d.T.string)]), - withKind(kind): { spec+: { source+: { inlineVolumeSpec+: { claimRef+: { kind: kind } } } } }, - '#withName':: d.fn(help='"Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { spec+: { source+: { inlineVolumeSpec+: { claimRef+: { name: name } } } } }, - '#withNamespace':: d.fn(help='"Namespace of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/"', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { spec+: { source+: { inlineVolumeSpec+: { claimRef+: { namespace: namespace } } } } }, - '#withResourceVersion':: d.fn(help='"Specific resourceVersion to which this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), - withResourceVersion(resourceVersion): { spec+: { source+: { inlineVolumeSpec+: { claimRef+: { resourceVersion: resourceVersion } } } } }, - '#withUid':: d.fn(help='"UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids"', args=[d.arg(name='uid', type=d.T.string)]), - withUid(uid): { spec+: { source+: { inlineVolumeSpec+: { claimRef+: { uid: uid } } } } }, - }, - '#csi':: d.obj(help='"Represents storage that is managed by an external CSI volume driver (Beta feature)"'), - csi: { - '#controllerExpandSecretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), - controllerExpandSecretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { spec+: { source+: { inlineVolumeSpec+: { csi+: { controllerExpandSecretRef+: { name: name } } } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { spec+: { source+: { inlineVolumeSpec+: { csi+: { controllerExpandSecretRef+: { namespace: namespace } } } } } }, - }, - '#controllerPublishSecretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), - controllerPublishSecretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { spec+: { source+: { inlineVolumeSpec+: { csi+: { controllerPublishSecretRef+: { name: name } } } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { spec+: { source+: { inlineVolumeSpec+: { csi+: { controllerPublishSecretRef+: { namespace: namespace } } } } } }, - }, - '#nodePublishSecretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), - nodePublishSecretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { spec+: { source+: { inlineVolumeSpec+: { csi+: { nodePublishSecretRef+: { name: name } } } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { spec+: { source+: { inlineVolumeSpec+: { csi+: { nodePublishSecretRef+: { namespace: namespace } } } } } }, - }, - '#nodeStageSecretRef':: d.obj(help='"SecretReference represents a Secret Reference. 
It has enough information to retrieve secret in any namespace"'), - nodeStageSecretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { spec+: { source+: { inlineVolumeSpec+: { csi+: { nodeStageSecretRef+: { name: name } } } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { spec+: { source+: { inlineVolumeSpec+: { csi+: { nodeStageSecretRef+: { namespace: namespace } } } } } }, - }, - '#withDriver':: d.fn(help='"Driver is the name of the driver to use for this volume. Required."', args=[d.arg(name='driver', type=d.T.string)]), - withDriver(driver): { spec+: { source+: { inlineVolumeSpec+: { csi+: { driver: driver } } } } }, - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\"."', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { spec+: { source+: { inlineVolumeSpec+: { csi+: { fsType: fsType } } } } }, - '#withReadOnly':: d.fn(help='"Optional: The value to pass to ControllerPublishVolumeRequest. Defaults to false (read/write)."', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { spec+: { source+: { inlineVolumeSpec+: { csi+: { readOnly: readOnly } } } } }, - '#withVolumeAttributes':: d.fn(help='"Attributes of the volume to publish."', args=[d.arg(name='volumeAttributes', type=d.T.object)]), - withVolumeAttributes(volumeAttributes): { spec+: { source+: { inlineVolumeSpec+: { csi+: { volumeAttributes: volumeAttributes } } } } }, - '#withVolumeAttributesMixin':: d.fn(help='"Attributes of the volume to publish."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='volumeAttributes', type=d.T.object)]), - withVolumeAttributesMixin(volumeAttributes): { spec+: { source+: { inlineVolumeSpec+: { csi+: { volumeAttributes+: volumeAttributes } } } } }, - '#withVolumeHandle':: d.fn(help='"VolumeHandle is the unique volume name returned by the CSI volume plugin’s CreateVolume to refer to the volume on all subsequent calls. Required."', args=[d.arg(name='volumeHandle', type=d.T.string)]), - withVolumeHandle(volumeHandle): { spec+: { source+: { inlineVolumeSpec+: { csi+: { volumeHandle: volumeHandle } } } } }, - }, - '#fc':: d.obj(help='"Represents a Fibre Channel volume. Fibre Channel volumes can only be mounted as read/write once. Fibre Channel volumes support ownership management and SELinux relabeling."'), - fc: { - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { spec+: { source+: { inlineVolumeSpec+: { fc+: { fsType: fsType } } } } }, - '#withLun':: d.fn(help='"Optional: FC target lun number"', args=[d.arg(name='lun', type=d.T.integer)]), - withLun(lun): { spec+: { source+: { inlineVolumeSpec+: { fc+: { lun: lun } } } } }, - '#withReadOnly':: d.fn(help='"Optional: Defaults to false (read/write). 
ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { spec+: { source+: { inlineVolumeSpec+: { fc+: { readOnly: readOnly } } } } }, - '#withTargetWWNs':: d.fn(help='"Optional: FC target worldwide names (WWNs)"', args=[d.arg(name='targetWWNs', type=d.T.array)]), - withTargetWWNs(targetWWNs): { spec+: { source+: { inlineVolumeSpec+: { fc+: { targetWWNs: if std.isArray(v=targetWWNs) then targetWWNs else [targetWWNs] } } } } }, - '#withTargetWWNsMixin':: d.fn(help='"Optional: FC target worldwide names (WWNs)"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='targetWWNs', type=d.T.array)]), - withTargetWWNsMixin(targetWWNs): { spec+: { source+: { inlineVolumeSpec+: { fc+: { targetWWNs+: if std.isArray(v=targetWWNs) then targetWWNs else [targetWWNs] } } } } }, - '#withWwids':: d.fn(help='"Optional: FC volume world wide identifiers (wwids) Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously."', args=[d.arg(name='wwids', type=d.T.array)]), - withWwids(wwids): { spec+: { source+: { inlineVolumeSpec+: { fc+: { wwids: if std.isArray(v=wwids) then wwids else [wwids] } } } } }, - '#withWwidsMixin':: d.fn(help='"Optional: FC volume world wide identifiers (wwids) Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='wwids', type=d.T.array)]), - withWwidsMixin(wwids): { spec+: { source+: { inlineVolumeSpec+: { fc+: { wwids+: if std.isArray(v=wwids) then wwids else [wwids] } } } } }, - }, - '#flexVolume':: d.obj(help='"FlexPersistentVolumeSource represents a generic persistent volume resource that is provisioned/attached using an exec based plugin."'), - flexVolume: { - '#secretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), - secretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { spec+: { source+: { inlineVolumeSpec+: { flexVolume+: { secretRef+: { name: name } } } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { spec+: { source+: { inlineVolumeSpec+: { flexVolume+: { secretRef+: { namespace: namespace } } } } } }, - }, - '#withDriver':: d.fn(help='"Driver is the name of the driver to use for this volume."', args=[d.arg(name='driver', type=d.T.string)]), - withDriver(driver): { spec+: { source+: { inlineVolumeSpec+: { flexVolume+: { driver: driver } } } } }, - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". 
The default filesystem depends on FlexVolume script."', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { spec+: { source+: { inlineVolumeSpec+: { flexVolume+: { fsType: fsType } } } } }, - '#withOptions':: d.fn(help='"Optional: Extra command options if any."', args=[d.arg(name='options', type=d.T.object)]), - withOptions(options): { spec+: { source+: { inlineVolumeSpec+: { flexVolume+: { options: options } } } } }, - '#withOptionsMixin':: d.fn(help='"Optional: Extra command options if any."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='options', type=d.T.object)]), - withOptionsMixin(options): { spec+: { source+: { inlineVolumeSpec+: { flexVolume+: { options+: options } } } } }, - '#withReadOnly':: d.fn(help='"Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { spec+: { source+: { inlineVolumeSpec+: { flexVolume+: { readOnly: readOnly } } } } }, - }, - '#flocker':: d.obj(help='"Represents a Flocker volume mounted by the Flocker agent. One and only one of datasetName and datasetUUID should be set. Flocker volumes do not support ownership management or SELinux relabeling."'), - flocker: { - '#withDatasetName':: d.fn(help='"Name of the dataset stored as metadata -> name on the dataset for Flocker should be considered as deprecated"', args=[d.arg(name='datasetName', type=d.T.string)]), - withDatasetName(datasetName): { spec+: { source+: { inlineVolumeSpec+: { flocker+: { datasetName: datasetName } } } } }, - '#withDatasetUUID':: d.fn(help='"UUID of the dataset. This is unique identifier of a Flocker dataset"', args=[d.arg(name='datasetUUID', type=d.T.string)]), - withDatasetUUID(datasetUUID): { spec+: { source+: { inlineVolumeSpec+: { flocker+: { datasetUUID: datasetUUID } } } } }, - }, - '#gcePersistentDisk':: d.obj(help='"Represents a Persistent Disk resource in Google Compute Engine.\\n\\nA GCE PD must exist before mounting to a container. The disk must also be in the same GCE project and zone as the kubelet. A GCE PD can only be mounted as read/write once or read-only many times. GCE PDs support ownership management and SELinux relabeling."'), - gcePersistentDisk: { - '#withFsType':: d.fn(help='"Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { spec+: { source+: { inlineVolumeSpec+: { gcePersistentDisk+: { fsType: fsType } } } } }, - '#withPartition':: d.fn(help='"The partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \\"1\\". Similarly, the volume partition for /dev/sda is \\"0\\" (or you can leave the property empty). More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='partition', type=d.T.integer)]), - withPartition(partition): { spec+: { source+: { inlineVolumeSpec+: { gcePersistentDisk+: { partition: partition } } } } }, - '#withPdName':: d.fn(help='"Unique name of the PD resource in GCE. Used to identify the disk in GCE. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='pdName', type=d.T.string)]), - withPdName(pdName): { spec+: { source+: { inlineVolumeSpec+: { gcePersistentDisk+: { pdName: pdName } } } } }, - '#withReadOnly':: d.fn(help='"ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { spec+: { source+: { inlineVolumeSpec+: { gcePersistentDisk+: { readOnly: readOnly } } } } }, - }, - '#glusterfs':: d.obj(help='"Represents a Glusterfs mount that lasts the lifetime of a pod. Glusterfs volumes do not support ownership management or SELinux relabeling."'), - glusterfs: { - '#withEndpoints':: d.fn(help='"EndpointsName is the endpoint name that details Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='endpoints', type=d.T.string)]), - withEndpoints(endpoints): { spec+: { source+: { inlineVolumeSpec+: { glusterfs+: { endpoints: endpoints } } } } }, - '#withEndpointsNamespace':: d.fn(help='"EndpointsNamespace is the namespace that contains Glusterfs endpoint. If this field is empty, the EndpointNamespace defaults to the same namespace as the bound PVC. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='endpointsNamespace', type=d.T.string)]), - withEndpointsNamespace(endpointsNamespace): { spec+: { source+: { inlineVolumeSpec+: { glusterfs+: { endpointsNamespace: endpointsNamespace } } } } }, - '#withPath':: d.fn(help='"Path is the Glusterfs volume path. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='path', type=d.T.string)]), - withPath(path): { spec+: { source+: { inlineVolumeSpec+: { glusterfs+: { path: path } } } } }, - '#withReadOnly':: d.fn(help='"ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions. Defaults to false. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { spec+: { source+: { inlineVolumeSpec+: { glusterfs+: { readOnly: readOnly } } } } }, - }, - '#hostPath':: d.obj(help='"Represents a host path mapped into a pod. Host path volumes do not support ownership management or SELinux relabeling."'), - hostPath: { - '#withPath':: d.fn(help='"Path of the directory on the host. If the path is a symlink, it will follow the link to the real path. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath"', args=[d.arg(name='path', type=d.T.string)]), - withPath(path): { spec+: { source+: { inlineVolumeSpec+: { hostPath+: { path: path } } } } }, - '#withType':: d.fn(help='"Type for HostPath Volume Defaults to \\"\\" More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath"', args=[d.arg(name='type', type=d.T.string)]), - withType(type): { spec+: { source+: { inlineVolumeSpec+: { hostPath+: { type: type } } } } }, - }, - '#iscsi':: d.obj(help='"ISCSIPersistentVolumeSource represents an ISCSI disk. ISCSI volumes can only be mounted as read/write once. ISCSI volumes support ownership management and SELinux relabeling."'), - iscsi: { - '#secretRef':: d.obj(help='"SecretReference represents a Secret Reference. 
It has enough information to retrieve secret in any namespace"'), - secretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { spec+: { source+: { inlineVolumeSpec+: { iscsi+: { secretRef+: { name: name } } } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { spec+: { source+: { inlineVolumeSpec+: { iscsi+: { secretRef+: { namespace: namespace } } } } } }, - }, - '#withChapAuthDiscovery':: d.fn(help='"whether support iSCSI Discovery CHAP authentication"', args=[d.arg(name='chapAuthDiscovery', type=d.T.boolean)]), - withChapAuthDiscovery(chapAuthDiscovery): { spec+: { source+: { inlineVolumeSpec+: { iscsi+: { chapAuthDiscovery: chapAuthDiscovery } } } } }, - '#withChapAuthSession':: d.fn(help='"whether support iSCSI Session CHAP authentication"', args=[d.arg(name='chapAuthSession', type=d.T.boolean)]), - withChapAuthSession(chapAuthSession): { spec+: { source+: { inlineVolumeSpec+: { iscsi+: { chapAuthSession: chapAuthSession } } } } }, - '#withFsType':: d.fn(help='"Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi"', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { spec+: { source+: { inlineVolumeSpec+: { iscsi+: { fsType: fsType } } } } }, - '#withInitiatorName':: d.fn(help='"Custom iSCSI Initiator Name. If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface : will be created for the connection."', args=[d.arg(name='initiatorName', type=d.T.string)]), - withInitiatorName(initiatorName): { spec+: { source+: { inlineVolumeSpec+: { iscsi+: { initiatorName: initiatorName } } } } }, - '#withIqn':: d.fn(help='"Target iSCSI Qualified Name."', args=[d.arg(name='iqn', type=d.T.string)]), - withIqn(iqn): { spec+: { source+: { inlineVolumeSpec+: { iscsi+: { iqn: iqn } } } } }, - '#withIscsiInterface':: d.fn(help="\"iSCSI Interface Name that uses an iSCSI transport. Defaults to 'default' (tcp).\"", args=[d.arg(name='iscsiInterface', type=d.T.string)]), - withIscsiInterface(iscsiInterface): { spec+: { source+: { inlineVolumeSpec+: { iscsi+: { iscsiInterface: iscsiInterface } } } } }, - '#withLun':: d.fn(help='"iSCSI Target Lun number."', args=[d.arg(name='lun', type=d.T.integer)]), - withLun(lun): { spec+: { source+: { inlineVolumeSpec+: { iscsi+: { lun: lun } } } } }, - '#withPortals':: d.fn(help='"iSCSI Target Portal List. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260)."', args=[d.arg(name='portals', type=d.T.array)]), - withPortals(portals): { spec+: { source+: { inlineVolumeSpec+: { iscsi+: { portals: if std.isArray(v=portals) then portals else [portals] } } } } }, - '#withPortalsMixin':: d.fn(help='"iSCSI Target Portal List. 
The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260)."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='portals', type=d.T.array)]), - withPortalsMixin(portals): { spec+: { source+: { inlineVolumeSpec+: { iscsi+: { portals+: if std.isArray(v=portals) then portals else [portals] } } } } }, - '#withReadOnly':: d.fn(help='"ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false."', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { spec+: { source+: { inlineVolumeSpec+: { iscsi+: { readOnly: readOnly } } } } }, - '#withTargetPortal':: d.fn(help='"iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260)."', args=[d.arg(name='targetPortal', type=d.T.string)]), - withTargetPortal(targetPortal): { spec+: { source+: { inlineVolumeSpec+: { iscsi+: { targetPortal: targetPortal } } } } }, - }, - '#local':: d.obj(help='"Local represents directly-attached storage with node affinity (Beta feature)"'), - 'local': { - '#withFsType':: d.fn(help='"Filesystem type to mount. It applies only when the Path is a block device. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". The default value is to auto-select a fileystem if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { spec+: { source+: { inlineVolumeSpec+: { 'local'+: { fsType: fsType } } } } }, - '#withPath':: d.fn(help='"The full path to the volume on the node. It can be either a directory or block device (disk, partition, ...)."', args=[d.arg(name='path', type=d.T.string)]), - withPath(path): { spec+: { source+: { inlineVolumeSpec+: { 'local'+: { path: path } } } } }, - }, - '#nfs':: d.obj(help='"Represents an NFS mount that lasts the lifetime of a pod. NFS volumes do not support ownership management or SELinux relabeling."'), - nfs: { - '#withPath':: d.fn(help='"Path that is exported by the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs"', args=[d.arg(name='path', type=d.T.string)]), - withPath(path): { spec+: { source+: { inlineVolumeSpec+: { nfs+: { path: path } } } } }, - '#withReadOnly':: d.fn(help='"ReadOnly here will force the NFS export to be mounted with read-only permissions. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs"', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { spec+: { source+: { inlineVolumeSpec+: { nfs+: { readOnly: readOnly } } } } }, - '#withServer':: d.fn(help='"Server is the hostname or IP address of the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs"', args=[d.arg(name='server', type=d.T.string)]), - withServer(server): { spec+: { source+: { inlineVolumeSpec+: { nfs+: { server: server } } } } }, - }, - '#nodeAffinity':: d.obj(help='"VolumeNodeAffinity defines constraints that limit what nodes this volume can be accessed from."'), - nodeAffinity: { - '#required':: d.obj(help='"A node selector represents the union of the results of one or more label queries over a set of nodes; that is, it represents the OR of the selectors represented by the node selector terms."'), - required: { - '#withNodeSelectorTerms':: d.fn(help='"Required. A list of node selector terms. 
The terms are ORed."', args=[d.arg(name='nodeSelectorTerms', type=d.T.array)]), - withNodeSelectorTerms(nodeSelectorTerms): { spec+: { source+: { inlineVolumeSpec+: { nodeAffinity+: { required+: { nodeSelectorTerms: if std.isArray(v=nodeSelectorTerms) then nodeSelectorTerms else [nodeSelectorTerms] } } } } } }, - '#withNodeSelectorTermsMixin':: d.fn(help='"Required. A list of node selector terms. The terms are ORed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='nodeSelectorTerms', type=d.T.array)]), - withNodeSelectorTermsMixin(nodeSelectorTerms): { spec+: { source+: { inlineVolumeSpec+: { nodeAffinity+: { required+: { nodeSelectorTerms+: if std.isArray(v=nodeSelectorTerms) then nodeSelectorTerms else [nodeSelectorTerms] } } } } } }, - }, - }, - '#photonPersistentDisk':: d.obj(help='"Represents a Photon Controller persistent disk resource."'), - photonPersistentDisk: { - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { spec+: { source+: { inlineVolumeSpec+: { photonPersistentDisk+: { fsType: fsType } } } } }, - '#withPdID':: d.fn(help='"ID that identifies Photon Controller persistent disk"', args=[d.arg(name='pdID', type=d.T.string)]), - withPdID(pdID): { spec+: { source+: { inlineVolumeSpec+: { photonPersistentDisk+: { pdID: pdID } } } } }, - }, - '#portworxVolume':: d.obj(help='"PortworxVolumeSource represents a Portworx volume resource."'), - portworxVolume: { - '#withFsType':: d.fn(help='"FSType represents the filesystem type to mount Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { spec+: { source+: { inlineVolumeSpec+: { portworxVolume+: { fsType: fsType } } } } }, - '#withReadOnly':: d.fn(help='"Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { spec+: { source+: { inlineVolumeSpec+: { portworxVolume+: { readOnly: readOnly } } } } }, - '#withVolumeID':: d.fn(help='"VolumeID uniquely identifies a Portworx volume"', args=[d.arg(name='volumeID', type=d.T.string)]), - withVolumeID(volumeID): { spec+: { source+: { inlineVolumeSpec+: { portworxVolume+: { volumeID: volumeID } } } } }, - }, - '#quobyte':: d.obj(help='"Represents a Quobyte mount that lasts the lifetime of a pod. Quobyte volumes do not support ownership management or SELinux relabeling."'), - quobyte: { - '#withGroup':: d.fn(help='"Group to map volume access to Default is no group"', args=[d.arg(name='group', type=d.T.string)]), - withGroup(group): { spec+: { source+: { inlineVolumeSpec+: { quobyte+: { group: group } } } } }, - '#withReadOnly':: d.fn(help='"ReadOnly here will force the Quobyte volume to be mounted with read-only permissions. 
Defaults to false."', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { spec+: { source+: { inlineVolumeSpec+: { quobyte+: { readOnly: readOnly } } } } }, - '#withRegistry':: d.fn(help='"Registry represents a single or multiple Quobyte Registry services specified as a string as host:port pair (multiple entries are separated with commas) which acts as the central registry for volumes"', args=[d.arg(name='registry', type=d.T.string)]), - withRegistry(registry): { spec+: { source+: { inlineVolumeSpec+: { quobyte+: { registry: registry } } } } }, - '#withTenant':: d.fn(help='"Tenant owning the given Quobyte volume in the Backend Used with dynamically provisioned Quobyte volumes, value is set by the plugin"', args=[d.arg(name='tenant', type=d.T.string)]), - withTenant(tenant): { spec+: { source+: { inlineVolumeSpec+: { quobyte+: { tenant: tenant } } } } }, - '#withUser':: d.fn(help='"User to map volume access to Defaults to serivceaccount user"', args=[d.arg(name='user', type=d.T.string)]), - withUser(user): { spec+: { source+: { inlineVolumeSpec+: { quobyte+: { user: user } } } } }, - '#withVolume':: d.fn(help='"Volume is a string that references an already created Quobyte volume by name."', args=[d.arg(name='volume', type=d.T.string)]), - withVolume(volume): { spec+: { source+: { inlineVolumeSpec+: { quobyte+: { volume: volume } } } } }, - }, - '#rbd':: d.obj(help='"Represents a Rados Block Device mount that lasts the lifetime of a pod. RBD volumes support ownership management and SELinux relabeling."'), - rbd: { - '#secretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), - secretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { spec+: { source+: { inlineVolumeSpec+: { rbd+: { secretRef+: { name: name } } } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { spec+: { source+: { inlineVolumeSpec+: { rbd+: { secretRef+: { namespace: namespace } } } } } }, - }, - '#withFsType':: d.fn(help='"Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd"', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { spec+: { source+: { inlineVolumeSpec+: { rbd+: { fsType: fsType } } } } }, - '#withImage':: d.fn(help='"The rados image name. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='image', type=d.T.string)]), - withImage(image): { spec+: { source+: { inlineVolumeSpec+: { rbd+: { image: image } } } } }, - '#withKeyring':: d.fn(help='"Keyring is the path to key ring for RBDUser. Default is /etc/ceph/keyring. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='keyring', type=d.T.string)]), - withKeyring(keyring): { spec+: { source+: { inlineVolumeSpec+: { rbd+: { keyring: keyring } } } } }, - '#withMonitors':: d.fn(help='"A collection of Ceph monitors. 
More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='monitors', type=d.T.array)]), - withMonitors(monitors): { spec+: { source+: { inlineVolumeSpec+: { rbd+: { monitors: if std.isArray(v=monitors) then monitors else [monitors] } } } } }, - '#withMonitorsMixin':: d.fn(help='"A collection of Ceph monitors. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='monitors', type=d.T.array)]), - withMonitorsMixin(monitors): { spec+: { source+: { inlineVolumeSpec+: { rbd+: { monitors+: if std.isArray(v=monitors) then monitors else [monitors] } } } } }, - '#withPool':: d.fn(help='"The rados pool name. Default is rbd. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='pool', type=d.T.string)]), - withPool(pool): { spec+: { source+: { inlineVolumeSpec+: { rbd+: { pool: pool } } } } }, - '#withReadOnly':: d.fn(help='"ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { spec+: { source+: { inlineVolumeSpec+: { rbd+: { readOnly: readOnly } } } } }, - '#withUser':: d.fn(help='"The rados user name. Default is admin. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='user', type=d.T.string)]), - withUser(user): { spec+: { source+: { inlineVolumeSpec+: { rbd+: { user: user } } } } }, - }, - '#scaleIO':: d.obj(help='"ScaleIOPersistentVolumeSource represents a persistent ScaleIO volume"'), - scaleIO: { - '#secretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), - secretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { spec+: { source+: { inlineVolumeSpec+: { scaleIO+: { secretRef+: { name: name } } } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { spec+: { source+: { inlineVolumeSpec+: { scaleIO+: { secretRef+: { namespace: namespace } } } } } }, - }, - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Default is \\"xfs\\', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { spec+: { source+: { inlineVolumeSpec+: { scaleIO+: { fsType: fsType } } } } }, - '#withGateway':: d.fn(help='"The host address of the ScaleIO API Gateway."', args=[d.arg(name='gateway', type=d.T.string)]), - withGateway(gateway): { spec+: { source+: { inlineVolumeSpec+: { scaleIO+: { gateway: gateway } } } } }, - '#withProtectionDomain':: d.fn(help='"The name of the ScaleIO Protection Domain for the configured storage."', args=[d.arg(name='protectionDomain', type=d.T.string)]), - withProtectionDomain(protectionDomain): { spec+: { source+: { inlineVolumeSpec+: { scaleIO+: { protectionDomain: protectionDomain } } } } }, - '#withReadOnly':: d.fn(help='"Defaults to false (read/write). 
ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { spec+: { source+: { inlineVolumeSpec+: { scaleIO+: { readOnly: readOnly } } } } }, - '#withSslEnabled':: d.fn(help='"Flag to enable/disable SSL communication with Gateway, default false"', args=[d.arg(name='sslEnabled', type=d.T.boolean)]), - withSslEnabled(sslEnabled): { spec+: { source+: { inlineVolumeSpec+: { scaleIO+: { sslEnabled: sslEnabled } } } } }, - '#withStorageMode':: d.fn(help='"Indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. Default is ThinProvisioned."', args=[d.arg(name='storageMode', type=d.T.string)]), - withStorageMode(storageMode): { spec+: { source+: { inlineVolumeSpec+: { scaleIO+: { storageMode: storageMode } } } } }, - '#withStoragePool':: d.fn(help='"The ScaleIO Storage Pool associated with the protection domain."', args=[d.arg(name='storagePool', type=d.T.string)]), - withStoragePool(storagePool): { spec+: { source+: { inlineVolumeSpec+: { scaleIO+: { storagePool: storagePool } } } } }, - '#withSystem':: d.fn(help='"The name of the storage system as configured in ScaleIO."', args=[d.arg(name='system', type=d.T.string)]), - withSystem(system): { spec+: { source+: { inlineVolumeSpec+: { scaleIO+: { system: system } } } } }, - '#withVolumeName':: d.fn(help='"The name of a volume already created in the ScaleIO system that is associated with this volume source."', args=[d.arg(name='volumeName', type=d.T.string)]), - withVolumeName(volumeName): { spec+: { source+: { inlineVolumeSpec+: { scaleIO+: { volumeName: volumeName } } } } }, - }, - '#storageos':: d.obj(help='"Represents a StorageOS persistent volume resource."'), - storageos: { - '#secretRef':: d.obj(help='"ObjectReference contains enough information to let you inspect or modify the referred object."'), - secretRef: { - '#withApiVersion':: d.fn(help='"API version of the referent."', args=[d.arg(name='apiVersion', type=d.T.string)]), - withApiVersion(apiVersion): { spec+: { source+: { inlineVolumeSpec+: { storageos+: { secretRef+: { apiVersion: apiVersion } } } } } }, - '#withFieldPath':: d.fn(help='"If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. For example, if the object reference is to a container within a pod, this would take on a value like: \\"spec.containers{name}\\" (where \\"name\\" refers to the name of the container that triggered the event) or if no container name is specified \\"spec.containers[2]\\" (container with index 2 in this pod). This syntax is chosen only to have some well-defined way of referencing a part of an object."', args=[d.arg(name='fieldPath', type=d.T.string)]), - withFieldPath(fieldPath): { spec+: { source+: { inlineVolumeSpec+: { storageos+: { secretRef+: { fieldPath: fieldPath } } } } } }, - '#withKind':: d.fn(help='"Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds"', args=[d.arg(name='kind', type=d.T.string)]), - withKind(kind): { spec+: { source+: { inlineVolumeSpec+: { storageos+: { secretRef+: { kind: kind } } } } } }, - '#withName':: d.fn(help='"Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { spec+: { source+: { inlineVolumeSpec+: { storageos+: { secretRef+: { name: name } } } } } }, - '#withNamespace':: d.fn(help='"Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/"', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { spec+: { source+: { inlineVolumeSpec+: { storageos+: { secretRef+: { namespace: namespace } } } } } }, - '#withResourceVersion':: d.fn(help='"Specific resourceVersion to which this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), - withResourceVersion(resourceVersion): { spec+: { source+: { inlineVolumeSpec+: { storageos+: { secretRef+: { resourceVersion: resourceVersion } } } } } }, - '#withUid':: d.fn(help='"UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids"', args=[d.arg(name='uid', type=d.T.string)]), - withUid(uid): { spec+: { source+: { inlineVolumeSpec+: { storageos+: { secretRef+: { uid: uid } } } } } }, - }, - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { spec+: { source+: { inlineVolumeSpec+: { storageos+: { fsType: fsType } } } } }, - '#withReadOnly':: d.fn(help='"Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { spec+: { source+: { inlineVolumeSpec+: { storageos+: { readOnly: readOnly } } } } }, - '#withVolumeName':: d.fn(help='"VolumeName is the human-readable name of the StorageOS volume. Volume names are only unique within a namespace."', args=[d.arg(name='volumeName', type=d.T.string)]), - withVolumeName(volumeName): { spec+: { source+: { inlineVolumeSpec+: { storageos+: { volumeName: volumeName } } } } }, - '#withVolumeNamespace':: d.fn(help="\"VolumeNamespace specifies the scope of the volume within StorageOS. If no namespace is specified then the Pod's namespace will be used. This allows the Kubernetes name scoping to be mirrored within StorageOS for tighter integration. Set VolumeName to any name to override the default behaviour. Set to \\\"default\\\" if you are not using namespaces within StorageOS. Namespaces that do not pre-exist within StorageOS will be created.\"", args=[d.arg(name='volumeNamespace', type=d.T.string)]), - withVolumeNamespace(volumeNamespace): { spec+: { source+: { inlineVolumeSpec+: { storageos+: { volumeNamespace: volumeNamespace } } } } }, - }, - '#vsphereVolume':: d.obj(help='"Represents a vSphere volume resource."'), - vsphereVolume: { - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". 
Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { spec+: { source+: { inlineVolumeSpec+: { vsphereVolume+: { fsType: fsType } } } } }, - '#withStoragePolicyID':: d.fn(help='"Storage Policy Based Management (SPBM) profile ID associated with the StoragePolicyName."', args=[d.arg(name='storagePolicyID', type=d.T.string)]), - withStoragePolicyID(storagePolicyID): { spec+: { source+: { inlineVolumeSpec+: { vsphereVolume+: { storagePolicyID: storagePolicyID } } } } }, - '#withStoragePolicyName':: d.fn(help='"Storage Policy Based Management (SPBM) profile name."', args=[d.arg(name='storagePolicyName', type=d.T.string)]), - withStoragePolicyName(storagePolicyName): { spec+: { source+: { inlineVolumeSpec+: { vsphereVolume+: { storagePolicyName: storagePolicyName } } } } }, - '#withVolumePath':: d.fn(help='"Path that identifies vSphere volume vmdk"', args=[d.arg(name='volumePath', type=d.T.string)]), - withVolumePath(volumePath): { spec+: { source+: { inlineVolumeSpec+: { vsphereVolume+: { volumePath: volumePath } } } } }, - }, - '#withAccessModes':: d.fn(help='"AccessModes contains all ways the volume can be mounted. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes"', args=[d.arg(name='accessModes', type=d.T.array)]), - withAccessModes(accessModes): { spec+: { source+: { inlineVolumeSpec+: { accessModes: if std.isArray(v=accessModes) then accessModes else [accessModes] } } } }, - '#withAccessModesMixin':: d.fn(help='"AccessModes contains all ways the volume can be mounted. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='accessModes', type=d.T.array)]), - withAccessModesMixin(accessModes): { spec+: { source+: { inlineVolumeSpec+: { accessModes+: if std.isArray(v=accessModes) then accessModes else [accessModes] } } } }, - '#withCapacity':: d.fn(help="\"A description of the persistent volume's resources and capacity. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity\"", args=[d.arg(name='capacity', type=d.T.object)]), - withCapacity(capacity): { spec+: { source+: { inlineVolumeSpec+: { capacity: capacity } } } }, - '#withCapacityMixin':: d.fn(help="\"A description of the persistent volume's resources and capacity. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='capacity', type=d.T.object)]), - withCapacityMixin(capacity): { spec+: { source+: { inlineVolumeSpec+: { capacity+: capacity } } } }, - '#withMountOptions':: d.fn(help='"A list of mount options, e.g. [\\"ro\\", \\"soft\\"]. Not validated - mount will simply fail if one is invalid. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#mount-options"', args=[d.arg(name='mountOptions', type=d.T.array)]), - withMountOptions(mountOptions): { spec+: { source+: { inlineVolumeSpec+: { mountOptions: if std.isArray(v=mountOptions) then mountOptions else [mountOptions] } } } }, - '#withMountOptionsMixin':: d.fn(help='"A list of mount options, e.g. [\\"ro\\", \\"soft\\"]. Not validated - mount will simply fail if one is invalid. 
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#mount-options"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='mountOptions', type=d.T.array)]), - withMountOptionsMixin(mountOptions): { spec+: { source+: { inlineVolumeSpec+: { mountOptions+: if std.isArray(v=mountOptions) then mountOptions else [mountOptions] } } } }, - '#withPersistentVolumeReclaimPolicy':: d.fn(help='"What happens to a persistent volume when released from its claim. Valid options are Retain (default for manually created PersistentVolumes), Delete (default for dynamically provisioned PersistentVolumes), and Recycle (deprecated). Recycle must be supported by the volume plugin underlying this PersistentVolume. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#reclaiming"', args=[d.arg(name='persistentVolumeReclaimPolicy', type=d.T.string)]), - withPersistentVolumeReclaimPolicy(persistentVolumeReclaimPolicy): { spec+: { source+: { inlineVolumeSpec+: { persistentVolumeReclaimPolicy: persistentVolumeReclaimPolicy } } } }, - '#withStorageClassName':: d.fn(help='"Name of StorageClass to which this persistent volume belongs. Empty value means that this volume does not belong to any StorageClass."', args=[d.arg(name='storageClassName', type=d.T.string)]), - withStorageClassName(storageClassName): { spec+: { source+: { inlineVolumeSpec+: { storageClassName: storageClassName } } } }, - '#withVolumeMode':: d.fn(help='"volumeMode defines if a volume is intended to be used with a formatted filesystem or to remain in raw block state. Value of Filesystem is implied when not included in spec."', args=[d.arg(name='volumeMode', type=d.T.string)]), - withVolumeMode(volumeMode): { spec+: { source+: { inlineVolumeSpec+: { volumeMode: volumeMode } } } }, - }, - '#withPersistentVolumeName':: d.fn(help='"Name of the persistent volume to attach."', args=[d.arg(name='persistentVolumeName', type=d.T.string)]), - withPersistentVolumeName(persistentVolumeName): { spec+: { source+: { persistentVolumeName: persistentVolumeName } } }, - }, - '#withAttacher':: d.fn(help='"Attacher indicates the name of the volume driver that MUST handle this request. This is the name returned by GetPluginName()."', args=[d.arg(name='attacher', type=d.T.string)]), - withAttacher(attacher): { spec+: { attacher: attacher } }, - '#withNodeName':: d.fn(help='"The node that the volume should be attached to."', args=[d.arg(name='nodeName', type=d.T.string)]), - withNodeName(nodeName): { spec+: { nodeName: nodeName } }, - }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1beta1/volumeAttachmentSource.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1beta1/volumeAttachmentSource.libsonnet deleted file mode 100644 index 47ca4b20576..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1beta1/volumeAttachmentSource.libsonnet +++ /dev/null @@ -1,419 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='volumeAttachmentSource', url='', help='"VolumeAttachmentSource represents a volume that should be attached. Right now only PersistenVolumes can be attached via external attacher, in future we may allow also inline volumes in pods. 
Exactly one member can be set."'), - '#inlineVolumeSpec':: d.obj(help='"PersistentVolumeSpec is the specification of a persistent volume."'), - inlineVolumeSpec: { - '#awsElasticBlockStore':: d.obj(help='"Represents a Persistent Disk resource in AWS.\\n\\nAn AWS EBS disk must exist before mounting to a container. The disk must also be in the same AWS zone as the kubelet. An AWS EBS disk can only be mounted as read/write once. AWS EBS volumes support ownership management and SELinux relabeling."'), - awsElasticBlockStore: { - '#withFsType':: d.fn(help='"Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore"', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { inlineVolumeSpec+: { awsElasticBlockStore+: { fsType: fsType } } }, - '#withPartition':: d.fn(help='"The partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \\"1\\". Similarly, the volume partition for /dev/sda is \\"0\\" (or you can leave the property empty)."', args=[d.arg(name='partition', type=d.T.integer)]), - withPartition(partition): { inlineVolumeSpec+: { awsElasticBlockStore+: { partition: partition } } }, - '#withReadOnly':: d.fn(help='"Specify \\"true\\" to force and set the ReadOnly property in VolumeMounts to \\"true\\". If omitted, the default is \\"false\\". More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore"', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { inlineVolumeSpec+: { awsElasticBlockStore+: { readOnly: readOnly } } }, - '#withVolumeID':: d.fn(help='"Unique ID of the persistent disk resource in AWS (Amazon EBS volume). More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore"', args=[d.arg(name='volumeID', type=d.T.string)]), - withVolumeID(volumeID): { inlineVolumeSpec+: { awsElasticBlockStore+: { volumeID: volumeID } } }, - }, - '#azureDisk':: d.obj(help='"AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod."'), - azureDisk: { - '#withCachingMode':: d.fn(help='"Host Caching mode: None, Read Only, Read Write."', args=[d.arg(name='cachingMode', type=d.T.string)]), - withCachingMode(cachingMode): { inlineVolumeSpec+: { azureDisk+: { cachingMode: cachingMode } } }, - '#withDiskName':: d.fn(help='"The Name of the data disk in the blob storage"', args=[d.arg(name='diskName', type=d.T.string)]), - withDiskName(diskName): { inlineVolumeSpec+: { azureDisk+: { diskName: diskName } } }, - '#withDiskURI':: d.fn(help='"The URI the data disk in the blob storage"', args=[d.arg(name='diskURI', type=d.T.string)]), - withDiskURI(diskURI): { inlineVolumeSpec+: { azureDisk+: { diskURI: diskURI } } }, - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". 
Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { inlineVolumeSpec+: { azureDisk+: { fsType: fsType } } }, - '#withKind':: d.fn(help='"Expected values Shared: multiple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared"', args=[d.arg(name='kind', type=d.T.string)]), - withKind(kind): { inlineVolumeSpec+: { azureDisk+: { kind: kind } } }, - '#withReadOnly':: d.fn(help='"Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { inlineVolumeSpec+: { azureDisk+: { readOnly: readOnly } } }, - }, - '#azureFile':: d.obj(help='"AzureFile represents an Azure File Service mount on the host and bind mount to the pod."'), - azureFile: { - '#withReadOnly':: d.fn(help='"Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { inlineVolumeSpec+: { azureFile+: { readOnly: readOnly } } }, - '#withSecretName':: d.fn(help='"the name of secret that contains Azure Storage Account Name and Key"', args=[d.arg(name='secretName', type=d.T.string)]), - withSecretName(secretName): { inlineVolumeSpec+: { azureFile+: { secretName: secretName } } }, - '#withSecretNamespace':: d.fn(help='"the namespace of the secret that contains Azure Storage Account Name and Key default is the same as the Pod"', args=[d.arg(name='secretNamespace', type=d.T.string)]), - withSecretNamespace(secretNamespace): { inlineVolumeSpec+: { azureFile+: { secretNamespace: secretNamespace } } }, - '#withShareName':: d.fn(help='"Share Name"', args=[d.arg(name='shareName', type=d.T.string)]), - withShareName(shareName): { inlineVolumeSpec+: { azureFile+: { shareName: shareName } } }, - }, - '#cephfs':: d.obj(help='"Represents a Ceph Filesystem mount that lasts the lifetime of a pod Cephfs volumes do not support ownership management or SELinux relabeling."'), - cephfs: { - '#secretRef':: d.obj(help='"SecretReference represents a Secret Reference. 
It has enough information to retrieve secret in any namespace"'), - secretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { inlineVolumeSpec+: { cephfs+: { secretRef+: { name: name } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { inlineVolumeSpec+: { cephfs+: { secretRef+: { namespace: namespace } } } }, - }, - '#withMonitors':: d.fn(help='"Required: Monitors is a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='monitors', type=d.T.array)]), - withMonitors(monitors): { inlineVolumeSpec+: { cephfs+: { monitors: if std.isArray(v=monitors) then monitors else [monitors] } } }, - '#withMonitorsMixin':: d.fn(help='"Required: Monitors is a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='monitors', type=d.T.array)]), - withMonitorsMixin(monitors): { inlineVolumeSpec+: { cephfs+: { monitors+: if std.isArray(v=monitors) then monitors else [monitors] } } }, - '#withPath':: d.fn(help='"Optional: Used as the mounted root, rather than the full Ceph tree, default is /"', args=[d.arg(name='path', type=d.T.string)]), - withPath(path): { inlineVolumeSpec+: { cephfs+: { path: path } } }, - '#withReadOnly':: d.fn(help='"Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { inlineVolumeSpec+: { cephfs+: { readOnly: readOnly } } }, - '#withSecretFile':: d.fn(help='"Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='secretFile', type=d.T.string)]), - withSecretFile(secretFile): { inlineVolumeSpec+: { cephfs+: { secretFile: secretFile } } }, - '#withUser':: d.fn(help='"Optional: User is the rados user name, default is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='user', type=d.T.string)]), - withUser(user): { inlineVolumeSpec+: { cephfs+: { user: user } } }, - }, - '#cinder':: d.obj(help='"Represents a cinder volume resource in Openstack. A Cinder volume must exist before mounting to a container. The volume must also be in the same region as the kubelet. Cinder volumes support ownership management and SELinux relabeling."'), - cinder: { - '#secretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), - secretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { inlineVolumeSpec+: { cinder+: { secretRef+: { name: name } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { inlineVolumeSpec+: { cinder+: { secretRef+: { namespace: namespace } } } }, - }, - '#withFsType':: d.fn(help='"Filesystem type to mount. 
Must be a filesystem type supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://examples.k8s.io/mysql-cinder-pd/README.md"', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { inlineVolumeSpec+: { cinder+: { fsType: fsType } } }, - '#withReadOnly':: d.fn(help='"Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/mysql-cinder-pd/README.md"', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { inlineVolumeSpec+: { cinder+: { readOnly: readOnly } } }, - '#withVolumeID':: d.fn(help='"volume id used to identify the volume in cinder. More info: https://examples.k8s.io/mysql-cinder-pd/README.md"', args=[d.arg(name='volumeID', type=d.T.string)]), - withVolumeID(volumeID): { inlineVolumeSpec+: { cinder+: { volumeID: volumeID } } }, - }, - '#claimRef':: d.obj(help='"ObjectReference contains enough information to let you inspect or modify the referred object."'), - claimRef: { - '#withApiVersion':: d.fn(help='"API version of the referent."', args=[d.arg(name='apiVersion', type=d.T.string)]), - withApiVersion(apiVersion): { inlineVolumeSpec+: { claimRef+: { apiVersion: apiVersion } } }, - '#withFieldPath':: d.fn(help='"If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. For example, if the object reference is to a container within a pod, this would take on a value like: \\"spec.containers{name}\\" (where \\"name\\" refers to the name of the container that triggered the event) or if no container name is specified \\"spec.containers[2]\\" (container with index 2 in this pod). This syntax is chosen only to have some well-defined way of referencing a part of an object."', args=[d.arg(name='fieldPath', type=d.T.string)]), - withFieldPath(fieldPath): { inlineVolumeSpec+: { claimRef+: { fieldPath: fieldPath } } }, - '#withKind':: d.fn(help='"Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds"', args=[d.arg(name='kind', type=d.T.string)]), - withKind(kind): { inlineVolumeSpec+: { claimRef+: { kind: kind } } }, - '#withName':: d.fn(help='"Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { inlineVolumeSpec+: { claimRef+: { name: name } } }, - '#withNamespace':: d.fn(help='"Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/"', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { inlineVolumeSpec+: { claimRef+: { namespace: namespace } } }, - '#withResourceVersion':: d.fn(help='"Specific resourceVersion to which this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), - withResourceVersion(resourceVersion): { inlineVolumeSpec+: { claimRef+: { resourceVersion: resourceVersion } } }, - '#withUid':: d.fn(help='"UID of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids"', args=[d.arg(name='uid', type=d.T.string)]), - withUid(uid): { inlineVolumeSpec+: { claimRef+: { uid: uid } } }, - }, - '#csi':: d.obj(help='"Represents storage that is managed by an external CSI volume driver (Beta feature)"'), - csi: { - '#controllerExpandSecretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), - controllerExpandSecretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { inlineVolumeSpec+: { csi+: { controllerExpandSecretRef+: { name: name } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { inlineVolumeSpec+: { csi+: { controllerExpandSecretRef+: { namespace: namespace } } } }, - }, - '#controllerPublishSecretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), - controllerPublishSecretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { inlineVolumeSpec+: { csi+: { controllerPublishSecretRef+: { name: name } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { inlineVolumeSpec+: { csi+: { controllerPublishSecretRef+: { namespace: namespace } } } }, - }, - '#nodePublishSecretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), - nodePublishSecretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { inlineVolumeSpec+: { csi+: { nodePublishSecretRef+: { name: name } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { inlineVolumeSpec+: { csi+: { nodePublishSecretRef+: { namespace: namespace } } } }, - }, - '#nodeStageSecretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), - nodeStageSecretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { inlineVolumeSpec+: { csi+: { nodeStageSecretRef+: { name: name } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { inlineVolumeSpec+: { csi+: { nodeStageSecretRef+: { namespace: namespace } } } }, - }, - '#withDriver':: d.fn(help='"Driver is the name of the driver to use for this volume. Required."', args=[d.arg(name='driver', type=d.T.string)]), - withDriver(driver): { inlineVolumeSpec+: { csi+: { driver: driver } } }, - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. 
\\"ext4\\", \\"xfs\\", \\"ntfs\\"."', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { inlineVolumeSpec+: { csi+: { fsType: fsType } } }, - '#withReadOnly':: d.fn(help='"Optional: The value to pass to ControllerPublishVolumeRequest. Defaults to false (read/write)."', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { inlineVolumeSpec+: { csi+: { readOnly: readOnly } } }, - '#withVolumeAttributes':: d.fn(help='"Attributes of the volume to publish."', args=[d.arg(name='volumeAttributes', type=d.T.object)]), - withVolumeAttributes(volumeAttributes): { inlineVolumeSpec+: { csi+: { volumeAttributes: volumeAttributes } } }, - '#withVolumeAttributesMixin':: d.fn(help='"Attributes of the volume to publish."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='volumeAttributes', type=d.T.object)]), - withVolumeAttributesMixin(volumeAttributes): { inlineVolumeSpec+: { csi+: { volumeAttributes+: volumeAttributes } } }, - '#withVolumeHandle':: d.fn(help='"VolumeHandle is the unique volume name returned by the CSI volume plugin’s CreateVolume to refer to the volume on all subsequent calls. Required."', args=[d.arg(name='volumeHandle', type=d.T.string)]), - withVolumeHandle(volumeHandle): { inlineVolumeSpec+: { csi+: { volumeHandle: volumeHandle } } }, - }, - '#fc':: d.obj(help='"Represents a Fibre Channel volume. Fibre Channel volumes can only be mounted as read/write once. Fibre Channel volumes support ownership management and SELinux relabeling."'), - fc: { - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { inlineVolumeSpec+: { fc+: { fsType: fsType } } }, - '#withLun':: d.fn(help='"Optional: FC target lun number"', args=[d.arg(name='lun', type=d.T.integer)]), - withLun(lun): { inlineVolumeSpec+: { fc+: { lun: lun } } }, - '#withReadOnly':: d.fn(help='"Optional: Defaults to false (read/write). 
ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { inlineVolumeSpec+: { fc+: { readOnly: readOnly } } }, - '#withTargetWWNs':: d.fn(help='"Optional: FC target worldwide names (WWNs)"', args=[d.arg(name='targetWWNs', type=d.T.array)]), - withTargetWWNs(targetWWNs): { inlineVolumeSpec+: { fc+: { targetWWNs: if std.isArray(v=targetWWNs) then targetWWNs else [targetWWNs] } } }, - '#withTargetWWNsMixin':: d.fn(help='"Optional: FC target worldwide names (WWNs)"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='targetWWNs', type=d.T.array)]), - withTargetWWNsMixin(targetWWNs): { inlineVolumeSpec+: { fc+: { targetWWNs+: if std.isArray(v=targetWWNs) then targetWWNs else [targetWWNs] } } }, - '#withWwids':: d.fn(help='"Optional: FC volume world wide identifiers (wwids) Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously."', args=[d.arg(name='wwids', type=d.T.array)]), - withWwids(wwids): { inlineVolumeSpec+: { fc+: { wwids: if std.isArray(v=wwids) then wwids else [wwids] } } }, - '#withWwidsMixin':: d.fn(help='"Optional: FC volume world wide identifiers (wwids) Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='wwids', type=d.T.array)]), - withWwidsMixin(wwids): { inlineVolumeSpec+: { fc+: { wwids+: if std.isArray(v=wwids) then wwids else [wwids] } } }, - }, - '#flexVolume':: d.obj(help='"FlexPersistentVolumeSource represents a generic persistent volume resource that is provisioned/attached using an exec based plugin."'), - flexVolume: { - '#secretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), - secretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { inlineVolumeSpec+: { flexVolume+: { secretRef+: { name: name } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { inlineVolumeSpec+: { flexVolume+: { secretRef+: { namespace: namespace } } } }, - }, - '#withDriver':: d.fn(help='"Driver is the name of the driver to use for this volume."', args=[d.arg(name='driver', type=d.T.string)]), - withDriver(driver): { inlineVolumeSpec+: { flexVolume+: { driver: driver } } }, - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". The default filesystem depends on FlexVolume script."', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { inlineVolumeSpec+: { flexVolume+: { fsType: fsType } } }, - '#withOptions':: d.fn(help='"Optional: Extra command options if any."', args=[d.arg(name='options', type=d.T.object)]), - withOptions(options): { inlineVolumeSpec+: { flexVolume+: { options: options } } }, - '#withOptionsMixin':: d.fn(help='"Optional: Extra command options if any."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='options', type=d.T.object)]), - withOptionsMixin(options): { inlineVolumeSpec+: { flexVolume+: { options+: options } } }, - '#withReadOnly':: d.fn(help='"Optional: Defaults to false (read/write). 
ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { inlineVolumeSpec+: { flexVolume+: { readOnly: readOnly } } }, - }, - '#flocker':: d.obj(help='"Represents a Flocker volume mounted by the Flocker agent. One and only one of datasetName and datasetUUID should be set. Flocker volumes do not support ownership management or SELinux relabeling."'), - flocker: { - '#withDatasetName':: d.fn(help='"Name of the dataset stored as metadata -> name on the dataset for Flocker should be considered as deprecated"', args=[d.arg(name='datasetName', type=d.T.string)]), - withDatasetName(datasetName): { inlineVolumeSpec+: { flocker+: { datasetName: datasetName } } }, - '#withDatasetUUID':: d.fn(help='"UUID of the dataset. This is unique identifier of a Flocker dataset"', args=[d.arg(name='datasetUUID', type=d.T.string)]), - withDatasetUUID(datasetUUID): { inlineVolumeSpec+: { flocker+: { datasetUUID: datasetUUID } } }, - }, - '#gcePersistentDisk':: d.obj(help='"Represents a Persistent Disk resource in Google Compute Engine.\\n\\nA GCE PD must exist before mounting to a container. The disk must also be in the same GCE project and zone as the kubelet. A GCE PD can only be mounted as read/write once or read-only many times. GCE PDs support ownership management and SELinux relabeling."'), - gcePersistentDisk: { - '#withFsType':: d.fn(help='"Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { inlineVolumeSpec+: { gcePersistentDisk+: { fsType: fsType } } }, - '#withPartition':: d.fn(help='"The partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \\"1\\". Similarly, the volume partition for /dev/sda is \\"0\\" (or you can leave the property empty). More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='partition', type=d.T.integer)]), - withPartition(partition): { inlineVolumeSpec+: { gcePersistentDisk+: { partition: partition } } }, - '#withPdName':: d.fn(help='"Unique name of the PD resource in GCE. Used to identify the disk in GCE. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='pdName', type=d.T.string)]), - withPdName(pdName): { inlineVolumeSpec+: { gcePersistentDisk+: { pdName: pdName } } }, - '#withReadOnly':: d.fn(help='"ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { inlineVolumeSpec+: { gcePersistentDisk+: { readOnly: readOnly } } }, - }, - '#glusterfs':: d.obj(help='"Represents a Glusterfs mount that lasts the lifetime of a pod. Glusterfs volumes do not support ownership management or SELinux relabeling."'), - glusterfs: { - '#withEndpoints':: d.fn(help='"EndpointsName is the endpoint name that details Glusterfs topology. 
More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='endpoints', type=d.T.string)]), - withEndpoints(endpoints): { inlineVolumeSpec+: { glusterfs+: { endpoints: endpoints } } }, - '#withEndpointsNamespace':: d.fn(help='"EndpointsNamespace is the namespace that contains Glusterfs endpoint. If this field is empty, the EndpointNamespace defaults to the same namespace as the bound PVC. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='endpointsNamespace', type=d.T.string)]), - withEndpointsNamespace(endpointsNamespace): { inlineVolumeSpec+: { glusterfs+: { endpointsNamespace: endpointsNamespace } } }, - '#withPath':: d.fn(help='"Path is the Glusterfs volume path. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='path', type=d.T.string)]), - withPath(path): { inlineVolumeSpec+: { glusterfs+: { path: path } } }, - '#withReadOnly':: d.fn(help='"ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions. Defaults to false. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { inlineVolumeSpec+: { glusterfs+: { readOnly: readOnly } } }, - }, - '#hostPath':: d.obj(help='"Represents a host path mapped into a pod. Host path volumes do not support ownership management or SELinux relabeling."'), - hostPath: { - '#withPath':: d.fn(help='"Path of the directory on the host. If the path is a symlink, it will follow the link to the real path. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath"', args=[d.arg(name='path', type=d.T.string)]), - withPath(path): { inlineVolumeSpec+: { hostPath+: { path: path } } }, - '#withType':: d.fn(help='"Type for HostPath Volume Defaults to \\"\\" More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath"', args=[d.arg(name='type', type=d.T.string)]), - withType(type): { inlineVolumeSpec+: { hostPath+: { type: type } } }, - }, - '#iscsi':: d.obj(help='"ISCSIPersistentVolumeSource represents an ISCSI disk. ISCSI volumes can only be mounted as read/write once. ISCSI volumes support ownership management and SELinux relabeling."'), - iscsi: { - '#secretRef':: d.obj(help='"SecretReference represents a Secret Reference. 
It has enough information to retrieve secret in any namespace"'), - secretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { inlineVolumeSpec+: { iscsi+: { secretRef+: { name: name } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { inlineVolumeSpec+: { iscsi+: { secretRef+: { namespace: namespace } } } }, - }, - '#withChapAuthDiscovery':: d.fn(help='"whether support iSCSI Discovery CHAP authentication"', args=[d.arg(name='chapAuthDiscovery', type=d.T.boolean)]), - withChapAuthDiscovery(chapAuthDiscovery): { inlineVolumeSpec+: { iscsi+: { chapAuthDiscovery: chapAuthDiscovery } } }, - '#withChapAuthSession':: d.fn(help='"whether support iSCSI Session CHAP authentication"', args=[d.arg(name='chapAuthSession', type=d.T.boolean)]), - withChapAuthSession(chapAuthSession): { inlineVolumeSpec+: { iscsi+: { chapAuthSession: chapAuthSession } } }, - '#withFsType':: d.fn(help='"Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi"', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { inlineVolumeSpec+: { iscsi+: { fsType: fsType } } }, - '#withInitiatorName':: d.fn(help='"Custom iSCSI Initiator Name. If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface : will be created for the connection."', args=[d.arg(name='initiatorName', type=d.T.string)]), - withInitiatorName(initiatorName): { inlineVolumeSpec+: { iscsi+: { initiatorName: initiatorName } } }, - '#withIqn':: d.fn(help='"Target iSCSI Qualified Name."', args=[d.arg(name='iqn', type=d.T.string)]), - withIqn(iqn): { inlineVolumeSpec+: { iscsi+: { iqn: iqn } } }, - '#withIscsiInterface':: d.fn(help="\"iSCSI Interface Name that uses an iSCSI transport. Defaults to 'default' (tcp).\"", args=[d.arg(name='iscsiInterface', type=d.T.string)]), - withIscsiInterface(iscsiInterface): { inlineVolumeSpec+: { iscsi+: { iscsiInterface: iscsiInterface } } }, - '#withLun':: d.fn(help='"iSCSI Target Lun number."', args=[d.arg(name='lun', type=d.T.integer)]), - withLun(lun): { inlineVolumeSpec+: { iscsi+: { lun: lun } } }, - '#withPortals':: d.fn(help='"iSCSI Target Portal List. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260)."', args=[d.arg(name='portals', type=d.T.array)]), - withPortals(portals): { inlineVolumeSpec+: { iscsi+: { portals: if std.isArray(v=portals) then portals else [portals] } } }, - '#withPortalsMixin':: d.fn(help='"iSCSI Target Portal List. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260)."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='portals', type=d.T.array)]), - withPortalsMixin(portals): { inlineVolumeSpec+: { iscsi+: { portals+: if std.isArray(v=portals) then portals else [portals] } } }, - '#withReadOnly':: d.fn(help='"ReadOnly here will force the ReadOnly setting in VolumeMounts. 
Defaults to false."', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { inlineVolumeSpec+: { iscsi+: { readOnly: readOnly } } }, - '#withTargetPortal':: d.fn(help='"iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260)."', args=[d.arg(name='targetPortal', type=d.T.string)]), - withTargetPortal(targetPortal): { inlineVolumeSpec+: { iscsi+: { targetPortal: targetPortal } } }, - }, - '#local':: d.obj(help='"Local represents directly-attached storage with node affinity (Beta feature)"'), - 'local': { - '#withFsType':: d.fn(help='"Filesystem type to mount. It applies only when the Path is a block device. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". The default value is to auto-select a fileystem if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { inlineVolumeSpec+: { 'local'+: { fsType: fsType } } }, - '#withPath':: d.fn(help='"The full path to the volume on the node. It can be either a directory or block device (disk, partition, ...)."', args=[d.arg(name='path', type=d.T.string)]), - withPath(path): { inlineVolumeSpec+: { 'local'+: { path: path } } }, - }, - '#nfs':: d.obj(help='"Represents an NFS mount that lasts the lifetime of a pod. NFS volumes do not support ownership management or SELinux relabeling."'), - nfs: { - '#withPath':: d.fn(help='"Path that is exported by the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs"', args=[d.arg(name='path', type=d.T.string)]), - withPath(path): { inlineVolumeSpec+: { nfs+: { path: path } } }, - '#withReadOnly':: d.fn(help='"ReadOnly here will force the NFS export to be mounted with read-only permissions. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs"', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { inlineVolumeSpec+: { nfs+: { readOnly: readOnly } } }, - '#withServer':: d.fn(help='"Server is the hostname or IP address of the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs"', args=[d.arg(name='server', type=d.T.string)]), - withServer(server): { inlineVolumeSpec+: { nfs+: { server: server } } }, - }, - '#nodeAffinity':: d.obj(help='"VolumeNodeAffinity defines constraints that limit what nodes this volume can be accessed from."'), - nodeAffinity: { - '#required':: d.obj(help='"A node selector represents the union of the results of one or more label queries over a set of nodes; that is, it represents the OR of the selectors represented by the node selector terms."'), - required: { - '#withNodeSelectorTerms':: d.fn(help='"Required. A list of node selector terms. The terms are ORed."', args=[d.arg(name='nodeSelectorTerms', type=d.T.array)]), - withNodeSelectorTerms(nodeSelectorTerms): { inlineVolumeSpec+: { nodeAffinity+: { required+: { nodeSelectorTerms: if std.isArray(v=nodeSelectorTerms) then nodeSelectorTerms else [nodeSelectorTerms] } } } }, - '#withNodeSelectorTermsMixin':: d.fn(help='"Required. A list of node selector terms. 
The terms are ORed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='nodeSelectorTerms', type=d.T.array)]), - withNodeSelectorTermsMixin(nodeSelectorTerms): { inlineVolumeSpec+: { nodeAffinity+: { required+: { nodeSelectorTerms+: if std.isArray(v=nodeSelectorTerms) then nodeSelectorTerms else [nodeSelectorTerms] } } } }, - }, - }, - '#photonPersistentDisk':: d.obj(help='"Represents a Photon Controller persistent disk resource."'), - photonPersistentDisk: { - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { inlineVolumeSpec+: { photonPersistentDisk+: { fsType: fsType } } }, - '#withPdID':: d.fn(help='"ID that identifies Photon Controller persistent disk"', args=[d.arg(name='pdID', type=d.T.string)]), - withPdID(pdID): { inlineVolumeSpec+: { photonPersistentDisk+: { pdID: pdID } } }, - }, - '#portworxVolume':: d.obj(help='"PortworxVolumeSource represents a Portworx volume resource."'), - portworxVolume: { - '#withFsType':: d.fn(help='"FSType represents the filesystem type to mount Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { inlineVolumeSpec+: { portworxVolume+: { fsType: fsType } } }, - '#withReadOnly':: d.fn(help='"Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { inlineVolumeSpec+: { portworxVolume+: { readOnly: readOnly } } }, - '#withVolumeID':: d.fn(help='"VolumeID uniquely identifies a Portworx volume"', args=[d.arg(name='volumeID', type=d.T.string)]), - withVolumeID(volumeID): { inlineVolumeSpec+: { portworxVolume+: { volumeID: volumeID } } }, - }, - '#quobyte':: d.obj(help='"Represents a Quobyte mount that lasts the lifetime of a pod. Quobyte volumes do not support ownership management or SELinux relabeling."'), - quobyte: { - '#withGroup':: d.fn(help='"Group to map volume access to Default is no group"', args=[d.arg(name='group', type=d.T.string)]), - withGroup(group): { inlineVolumeSpec+: { quobyte+: { group: group } } }, - '#withReadOnly':: d.fn(help='"ReadOnly here will force the Quobyte volume to be mounted with read-only permissions. 
Defaults to false."', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { inlineVolumeSpec+: { quobyte+: { readOnly: readOnly } } }, - '#withRegistry':: d.fn(help='"Registry represents a single or multiple Quobyte Registry services specified as a string as host:port pair (multiple entries are separated with commas) which acts as the central registry for volumes"', args=[d.arg(name='registry', type=d.T.string)]), - withRegistry(registry): { inlineVolumeSpec+: { quobyte+: { registry: registry } } }, - '#withTenant':: d.fn(help='"Tenant owning the given Quobyte volume in the Backend Used with dynamically provisioned Quobyte volumes, value is set by the plugin"', args=[d.arg(name='tenant', type=d.T.string)]), - withTenant(tenant): { inlineVolumeSpec+: { quobyte+: { tenant: tenant } } }, - '#withUser':: d.fn(help='"User to map volume access to Defaults to serivceaccount user"', args=[d.arg(name='user', type=d.T.string)]), - withUser(user): { inlineVolumeSpec+: { quobyte+: { user: user } } }, - '#withVolume':: d.fn(help='"Volume is a string that references an already created Quobyte volume by name."', args=[d.arg(name='volume', type=d.T.string)]), - withVolume(volume): { inlineVolumeSpec+: { quobyte+: { volume: volume } } }, - }, - '#rbd':: d.obj(help='"Represents a Rados Block Device mount that lasts the lifetime of a pod. RBD volumes support ownership management and SELinux relabeling."'), - rbd: { - '#secretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), - secretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { inlineVolumeSpec+: { rbd+: { secretRef+: { name: name } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { inlineVolumeSpec+: { rbd+: { secretRef+: { namespace: namespace } } } }, - }, - '#withFsType':: d.fn(help='"Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd"', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { inlineVolumeSpec+: { rbd+: { fsType: fsType } } }, - '#withImage':: d.fn(help='"The rados image name. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='image', type=d.T.string)]), - withImage(image): { inlineVolumeSpec+: { rbd+: { image: image } } }, - '#withKeyring':: d.fn(help='"Keyring is the path to key ring for RBDUser. Default is /etc/ceph/keyring. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='keyring', type=d.T.string)]), - withKeyring(keyring): { inlineVolumeSpec+: { rbd+: { keyring: keyring } } }, - '#withMonitors':: d.fn(help='"A collection of Ceph monitors. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='monitors', type=d.T.array)]), - withMonitors(monitors): { inlineVolumeSpec+: { rbd+: { monitors: if std.isArray(v=monitors) then monitors else [monitors] } } }, - '#withMonitorsMixin':: d.fn(help='"A collection of Ceph monitors. 
More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='monitors', type=d.T.array)]), - withMonitorsMixin(monitors): { inlineVolumeSpec+: { rbd+: { monitors+: if std.isArray(v=monitors) then monitors else [monitors] } } }, - '#withPool':: d.fn(help='"The rados pool name. Default is rbd. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='pool', type=d.T.string)]), - withPool(pool): { inlineVolumeSpec+: { rbd+: { pool: pool } } }, - '#withReadOnly':: d.fn(help='"ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { inlineVolumeSpec+: { rbd+: { readOnly: readOnly } } }, - '#withUser':: d.fn(help='"The rados user name. Default is admin. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='user', type=d.T.string)]), - withUser(user): { inlineVolumeSpec+: { rbd+: { user: user } } }, - }, - '#scaleIO':: d.obj(help='"ScaleIOPersistentVolumeSource represents a persistent ScaleIO volume"'), - scaleIO: { - '#secretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), - secretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { inlineVolumeSpec+: { scaleIO+: { secretRef+: { name: name } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { inlineVolumeSpec+: { scaleIO+: { secretRef+: { namespace: namespace } } } }, - }, - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Default is \\"xfs\\', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { inlineVolumeSpec+: { scaleIO+: { fsType: fsType } } }, - '#withGateway':: d.fn(help='"The host address of the ScaleIO API Gateway."', args=[d.arg(name='gateway', type=d.T.string)]), - withGateway(gateway): { inlineVolumeSpec+: { scaleIO+: { gateway: gateway } } }, - '#withProtectionDomain':: d.fn(help='"The name of the ScaleIO Protection Domain for the configured storage."', args=[d.arg(name='protectionDomain', type=d.T.string)]), - withProtectionDomain(protectionDomain): { inlineVolumeSpec+: { scaleIO+: { protectionDomain: protectionDomain } } }, - '#withReadOnly':: d.fn(help='"Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { inlineVolumeSpec+: { scaleIO+: { readOnly: readOnly } } }, - '#withSslEnabled':: d.fn(help='"Flag to enable/disable SSL communication with Gateway, default false"', args=[d.arg(name='sslEnabled', type=d.T.boolean)]), - withSslEnabled(sslEnabled): { inlineVolumeSpec+: { scaleIO+: { sslEnabled: sslEnabled } } }, - '#withStorageMode':: d.fn(help='"Indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. 
Default is ThinProvisioned."', args=[d.arg(name='storageMode', type=d.T.string)]), - withStorageMode(storageMode): { inlineVolumeSpec+: { scaleIO+: { storageMode: storageMode } } }, - '#withStoragePool':: d.fn(help='"The ScaleIO Storage Pool associated with the protection domain."', args=[d.arg(name='storagePool', type=d.T.string)]), - withStoragePool(storagePool): { inlineVolumeSpec+: { scaleIO+: { storagePool: storagePool } } }, - '#withSystem':: d.fn(help='"The name of the storage system as configured in ScaleIO."', args=[d.arg(name='system', type=d.T.string)]), - withSystem(system): { inlineVolumeSpec+: { scaleIO+: { system: system } } }, - '#withVolumeName':: d.fn(help='"The name of a volume already created in the ScaleIO system that is associated with this volume source."', args=[d.arg(name='volumeName', type=d.T.string)]), - withVolumeName(volumeName): { inlineVolumeSpec+: { scaleIO+: { volumeName: volumeName } } }, - }, - '#storageos':: d.obj(help='"Represents a StorageOS persistent volume resource."'), - storageos: { - '#secretRef':: d.obj(help='"ObjectReference contains enough information to let you inspect or modify the referred object."'), - secretRef: { - '#withApiVersion':: d.fn(help='"API version of the referent."', args=[d.arg(name='apiVersion', type=d.T.string)]), - withApiVersion(apiVersion): { inlineVolumeSpec+: { storageos+: { secretRef+: { apiVersion: apiVersion } } } }, - '#withFieldPath':: d.fn(help='"If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. For example, if the object reference is to a container within a pod, this would take on a value like: \\"spec.containers{name}\\" (where \\"name\\" refers to the name of the container that triggered the event) or if no container name is specified \\"spec.containers[2]\\" (container with index 2 in this pod). This syntax is chosen only to have some well-defined way of referencing a part of an object."', args=[d.arg(name='fieldPath', type=d.T.string)]), - withFieldPath(fieldPath): { inlineVolumeSpec+: { storageos+: { secretRef+: { fieldPath: fieldPath } } } }, - '#withKind':: d.fn(help='"Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds"', args=[d.arg(name='kind', type=d.T.string)]), - withKind(kind): { inlineVolumeSpec+: { storageos+: { secretRef+: { kind: kind } } } }, - '#withName':: d.fn(help='"Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { inlineVolumeSpec+: { storageos+: { secretRef+: { name: name } } } }, - '#withNamespace':: d.fn(help='"Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/"', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { inlineVolumeSpec+: { storageos+: { secretRef+: { namespace: namespace } } } }, - '#withResourceVersion':: d.fn(help='"Specific resourceVersion to which this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), - withResourceVersion(resourceVersion): { inlineVolumeSpec+: { storageos+: { secretRef+: { resourceVersion: resourceVersion } } } }, - '#withUid':: d.fn(help='"UID of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids"', args=[d.arg(name='uid', type=d.T.string)]), - withUid(uid): { inlineVolumeSpec+: { storageos+: { secretRef+: { uid: uid } } } }, - }, - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { inlineVolumeSpec+: { storageos+: { fsType: fsType } } }, - '#withReadOnly':: d.fn(help='"Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { inlineVolumeSpec+: { storageos+: { readOnly: readOnly } } }, - '#withVolumeName':: d.fn(help='"VolumeName is the human-readable name of the StorageOS volume. Volume names are only unique within a namespace."', args=[d.arg(name='volumeName', type=d.T.string)]), - withVolumeName(volumeName): { inlineVolumeSpec+: { storageos+: { volumeName: volumeName } } }, - '#withVolumeNamespace':: d.fn(help="\"VolumeNamespace specifies the scope of the volume within StorageOS. If no namespace is specified then the Pod's namespace will be used. This allows the Kubernetes name scoping to be mirrored within StorageOS for tighter integration. Set VolumeName to any name to override the default behaviour. Set to \\\"default\\\" if you are not using namespaces within StorageOS. Namespaces that do not pre-exist within StorageOS will be created.\"", args=[d.arg(name='volumeNamespace', type=d.T.string)]), - withVolumeNamespace(volumeNamespace): { inlineVolumeSpec+: { storageos+: { volumeNamespace: volumeNamespace } } }, - }, - '#vsphereVolume':: d.obj(help='"Represents a vSphere volume resource."'), - vsphereVolume: { - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { inlineVolumeSpec+: { vsphereVolume+: { fsType: fsType } } }, - '#withStoragePolicyID':: d.fn(help='"Storage Policy Based Management (SPBM) profile ID associated with the StoragePolicyName."', args=[d.arg(name='storagePolicyID', type=d.T.string)]), - withStoragePolicyID(storagePolicyID): { inlineVolumeSpec+: { vsphereVolume+: { storagePolicyID: storagePolicyID } } }, - '#withStoragePolicyName':: d.fn(help='"Storage Policy Based Management (SPBM) profile name."', args=[d.arg(name='storagePolicyName', type=d.T.string)]), - withStoragePolicyName(storagePolicyName): { inlineVolumeSpec+: { vsphereVolume+: { storagePolicyName: storagePolicyName } } }, - '#withVolumePath':: d.fn(help='"Path that identifies vSphere volume vmdk"', args=[d.arg(name='volumePath', type=d.T.string)]), - withVolumePath(volumePath): { inlineVolumeSpec+: { vsphereVolume+: { volumePath: volumePath } } }, - }, - '#withAccessModes':: d.fn(help='"AccessModes contains all ways the volume can be mounted. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes"', args=[d.arg(name='accessModes', type=d.T.array)]), - withAccessModes(accessModes): { inlineVolumeSpec+: { accessModes: if std.isArray(v=accessModes) then accessModes else [accessModes] } }, - '#withAccessModesMixin':: d.fn(help='"AccessModes contains all ways the volume can be mounted. 
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='accessModes', type=d.T.array)]), - withAccessModesMixin(accessModes): { inlineVolumeSpec+: { accessModes+: if std.isArray(v=accessModes) then accessModes else [accessModes] } }, - '#withCapacity':: d.fn(help="\"A description of the persistent volume's resources and capacity. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity\"", args=[d.arg(name='capacity', type=d.T.object)]), - withCapacity(capacity): { inlineVolumeSpec+: { capacity: capacity } }, - '#withCapacityMixin':: d.fn(help="\"A description of the persistent volume's resources and capacity. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='capacity', type=d.T.object)]), - withCapacityMixin(capacity): { inlineVolumeSpec+: { capacity+: capacity } }, - '#withMountOptions':: d.fn(help='"A list of mount options, e.g. [\\"ro\\", \\"soft\\"]. Not validated - mount will simply fail if one is invalid. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#mount-options"', args=[d.arg(name='mountOptions', type=d.T.array)]), - withMountOptions(mountOptions): { inlineVolumeSpec+: { mountOptions: if std.isArray(v=mountOptions) then mountOptions else [mountOptions] } }, - '#withMountOptionsMixin':: d.fn(help='"A list of mount options, e.g. [\\"ro\\", \\"soft\\"]. Not validated - mount will simply fail if one is invalid. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#mount-options"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='mountOptions', type=d.T.array)]), - withMountOptionsMixin(mountOptions): { inlineVolumeSpec+: { mountOptions+: if std.isArray(v=mountOptions) then mountOptions else [mountOptions] } }, - '#withPersistentVolumeReclaimPolicy':: d.fn(help='"What happens to a persistent volume when released from its claim. Valid options are Retain (default for manually created PersistentVolumes), Delete (default for dynamically provisioned PersistentVolumes), and Recycle (deprecated). Recycle must be supported by the volume plugin underlying this PersistentVolume. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#reclaiming"', args=[d.arg(name='persistentVolumeReclaimPolicy', type=d.T.string)]), - withPersistentVolumeReclaimPolicy(persistentVolumeReclaimPolicy): { inlineVolumeSpec+: { persistentVolumeReclaimPolicy: persistentVolumeReclaimPolicy } }, - '#withStorageClassName':: d.fn(help='"Name of StorageClass to which this persistent volume belongs. Empty value means that this volume does not belong to any StorageClass."', args=[d.arg(name='storageClassName', type=d.T.string)]), - withStorageClassName(storageClassName): { inlineVolumeSpec+: { storageClassName: storageClassName } }, - '#withVolumeMode':: d.fn(help='"volumeMode defines if a volume is intended to be used with a formatted filesystem or to remain in raw block state. 
Value of Filesystem is implied when not included in spec."', args=[d.arg(name='volumeMode', type=d.T.string)]), - withVolumeMode(volumeMode): { inlineVolumeSpec+: { volumeMode: volumeMode } }, - }, - '#withPersistentVolumeName':: d.fn(help='"Name of the persistent volume to attach."', args=[d.arg(name='persistentVolumeName', type=d.T.string)]), - withPersistentVolumeName(persistentVolumeName): { persistentVolumeName: persistentVolumeName }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1beta1/volumeAttachmentSpec.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1beta1/volumeAttachmentSpec.libsonnet deleted file mode 100644 index 8b092052a56..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1beta1/volumeAttachmentSpec.libsonnet +++ /dev/null @@ -1,426 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='volumeAttachmentSpec', url='', help='"VolumeAttachmentSpec is the specification of a VolumeAttachment request."'), - '#source':: d.obj(help='"VolumeAttachmentSource represents a volume that should be attached. Right now only PersistenVolumes can be attached via external attacher, in future we may allow also inline volumes in pods. Exactly one member can be set."'), - source: { - '#inlineVolumeSpec':: d.obj(help='"PersistentVolumeSpec is the specification of a persistent volume."'), - inlineVolumeSpec: { - '#awsElasticBlockStore':: d.obj(help='"Represents a Persistent Disk resource in AWS.\\n\\nAn AWS EBS disk must exist before mounting to a container. The disk must also be in the same AWS zone as the kubelet. An AWS EBS disk can only be mounted as read/write once. AWS EBS volumes support ownership management and SELinux relabeling."'), - awsElasticBlockStore: { - '#withFsType':: d.fn(help='"Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore"', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { source+: { inlineVolumeSpec+: { awsElasticBlockStore+: { fsType: fsType } } } }, - '#withPartition':: d.fn(help='"The partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \\"1\\". Similarly, the volume partition for /dev/sda is \\"0\\" (or you can leave the property empty)."', args=[d.arg(name='partition', type=d.T.integer)]), - withPartition(partition): { source+: { inlineVolumeSpec+: { awsElasticBlockStore+: { partition: partition } } } }, - '#withReadOnly':: d.fn(help='"Specify \\"true\\" to force and set the ReadOnly property in VolumeMounts to \\"true\\". If omitted, the default is \\"false\\". More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore"', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { source+: { inlineVolumeSpec+: { awsElasticBlockStore+: { readOnly: readOnly } } } }, - '#withVolumeID':: d.fn(help='"Unique ID of the persistent disk resource in AWS (Amazon EBS volume). 
More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore"', args=[d.arg(name='volumeID', type=d.T.string)]), - withVolumeID(volumeID): { source+: { inlineVolumeSpec+: { awsElasticBlockStore+: { volumeID: volumeID } } } }, - }, - '#azureDisk':: d.obj(help='"AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod."'), - azureDisk: { - '#withCachingMode':: d.fn(help='"Host Caching mode: None, Read Only, Read Write."', args=[d.arg(name='cachingMode', type=d.T.string)]), - withCachingMode(cachingMode): { source+: { inlineVolumeSpec+: { azureDisk+: { cachingMode: cachingMode } } } }, - '#withDiskName':: d.fn(help='"The Name of the data disk in the blob storage"', args=[d.arg(name='diskName', type=d.T.string)]), - withDiskName(diskName): { source+: { inlineVolumeSpec+: { azureDisk+: { diskName: diskName } } } }, - '#withDiskURI':: d.fn(help='"The URI the data disk in the blob storage"', args=[d.arg(name='diskURI', type=d.T.string)]), - withDiskURI(diskURI): { source+: { inlineVolumeSpec+: { azureDisk+: { diskURI: diskURI } } } }, - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { source+: { inlineVolumeSpec+: { azureDisk+: { fsType: fsType } } } }, - '#withKind':: d.fn(help='"Expected values Shared: multiple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared"', args=[d.arg(name='kind', type=d.T.string)]), - withKind(kind): { source+: { inlineVolumeSpec+: { azureDisk+: { kind: kind } } } }, - '#withReadOnly':: d.fn(help='"Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { source+: { inlineVolumeSpec+: { azureDisk+: { readOnly: readOnly } } } }, - }, - '#azureFile':: d.obj(help='"AzureFile represents an Azure File Service mount on the host and bind mount to the pod."'), - azureFile: { - '#withReadOnly':: d.fn(help='"Defaults to false (read/write). 
ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { source+: { inlineVolumeSpec+: { azureFile+: { readOnly: readOnly } } } }, - '#withSecretName':: d.fn(help='"the name of secret that contains Azure Storage Account Name and Key"', args=[d.arg(name='secretName', type=d.T.string)]), - withSecretName(secretName): { source+: { inlineVolumeSpec+: { azureFile+: { secretName: secretName } } } }, - '#withSecretNamespace':: d.fn(help='"the namespace of the secret that contains Azure Storage Account Name and Key default is the same as the Pod"', args=[d.arg(name='secretNamespace', type=d.T.string)]), - withSecretNamespace(secretNamespace): { source+: { inlineVolumeSpec+: { azureFile+: { secretNamespace: secretNamespace } } } }, - '#withShareName':: d.fn(help='"Share Name"', args=[d.arg(name='shareName', type=d.T.string)]), - withShareName(shareName): { source+: { inlineVolumeSpec+: { azureFile+: { shareName: shareName } } } }, - }, - '#cephfs':: d.obj(help='"Represents a Ceph Filesystem mount that lasts the lifetime of a pod Cephfs volumes do not support ownership management or SELinux relabeling."'), - cephfs: { - '#secretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), - secretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { source+: { inlineVolumeSpec+: { cephfs+: { secretRef+: { name: name } } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { source+: { inlineVolumeSpec+: { cephfs+: { secretRef+: { namespace: namespace } } } } }, - }, - '#withMonitors':: d.fn(help='"Required: Monitors is a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='monitors', type=d.T.array)]), - withMonitors(monitors): { source+: { inlineVolumeSpec+: { cephfs+: { monitors: if std.isArray(v=monitors) then monitors else [monitors] } } } }, - '#withMonitorsMixin':: d.fn(help='"Required: Monitors is a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='monitors', type=d.T.array)]), - withMonitorsMixin(monitors): { source+: { inlineVolumeSpec+: { cephfs+: { monitors+: if std.isArray(v=monitors) then monitors else [monitors] } } } }, - '#withPath':: d.fn(help='"Optional: Used as the mounted root, rather than the full Ceph tree, default is /"', args=[d.arg(name='path', type=d.T.string)]), - withPath(path): { source+: { inlineVolumeSpec+: { cephfs+: { path: path } } } }, - '#withReadOnly':: d.fn(help='"Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. 
More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { source+: { inlineVolumeSpec+: { cephfs+: { readOnly: readOnly } } } }, - '#withSecretFile':: d.fn(help='"Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='secretFile', type=d.T.string)]), - withSecretFile(secretFile): { source+: { inlineVolumeSpec+: { cephfs+: { secretFile: secretFile } } } }, - '#withUser':: d.fn(help='"Optional: User is the rados user name, default is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='user', type=d.T.string)]), - withUser(user): { source+: { inlineVolumeSpec+: { cephfs+: { user: user } } } }, - }, - '#cinder':: d.obj(help='"Represents a cinder volume resource in Openstack. A Cinder volume must exist before mounting to a container. The volume must also be in the same region as the kubelet. Cinder volumes support ownership management and SELinux relabeling."'), - cinder: { - '#secretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), - secretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { source+: { inlineVolumeSpec+: { cinder+: { secretRef+: { name: name } } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { source+: { inlineVolumeSpec+: { cinder+: { secretRef+: { namespace: namespace } } } } }, - }, - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://examples.k8s.io/mysql-cinder-pd/README.md"', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { source+: { inlineVolumeSpec+: { cinder+: { fsType: fsType } } } }, - '#withReadOnly':: d.fn(help='"Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/mysql-cinder-pd/README.md"', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { source+: { inlineVolumeSpec+: { cinder+: { readOnly: readOnly } } } }, - '#withVolumeID':: d.fn(help='"volume id used to identify the volume in cinder. More info: https://examples.k8s.io/mysql-cinder-pd/README.md"', args=[d.arg(name='volumeID', type=d.T.string)]), - withVolumeID(volumeID): { source+: { inlineVolumeSpec+: { cinder+: { volumeID: volumeID } } } }, - }, - '#claimRef':: d.obj(help='"ObjectReference contains enough information to let you inspect or modify the referred object."'), - claimRef: { - '#withApiVersion':: d.fn(help='"API version of the referent."', args=[d.arg(name='apiVersion', type=d.T.string)]), - withApiVersion(apiVersion): { source+: { inlineVolumeSpec+: { claimRef+: { apiVersion: apiVersion } } } }, - '#withFieldPath':: d.fn(help='"If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. 
For example, if the object reference is to a container within a pod, this would take on a value like: \\"spec.containers{name}\\" (where \\"name\\" refers to the name of the container that triggered the event) or if no container name is specified \\"spec.containers[2]\\" (container with index 2 in this pod). This syntax is chosen only to have some well-defined way of referencing a part of an object."', args=[d.arg(name='fieldPath', type=d.T.string)]), - withFieldPath(fieldPath): { source+: { inlineVolumeSpec+: { claimRef+: { fieldPath: fieldPath } } } }, - '#withKind':: d.fn(help='"Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds"', args=[d.arg(name='kind', type=d.T.string)]), - withKind(kind): { source+: { inlineVolumeSpec+: { claimRef+: { kind: kind } } } }, - '#withName':: d.fn(help='"Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { source+: { inlineVolumeSpec+: { claimRef+: { name: name } } } }, - '#withNamespace':: d.fn(help='"Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/"', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { source+: { inlineVolumeSpec+: { claimRef+: { namespace: namespace } } } }, - '#withResourceVersion':: d.fn(help='"Specific resourceVersion to which this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), - withResourceVersion(resourceVersion): { source+: { inlineVolumeSpec+: { claimRef+: { resourceVersion: resourceVersion } } } }, - '#withUid':: d.fn(help='"UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids"', args=[d.arg(name='uid', type=d.T.string)]), - withUid(uid): { source+: { inlineVolumeSpec+: { claimRef+: { uid: uid } } } }, - }, - '#csi':: d.obj(help='"Represents storage that is managed by an external CSI volume driver (Beta feature)"'), - csi: { - '#controllerExpandSecretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), - controllerExpandSecretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { source+: { inlineVolumeSpec+: { csi+: { controllerExpandSecretRef+: { name: name } } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { source+: { inlineVolumeSpec+: { csi+: { controllerExpandSecretRef+: { namespace: namespace } } } } }, - }, - '#controllerPublishSecretRef':: d.obj(help='"SecretReference represents a Secret Reference. 
It has enough information to retrieve secret in any namespace"'), - controllerPublishSecretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { source+: { inlineVolumeSpec+: { csi+: { controllerPublishSecretRef+: { name: name } } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { source+: { inlineVolumeSpec+: { csi+: { controllerPublishSecretRef+: { namespace: namespace } } } } }, - }, - '#nodePublishSecretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), - nodePublishSecretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { source+: { inlineVolumeSpec+: { csi+: { nodePublishSecretRef+: { name: name } } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { source+: { inlineVolumeSpec+: { csi+: { nodePublishSecretRef+: { namespace: namespace } } } } }, - }, - '#nodeStageSecretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), - nodeStageSecretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { source+: { inlineVolumeSpec+: { csi+: { nodeStageSecretRef+: { name: name } } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { source+: { inlineVolumeSpec+: { csi+: { nodeStageSecretRef+: { namespace: namespace } } } } }, - }, - '#withDriver':: d.fn(help='"Driver is the name of the driver to use for this volume. Required."', args=[d.arg(name='driver', type=d.T.string)]), - withDriver(driver): { source+: { inlineVolumeSpec+: { csi+: { driver: driver } } } }, - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\"."', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { source+: { inlineVolumeSpec+: { csi+: { fsType: fsType } } } }, - '#withReadOnly':: d.fn(help='"Optional: The value to pass to ControllerPublishVolumeRequest. 
Defaults to false (read/write)."', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { source+: { inlineVolumeSpec+: { csi+: { readOnly: readOnly } } } }, - '#withVolumeAttributes':: d.fn(help='"Attributes of the volume to publish."', args=[d.arg(name='volumeAttributes', type=d.T.object)]), - withVolumeAttributes(volumeAttributes): { source+: { inlineVolumeSpec+: { csi+: { volumeAttributes: volumeAttributes } } } }, - '#withVolumeAttributesMixin':: d.fn(help='"Attributes of the volume to publish."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='volumeAttributes', type=d.T.object)]), - withVolumeAttributesMixin(volumeAttributes): { source+: { inlineVolumeSpec+: { csi+: { volumeAttributes+: volumeAttributes } } } }, - '#withVolumeHandle':: d.fn(help='"VolumeHandle is the unique volume name returned by the CSI volume plugin’s CreateVolume to refer to the volume on all subsequent calls. Required."', args=[d.arg(name='volumeHandle', type=d.T.string)]), - withVolumeHandle(volumeHandle): { source+: { inlineVolumeSpec+: { csi+: { volumeHandle: volumeHandle } } } }, - }, - '#fc':: d.obj(help='"Represents a Fibre Channel volume. Fibre Channel volumes can only be mounted as read/write once. Fibre Channel volumes support ownership management and SELinux relabeling."'), - fc: { - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { source+: { inlineVolumeSpec+: { fc+: { fsType: fsType } } } }, - '#withLun':: d.fn(help='"Optional: FC target lun number"', args=[d.arg(name='lun', type=d.T.integer)]), - withLun(lun): { source+: { inlineVolumeSpec+: { fc+: { lun: lun } } } }, - '#withReadOnly':: d.fn(help='"Optional: Defaults to false (read/write). 
ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { source+: { inlineVolumeSpec+: { fc+: { readOnly: readOnly } } } }, - '#withTargetWWNs':: d.fn(help='"Optional: FC target worldwide names (WWNs)"', args=[d.arg(name='targetWWNs', type=d.T.array)]), - withTargetWWNs(targetWWNs): { source+: { inlineVolumeSpec+: { fc+: { targetWWNs: if std.isArray(v=targetWWNs) then targetWWNs else [targetWWNs] } } } }, - '#withTargetWWNsMixin':: d.fn(help='"Optional: FC target worldwide names (WWNs)"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='targetWWNs', type=d.T.array)]), - withTargetWWNsMixin(targetWWNs): { source+: { inlineVolumeSpec+: { fc+: { targetWWNs+: if std.isArray(v=targetWWNs) then targetWWNs else [targetWWNs] } } } }, - '#withWwids':: d.fn(help='"Optional: FC volume world wide identifiers (wwids) Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously."', args=[d.arg(name='wwids', type=d.T.array)]), - withWwids(wwids): { source+: { inlineVolumeSpec+: { fc+: { wwids: if std.isArray(v=wwids) then wwids else [wwids] } } } }, - '#withWwidsMixin':: d.fn(help='"Optional: FC volume world wide identifiers (wwids) Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='wwids', type=d.T.array)]), - withWwidsMixin(wwids): { source+: { inlineVolumeSpec+: { fc+: { wwids+: if std.isArray(v=wwids) then wwids else [wwids] } } } }, - }, - '#flexVolume':: d.obj(help='"FlexPersistentVolumeSource represents a generic persistent volume resource that is provisioned/attached using an exec based plugin."'), - flexVolume: { - '#secretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), - secretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { source+: { inlineVolumeSpec+: { flexVolume+: { secretRef+: { name: name } } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { source+: { inlineVolumeSpec+: { flexVolume+: { secretRef+: { namespace: namespace } } } } }, - }, - '#withDriver':: d.fn(help='"Driver is the name of the driver to use for this volume."', args=[d.arg(name='driver', type=d.T.string)]), - withDriver(driver): { source+: { inlineVolumeSpec+: { flexVolume+: { driver: driver } } } }, - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". 
The default filesystem depends on FlexVolume script."', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { source+: { inlineVolumeSpec+: { flexVolume+: { fsType: fsType } } } }, - '#withOptions':: d.fn(help='"Optional: Extra command options if any."', args=[d.arg(name='options', type=d.T.object)]), - withOptions(options): { source+: { inlineVolumeSpec+: { flexVolume+: { options: options } } } }, - '#withOptionsMixin':: d.fn(help='"Optional: Extra command options if any."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='options', type=d.T.object)]), - withOptionsMixin(options): { source+: { inlineVolumeSpec+: { flexVolume+: { options+: options } } } }, - '#withReadOnly':: d.fn(help='"Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { source+: { inlineVolumeSpec+: { flexVolume+: { readOnly: readOnly } } } }, - }, - '#flocker':: d.obj(help='"Represents a Flocker volume mounted by the Flocker agent. One and only one of datasetName and datasetUUID should be set. Flocker volumes do not support ownership management or SELinux relabeling."'), - flocker: { - '#withDatasetName':: d.fn(help='"Name of the dataset stored as metadata -> name on the dataset for Flocker should be considered as deprecated"', args=[d.arg(name='datasetName', type=d.T.string)]), - withDatasetName(datasetName): { source+: { inlineVolumeSpec+: { flocker+: { datasetName: datasetName } } } }, - '#withDatasetUUID':: d.fn(help='"UUID of the dataset. This is unique identifier of a Flocker dataset"', args=[d.arg(name='datasetUUID', type=d.T.string)]), - withDatasetUUID(datasetUUID): { source+: { inlineVolumeSpec+: { flocker+: { datasetUUID: datasetUUID } } } }, - }, - '#gcePersistentDisk':: d.obj(help='"Represents a Persistent Disk resource in Google Compute Engine.\\n\\nA GCE PD must exist before mounting to a container. The disk must also be in the same GCE project and zone as the kubelet. A GCE PD can only be mounted as read/write once or read-only many times. GCE PDs support ownership management and SELinux relabeling."'), - gcePersistentDisk: { - '#withFsType':: d.fn(help='"Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { source+: { inlineVolumeSpec+: { gcePersistentDisk+: { fsType: fsType } } } }, - '#withPartition':: d.fn(help='"The partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \\"1\\". Similarly, the volume partition for /dev/sda is \\"0\\" (or you can leave the property empty). More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='partition', type=d.T.integer)]), - withPartition(partition): { source+: { inlineVolumeSpec+: { gcePersistentDisk+: { partition: partition } } } }, - '#withPdName':: d.fn(help='"Unique name of the PD resource in GCE. Used to identify the disk in GCE. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='pdName', type=d.T.string)]), - withPdName(pdName): { source+: { inlineVolumeSpec+: { gcePersistentDisk+: { pdName: pdName } } } }, - '#withReadOnly':: d.fn(help='"ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { source+: { inlineVolumeSpec+: { gcePersistentDisk+: { readOnly: readOnly } } } }, - }, - '#glusterfs':: d.obj(help='"Represents a Glusterfs mount that lasts the lifetime of a pod. Glusterfs volumes do not support ownership management or SELinux relabeling."'), - glusterfs: { - '#withEndpoints':: d.fn(help='"EndpointsName is the endpoint name that details Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='endpoints', type=d.T.string)]), - withEndpoints(endpoints): { source+: { inlineVolumeSpec+: { glusterfs+: { endpoints: endpoints } } } }, - '#withEndpointsNamespace':: d.fn(help='"EndpointsNamespace is the namespace that contains Glusterfs endpoint. If this field is empty, the EndpointNamespace defaults to the same namespace as the bound PVC. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='endpointsNamespace', type=d.T.string)]), - withEndpointsNamespace(endpointsNamespace): { source+: { inlineVolumeSpec+: { glusterfs+: { endpointsNamespace: endpointsNamespace } } } }, - '#withPath':: d.fn(help='"Path is the Glusterfs volume path. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='path', type=d.T.string)]), - withPath(path): { source+: { inlineVolumeSpec+: { glusterfs+: { path: path } } } }, - '#withReadOnly':: d.fn(help='"ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions. Defaults to false. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { source+: { inlineVolumeSpec+: { glusterfs+: { readOnly: readOnly } } } }, - }, - '#hostPath':: d.obj(help='"Represents a host path mapped into a pod. Host path volumes do not support ownership management or SELinux relabeling."'), - hostPath: { - '#withPath':: d.fn(help='"Path of the directory on the host. If the path is a symlink, it will follow the link to the real path. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath"', args=[d.arg(name='path', type=d.T.string)]), - withPath(path): { source+: { inlineVolumeSpec+: { hostPath+: { path: path } } } }, - '#withType':: d.fn(help='"Type for HostPath Volume Defaults to \\"\\" More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath"', args=[d.arg(name='type', type=d.T.string)]), - withType(type): { source+: { inlineVolumeSpec+: { hostPath+: { type: type } } } }, - }, - '#iscsi':: d.obj(help='"ISCSIPersistentVolumeSource represents an ISCSI disk. ISCSI volumes can only be mounted as read/write once. ISCSI volumes support ownership management and SELinux relabeling."'), - iscsi: { - '#secretRef':: d.obj(help='"SecretReference represents a Secret Reference. 
It has enough information to retrieve secret in any namespace"'), - secretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { source+: { inlineVolumeSpec+: { iscsi+: { secretRef+: { name: name } } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { source+: { inlineVolumeSpec+: { iscsi+: { secretRef+: { namespace: namespace } } } } }, - }, - '#withChapAuthDiscovery':: d.fn(help='"whether support iSCSI Discovery CHAP authentication"', args=[d.arg(name='chapAuthDiscovery', type=d.T.boolean)]), - withChapAuthDiscovery(chapAuthDiscovery): { source+: { inlineVolumeSpec+: { iscsi+: { chapAuthDiscovery: chapAuthDiscovery } } } }, - '#withChapAuthSession':: d.fn(help='"whether support iSCSI Session CHAP authentication"', args=[d.arg(name='chapAuthSession', type=d.T.boolean)]), - withChapAuthSession(chapAuthSession): { source+: { inlineVolumeSpec+: { iscsi+: { chapAuthSession: chapAuthSession } } } }, - '#withFsType':: d.fn(help='"Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi"', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { source+: { inlineVolumeSpec+: { iscsi+: { fsType: fsType } } } }, - '#withInitiatorName':: d.fn(help='"Custom iSCSI Initiator Name. If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface : will be created for the connection."', args=[d.arg(name='initiatorName', type=d.T.string)]), - withInitiatorName(initiatorName): { source+: { inlineVolumeSpec+: { iscsi+: { initiatorName: initiatorName } } } }, - '#withIqn':: d.fn(help='"Target iSCSI Qualified Name."', args=[d.arg(name='iqn', type=d.T.string)]), - withIqn(iqn): { source+: { inlineVolumeSpec+: { iscsi+: { iqn: iqn } } } }, - '#withIscsiInterface':: d.fn(help="\"iSCSI Interface Name that uses an iSCSI transport. Defaults to 'default' (tcp).\"", args=[d.arg(name='iscsiInterface', type=d.T.string)]), - withIscsiInterface(iscsiInterface): { source+: { inlineVolumeSpec+: { iscsi+: { iscsiInterface: iscsiInterface } } } }, - '#withLun':: d.fn(help='"iSCSI Target Lun number."', args=[d.arg(name='lun', type=d.T.integer)]), - withLun(lun): { source+: { inlineVolumeSpec+: { iscsi+: { lun: lun } } } }, - '#withPortals':: d.fn(help='"iSCSI Target Portal List. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260)."', args=[d.arg(name='portals', type=d.T.array)]), - withPortals(portals): { source+: { inlineVolumeSpec+: { iscsi+: { portals: if std.isArray(v=portals) then portals else [portals] } } } }, - '#withPortalsMixin':: d.fn(help='"iSCSI Target Portal List. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260)."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='portals', type=d.T.array)]), - withPortalsMixin(portals): { source+: { inlineVolumeSpec+: { iscsi+: { portals+: if std.isArray(v=portals) then portals else [portals] } } } }, - '#withReadOnly':: d.fn(help='"ReadOnly here will force the ReadOnly setting in VolumeMounts. 
Defaults to false."', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { source+: { inlineVolumeSpec+: { iscsi+: { readOnly: readOnly } } } }, - '#withTargetPortal':: d.fn(help='"iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260)."', args=[d.arg(name='targetPortal', type=d.T.string)]), - withTargetPortal(targetPortal): { source+: { inlineVolumeSpec+: { iscsi+: { targetPortal: targetPortal } } } }, - }, - '#local':: d.obj(help='"Local represents directly-attached storage with node affinity (Beta feature)"'), - 'local': { - '#withFsType':: d.fn(help='"Filesystem type to mount. It applies only when the Path is a block device. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". The default value is to auto-select a fileystem if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { source+: { inlineVolumeSpec+: { 'local'+: { fsType: fsType } } } }, - '#withPath':: d.fn(help='"The full path to the volume on the node. It can be either a directory or block device (disk, partition, ...)."', args=[d.arg(name='path', type=d.T.string)]), - withPath(path): { source+: { inlineVolumeSpec+: { 'local'+: { path: path } } } }, - }, - '#nfs':: d.obj(help='"Represents an NFS mount that lasts the lifetime of a pod. NFS volumes do not support ownership management or SELinux relabeling."'), - nfs: { - '#withPath':: d.fn(help='"Path that is exported by the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs"', args=[d.arg(name='path', type=d.T.string)]), - withPath(path): { source+: { inlineVolumeSpec+: { nfs+: { path: path } } } }, - '#withReadOnly':: d.fn(help='"ReadOnly here will force the NFS export to be mounted with read-only permissions. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs"', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { source+: { inlineVolumeSpec+: { nfs+: { readOnly: readOnly } } } }, - '#withServer':: d.fn(help='"Server is the hostname or IP address of the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs"', args=[d.arg(name='server', type=d.T.string)]), - withServer(server): { source+: { inlineVolumeSpec+: { nfs+: { server: server } } } }, - }, - '#nodeAffinity':: d.obj(help='"VolumeNodeAffinity defines constraints that limit what nodes this volume can be accessed from."'), - nodeAffinity: { - '#required':: d.obj(help='"A node selector represents the union of the results of one or more label queries over a set of nodes; that is, it represents the OR of the selectors represented by the node selector terms."'), - required: { - '#withNodeSelectorTerms':: d.fn(help='"Required. A list of node selector terms. The terms are ORed."', args=[d.arg(name='nodeSelectorTerms', type=d.T.array)]), - withNodeSelectorTerms(nodeSelectorTerms): { source+: { inlineVolumeSpec+: { nodeAffinity+: { required+: { nodeSelectorTerms: if std.isArray(v=nodeSelectorTerms) then nodeSelectorTerms else [nodeSelectorTerms] } } } } }, - '#withNodeSelectorTermsMixin':: d.fn(help='"Required. A list of node selector terms. 
The terms are ORed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='nodeSelectorTerms', type=d.T.array)]), - withNodeSelectorTermsMixin(nodeSelectorTerms): { source+: { inlineVolumeSpec+: { nodeAffinity+: { required+: { nodeSelectorTerms+: if std.isArray(v=nodeSelectorTerms) then nodeSelectorTerms else [nodeSelectorTerms] } } } } }, - }, - }, - '#photonPersistentDisk':: d.obj(help='"Represents a Photon Controller persistent disk resource."'), - photonPersistentDisk: { - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { source+: { inlineVolumeSpec+: { photonPersistentDisk+: { fsType: fsType } } } }, - '#withPdID':: d.fn(help='"ID that identifies Photon Controller persistent disk"', args=[d.arg(name='pdID', type=d.T.string)]), - withPdID(pdID): { source+: { inlineVolumeSpec+: { photonPersistentDisk+: { pdID: pdID } } } }, - }, - '#portworxVolume':: d.obj(help='"PortworxVolumeSource represents a Portworx volume resource."'), - portworxVolume: { - '#withFsType':: d.fn(help='"FSType represents the filesystem type to mount Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { source+: { inlineVolumeSpec+: { portworxVolume+: { fsType: fsType } } } }, - '#withReadOnly':: d.fn(help='"Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { source+: { inlineVolumeSpec+: { portworxVolume+: { readOnly: readOnly } } } }, - '#withVolumeID':: d.fn(help='"VolumeID uniquely identifies a Portworx volume"', args=[d.arg(name='volumeID', type=d.T.string)]), - withVolumeID(volumeID): { source+: { inlineVolumeSpec+: { portworxVolume+: { volumeID: volumeID } } } }, - }, - '#quobyte':: d.obj(help='"Represents a Quobyte mount that lasts the lifetime of a pod. Quobyte volumes do not support ownership management or SELinux relabeling."'), - quobyte: { - '#withGroup':: d.fn(help='"Group to map volume access to Default is no group"', args=[d.arg(name='group', type=d.T.string)]), - withGroup(group): { source+: { inlineVolumeSpec+: { quobyte+: { group: group } } } }, - '#withReadOnly':: d.fn(help='"ReadOnly here will force the Quobyte volume to be mounted with read-only permissions. 
Defaults to false."', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { source+: { inlineVolumeSpec+: { quobyte+: { readOnly: readOnly } } } }, - '#withRegistry':: d.fn(help='"Registry represents a single or multiple Quobyte Registry services specified as a string as host:port pair (multiple entries are separated with commas) which acts as the central registry for volumes"', args=[d.arg(name='registry', type=d.T.string)]), - withRegistry(registry): { source+: { inlineVolumeSpec+: { quobyte+: { registry: registry } } } }, - '#withTenant':: d.fn(help='"Tenant owning the given Quobyte volume in the Backend Used with dynamically provisioned Quobyte volumes, value is set by the plugin"', args=[d.arg(name='tenant', type=d.T.string)]), - withTenant(tenant): { source+: { inlineVolumeSpec+: { quobyte+: { tenant: tenant } } } }, - '#withUser':: d.fn(help='"User to map volume access to Defaults to serivceaccount user"', args=[d.arg(name='user', type=d.T.string)]), - withUser(user): { source+: { inlineVolumeSpec+: { quobyte+: { user: user } } } }, - '#withVolume':: d.fn(help='"Volume is a string that references an already created Quobyte volume by name."', args=[d.arg(name='volume', type=d.T.string)]), - withVolume(volume): { source+: { inlineVolumeSpec+: { quobyte+: { volume: volume } } } }, - }, - '#rbd':: d.obj(help='"Represents a Rados Block Device mount that lasts the lifetime of a pod. RBD volumes support ownership management and SELinux relabeling."'), - rbd: { - '#secretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), - secretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { source+: { inlineVolumeSpec+: { rbd+: { secretRef+: { name: name } } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { source+: { inlineVolumeSpec+: { rbd+: { secretRef+: { namespace: namespace } } } } }, - }, - '#withFsType':: d.fn(help='"Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd"', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { source+: { inlineVolumeSpec+: { rbd+: { fsType: fsType } } } }, - '#withImage':: d.fn(help='"The rados image name. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='image', type=d.T.string)]), - withImage(image): { source+: { inlineVolumeSpec+: { rbd+: { image: image } } } }, - '#withKeyring':: d.fn(help='"Keyring is the path to key ring for RBDUser. Default is /etc/ceph/keyring. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='keyring', type=d.T.string)]), - withKeyring(keyring): { source+: { inlineVolumeSpec+: { rbd+: { keyring: keyring } } } }, - '#withMonitors':: d.fn(help='"A collection of Ceph monitors. 
More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='monitors', type=d.T.array)]), - withMonitors(monitors): { source+: { inlineVolumeSpec+: { rbd+: { monitors: if std.isArray(v=monitors) then monitors else [monitors] } } } }, - '#withMonitorsMixin':: d.fn(help='"A collection of Ceph monitors. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='monitors', type=d.T.array)]), - withMonitorsMixin(monitors): { source+: { inlineVolumeSpec+: { rbd+: { monitors+: if std.isArray(v=monitors) then monitors else [monitors] } } } }, - '#withPool':: d.fn(help='"The rados pool name. Default is rbd. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='pool', type=d.T.string)]), - withPool(pool): { source+: { inlineVolumeSpec+: { rbd+: { pool: pool } } } }, - '#withReadOnly':: d.fn(help='"ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { source+: { inlineVolumeSpec+: { rbd+: { readOnly: readOnly } } } }, - '#withUser':: d.fn(help='"The rados user name. Default is admin. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='user', type=d.T.string)]), - withUser(user): { source+: { inlineVolumeSpec+: { rbd+: { user: user } } } }, - }, - '#scaleIO':: d.obj(help='"ScaleIOPersistentVolumeSource represents a persistent ScaleIO volume"'), - scaleIO: { - '#secretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), - secretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { source+: { inlineVolumeSpec+: { scaleIO+: { secretRef+: { name: name } } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { source+: { inlineVolumeSpec+: { scaleIO+: { secretRef+: { namespace: namespace } } } } }, - }, - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Default is \\"xfs\\', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { source+: { inlineVolumeSpec+: { scaleIO+: { fsType: fsType } } } }, - '#withGateway':: d.fn(help='"The host address of the ScaleIO API Gateway."', args=[d.arg(name='gateway', type=d.T.string)]), - withGateway(gateway): { source+: { inlineVolumeSpec+: { scaleIO+: { gateway: gateway } } } }, - '#withProtectionDomain':: d.fn(help='"The name of the ScaleIO Protection Domain for the configured storage."', args=[d.arg(name='protectionDomain', type=d.T.string)]), - withProtectionDomain(protectionDomain): { source+: { inlineVolumeSpec+: { scaleIO+: { protectionDomain: protectionDomain } } } }, - '#withReadOnly':: d.fn(help='"Defaults to false (read/write). 
ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { source+: { inlineVolumeSpec+: { scaleIO+: { readOnly: readOnly } } } }, - '#withSslEnabled':: d.fn(help='"Flag to enable/disable SSL communication with Gateway, default false"', args=[d.arg(name='sslEnabled', type=d.T.boolean)]), - withSslEnabled(sslEnabled): { source+: { inlineVolumeSpec+: { scaleIO+: { sslEnabled: sslEnabled } } } }, - '#withStorageMode':: d.fn(help='"Indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. Default is ThinProvisioned."', args=[d.arg(name='storageMode', type=d.T.string)]), - withStorageMode(storageMode): { source+: { inlineVolumeSpec+: { scaleIO+: { storageMode: storageMode } } } }, - '#withStoragePool':: d.fn(help='"The ScaleIO Storage Pool associated with the protection domain."', args=[d.arg(name='storagePool', type=d.T.string)]), - withStoragePool(storagePool): { source+: { inlineVolumeSpec+: { scaleIO+: { storagePool: storagePool } } } }, - '#withSystem':: d.fn(help='"The name of the storage system as configured in ScaleIO."', args=[d.arg(name='system', type=d.T.string)]), - withSystem(system): { source+: { inlineVolumeSpec+: { scaleIO+: { system: system } } } }, - '#withVolumeName':: d.fn(help='"The name of a volume already created in the ScaleIO system that is associated with this volume source."', args=[d.arg(name='volumeName', type=d.T.string)]), - withVolumeName(volumeName): { source+: { inlineVolumeSpec+: { scaleIO+: { volumeName: volumeName } } } }, - }, - '#storageos':: d.obj(help='"Represents a StorageOS persistent volume resource."'), - storageos: { - '#secretRef':: d.obj(help='"ObjectReference contains enough information to let you inspect or modify the referred object."'), - secretRef: { - '#withApiVersion':: d.fn(help='"API version of the referent."', args=[d.arg(name='apiVersion', type=d.T.string)]), - withApiVersion(apiVersion): { source+: { inlineVolumeSpec+: { storageos+: { secretRef+: { apiVersion: apiVersion } } } } }, - '#withFieldPath':: d.fn(help='"If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. For example, if the object reference is to a container within a pod, this would take on a value like: \\"spec.containers{name}\\" (where \\"name\\" refers to the name of the container that triggered the event) or if no container name is specified \\"spec.containers[2]\\" (container with index 2 in this pod). This syntax is chosen only to have some well-defined way of referencing a part of an object."', args=[d.arg(name='fieldPath', type=d.T.string)]), - withFieldPath(fieldPath): { source+: { inlineVolumeSpec+: { storageos+: { secretRef+: { fieldPath: fieldPath } } } } }, - '#withKind':: d.fn(help='"Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds"', args=[d.arg(name='kind', type=d.T.string)]), - withKind(kind): { source+: { inlineVolumeSpec+: { storageos+: { secretRef+: { kind: kind } } } } }, - '#withName':: d.fn(help='"Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { source+: { inlineVolumeSpec+: { storageos+: { secretRef+: { name: name } } } } }, - '#withNamespace':: d.fn(help='"Namespace of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/"', args=[d.arg(name='namespace', type=d.T.string)]), - withNamespace(namespace): { source+: { inlineVolumeSpec+: { storageos+: { secretRef+: { namespace: namespace } } } } }, - '#withResourceVersion':: d.fn(help='"Specific resourceVersion to which this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), - withResourceVersion(resourceVersion): { source+: { inlineVolumeSpec+: { storageos+: { secretRef+: { resourceVersion: resourceVersion } } } } }, - '#withUid':: d.fn(help='"UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids"', args=[d.arg(name='uid', type=d.T.string)]), - withUid(uid): { source+: { inlineVolumeSpec+: { storageos+: { secretRef+: { uid: uid } } } } }, - }, - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { source+: { inlineVolumeSpec+: { storageos+: { fsType: fsType } } } }, - '#withReadOnly':: d.fn(help='"Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), - withReadOnly(readOnly): { source+: { inlineVolumeSpec+: { storageos+: { readOnly: readOnly } } } }, - '#withVolumeName':: d.fn(help='"VolumeName is the human-readable name of the StorageOS volume. Volume names are only unique within a namespace."', args=[d.arg(name='volumeName', type=d.T.string)]), - withVolumeName(volumeName): { source+: { inlineVolumeSpec+: { storageos+: { volumeName: volumeName } } } }, - '#withVolumeNamespace':: d.fn(help="\"VolumeNamespace specifies the scope of the volume within StorageOS. If no namespace is specified then the Pod's namespace will be used. This allows the Kubernetes name scoping to be mirrored within StorageOS for tighter integration. Set VolumeName to any name to override the default behaviour. Set to \\\"default\\\" if you are not using namespaces within StorageOS. Namespaces that do not pre-exist within StorageOS will be created.\"", args=[d.arg(name='volumeNamespace', type=d.T.string)]), - withVolumeNamespace(volumeNamespace): { source+: { inlineVolumeSpec+: { storageos+: { volumeNamespace: volumeNamespace } } } }, - }, - '#vsphereVolume':: d.obj(help='"Represents a vSphere volume resource."'), - vsphereVolume: { - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". 
Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), - withFsType(fsType): { source+: { inlineVolumeSpec+: { vsphereVolume+: { fsType: fsType } } } }, - '#withStoragePolicyID':: d.fn(help='"Storage Policy Based Management (SPBM) profile ID associated with the StoragePolicyName."', args=[d.arg(name='storagePolicyID', type=d.T.string)]), - withStoragePolicyID(storagePolicyID): { source+: { inlineVolumeSpec+: { vsphereVolume+: { storagePolicyID: storagePolicyID } } } }, - '#withStoragePolicyName':: d.fn(help='"Storage Policy Based Management (SPBM) profile name."', args=[d.arg(name='storagePolicyName', type=d.T.string)]), - withStoragePolicyName(storagePolicyName): { source+: { inlineVolumeSpec+: { vsphereVolume+: { storagePolicyName: storagePolicyName } } } }, - '#withVolumePath':: d.fn(help='"Path that identifies vSphere volume vmdk"', args=[d.arg(name='volumePath', type=d.T.string)]), - withVolumePath(volumePath): { source+: { inlineVolumeSpec+: { vsphereVolume+: { volumePath: volumePath } } } }, - }, - '#withAccessModes':: d.fn(help='"AccessModes contains all ways the volume can be mounted. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes"', args=[d.arg(name='accessModes', type=d.T.array)]), - withAccessModes(accessModes): { source+: { inlineVolumeSpec+: { accessModes: if std.isArray(v=accessModes) then accessModes else [accessModes] } } }, - '#withAccessModesMixin':: d.fn(help='"AccessModes contains all ways the volume can be mounted. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='accessModes', type=d.T.array)]), - withAccessModesMixin(accessModes): { source+: { inlineVolumeSpec+: { accessModes+: if std.isArray(v=accessModes) then accessModes else [accessModes] } } }, - '#withCapacity':: d.fn(help="\"A description of the persistent volume's resources and capacity. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity\"", args=[d.arg(name='capacity', type=d.T.object)]), - withCapacity(capacity): { source+: { inlineVolumeSpec+: { capacity: capacity } } }, - '#withCapacityMixin':: d.fn(help="\"A description of the persistent volume's resources and capacity. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='capacity', type=d.T.object)]), - withCapacityMixin(capacity): { source+: { inlineVolumeSpec+: { capacity+: capacity } } }, - '#withMountOptions':: d.fn(help='"A list of mount options, e.g. [\\"ro\\", \\"soft\\"]. Not validated - mount will simply fail if one is invalid. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#mount-options"', args=[d.arg(name='mountOptions', type=d.T.array)]), - withMountOptions(mountOptions): { source+: { inlineVolumeSpec+: { mountOptions: if std.isArray(v=mountOptions) then mountOptions else [mountOptions] } } }, - '#withMountOptionsMixin':: d.fn(help='"A list of mount options, e.g. [\\"ro\\", \\"soft\\"]. Not validated - mount will simply fail if one is invalid. 
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#mount-options"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='mountOptions', type=d.T.array)]), - withMountOptionsMixin(mountOptions): { source+: { inlineVolumeSpec+: { mountOptions+: if std.isArray(v=mountOptions) then mountOptions else [mountOptions] } } }, - '#withPersistentVolumeReclaimPolicy':: d.fn(help='"What happens to a persistent volume when released from its claim. Valid options are Retain (default for manually created PersistentVolumes), Delete (default for dynamically provisioned PersistentVolumes), and Recycle (deprecated). Recycle must be supported by the volume plugin underlying this PersistentVolume. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#reclaiming"', args=[d.arg(name='persistentVolumeReclaimPolicy', type=d.T.string)]), - withPersistentVolumeReclaimPolicy(persistentVolumeReclaimPolicy): { source+: { inlineVolumeSpec+: { persistentVolumeReclaimPolicy: persistentVolumeReclaimPolicy } } }, - '#withStorageClassName':: d.fn(help='"Name of StorageClass to which this persistent volume belongs. Empty value means that this volume does not belong to any StorageClass."', args=[d.arg(name='storageClassName', type=d.T.string)]), - withStorageClassName(storageClassName): { source+: { inlineVolumeSpec+: { storageClassName: storageClassName } } }, - '#withVolumeMode':: d.fn(help='"volumeMode defines if a volume is intended to be used with a formatted filesystem or to remain in raw block state. Value of Filesystem is implied when not included in spec."', args=[d.arg(name='volumeMode', type=d.T.string)]), - withVolumeMode(volumeMode): { source+: { inlineVolumeSpec+: { volumeMode: volumeMode } } }, - }, - '#withPersistentVolumeName':: d.fn(help='"Name of the persistent volume to attach."', args=[d.arg(name='persistentVolumeName', type=d.T.string)]), - withPersistentVolumeName(persistentVolumeName): { source+: { persistentVolumeName: persistentVolumeName } }, - }, - '#withAttacher':: d.fn(help='"Attacher indicates the name of the volume driver that MUST handle this request. This is the name returned by GetPluginName()."', args=[d.arg(name='attacher', type=d.T.string)]), - withAttacher(attacher): { attacher: attacher }, - '#withNodeName':: d.fn(help='"The node that the volume should be attached to."', args=[d.arg(name='nodeName', type=d.T.string)]), - withNodeName(nodeName): { nodeName: nodeName }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1beta1/volumeAttachmentStatus.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1beta1/volumeAttachmentStatus.libsonnet deleted file mode 100644 index 638334f0b9d..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1beta1/volumeAttachmentStatus.libsonnet +++ /dev/null @@ -1,26 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='volumeAttachmentStatus', url='', help='"VolumeAttachmentStatus is the status of a VolumeAttachment request."'), - '#attachError':: d.obj(help='"VolumeError captures an error encountered during a volume operation."'), - attachError: { - '#withMessage':: d.fn(help='"String detailing the error encountered during Attach or Detach operation. 
This string may be logged, so it should not contain sensitive information."', args=[d.arg(name='message', type=d.T.string)]), - withMessage(message): { attachError+: { message: message } }, - '#withTime':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='time', type=d.T.string)]), - withTime(time): { attachError+: { time: time } }, - }, - '#detachError':: d.obj(help='"VolumeError captures an error encountered during a volume operation."'), - detachError: { - '#withMessage':: d.fn(help='"String detailing the error encountered during Attach or Detach operation. This string may be logged, so it should not contain sensitive information."', args=[d.arg(name='message', type=d.T.string)]), - withMessage(message): { detachError+: { message: message } }, - '#withTime':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='time', type=d.T.string)]), - withTime(time): { detachError+: { time: time } }, - }, - '#withAttached':: d.fn(help='"Indicates the volume is successfully attached. This field must only be set by the entity completing the attach operation, i.e. the external-attacher."', args=[d.arg(name='attached', type=d.T.boolean)]), - withAttached(attached): { attached: attached }, - '#withAttachmentMetadata':: d.fn(help='"Upon successful attach, this field is populated with any information returned by the attach operation that must be passed into subsequent WaitForAttach or Mount calls. This field must only be set by the entity completing the attach operation, i.e. the external-attacher."', args=[d.arg(name='attachmentMetadata', type=d.T.object)]), - withAttachmentMetadata(attachmentMetadata): { attachmentMetadata: attachmentMetadata }, - '#withAttachmentMetadataMixin':: d.fn(help='"Upon successful attach, this field is populated with any information returned by the attach operation that must be passed into subsequent WaitForAttach or Mount calls. This field must only be set by the entity completing the attach operation, i.e. the external-attacher."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='attachmentMetadata', type=d.T.object)]), - withAttachmentMetadataMixin(attachmentMetadata): { attachmentMetadata+: attachmentMetadata }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1beta1/volumeError.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1beta1/volumeError.libsonnet deleted file mode 100644 index 87f1e4e18b8..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1beta1/volumeError.libsonnet +++ /dev/null @@ -1,10 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='volumeError', url='', help='"VolumeError captures an error encountered during a volume operation."'), - '#withMessage':: d.fn(help='"String detailing the error encountered during Attach or Detach operation. 
This string may be logged, so it should not contain sensitive information."', args=[d.arg(name='message', type=d.T.string)]), - withMessage(message): { message: message }, - '#withTime':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='time', type=d.T.string)]), - withTime(time): { time: time }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1beta1/volumeNodeResources.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1beta1/volumeNodeResources.libsonnet deleted file mode 100644 index 0281968fcfe..00000000000 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1beta1/volumeNodeResources.libsonnet +++ /dev/null @@ -1,8 +0,0 @@ -{ - local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='volumeNodeResources', url='', help='"VolumeNodeResources is a set of resource limits for scheduling of volumes."'), - '#withCount':: d.fn(help='"Maximum number of unique volumes managed by the CSI driver that can be used on a node. A volume that is both attached and mounted on a node is considered to be used once, not twice. The same rule applies for a unique volume that is shared among multiple pods on the same node. If this field is nil, then the supported number of volumes on this node is unbounded."', args=[d.arg(name='count', type=d.T.integer)]), - withCount(count): { count: count }, - '#mixin': 'ignore', - mixin: self, -} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_custom/apps.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_custom/apps.libsonnet similarity index 91% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_custom/apps.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_custom/apps.libsonnet index 193b26205f3..1586fa73fd2 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_custom/apps.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_custom/apps.libsonnet @@ -1,5 +1,5 @@ -local d = import 'doc-util/main.libsonnet'; local gen = import '../gen.libsonnet'; +local d = import 'doc-util/main.libsonnet'; local patch = { daemonSet+: { @@ -73,12 +73,7 @@ local patch = { }; { - [if std.objectHas(gen, 'extensions') then 'extensions']+: { // This was removed in v1.22 - [if std.objectHas(gen.extensions, 'v1beta1') then 'v1beta1']+: patch, - }, apps+: { v1+: patch, - v1beta1+: patch, - v1beta2+: patch, }, } diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_custom/autoscaling.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_custom/autoscaling.libsonnet similarity index 88% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_custom/autoscaling.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_custom/autoscaling.libsonnet index 2769fb7c118..d48005cfad7 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_custom/autoscaling.libsonnet +++ 
b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_custom/autoscaling.libsonnet @@ -9,7 +9,7 @@ local withApiVersion = { local withScaleTargetRef = { '#withScaleTargetRef':: d.fn(help='Set spec.ScaleTargetRef to `object`', args=[d.arg(name='object', type=d.T.object)]), withScaleTargetRef(object): - { spec+: { scaleTargetRef+: withApiVersion { + { spec+: { scaleTargetRef+: { apiVersion: object.apiVersion, kind: object.kind, name: object.metadata.name, @@ -26,7 +26,6 @@ local patch = { { autoscaling+: { v1+: patch, - v2beta1+: patch, - v2beta2+: patch, + v2+: patch, }, } diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_custom/batch.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_custom/batch.libsonnet similarity index 92% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_custom/batch.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_custom/batch.libsonnet index c4ccdfc3de8..3b39ad76806 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_custom/batch.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_custom/batch.libsonnet @@ -21,7 +21,6 @@ local patch = { { batch+: { - v1beta1+: patch, - v2alpha1+: patch, + v1+: patch, }, } diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_custom/core.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_custom/core.libsonnet similarity index 81% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_custom/core.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_custom/core.libsonnet index b342d37e2b2..7f4577a5c41 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_custom/core.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_custom/core.libsonnet @@ -39,15 +39,48 @@ local d = import 'doc-util/main.libsonnet'; for envvar in env ]), - '#withEnvMap': d.fn( - '`withEnvMap` works like `withEnvMixin` but accepts a key/value map, this map is converted a list of core.v1.envVar(key, value)`', + '#withEnvMap': d.fn(||| + + `withEnvMap` works like `withEnvMixin` but accepts a key/value map, + this map is converted a list of core.v1.envVar(key, value)`. + + If the value is an object instead of a string, it is placed under + the `valueFrom` key. 
+ + |||, [d.arg('env', d.T.object)] ), withEnvMap(env):: self.withEnvMixin([ - $.core.v1.envVar.new(k, env[k]) + ( + if std.type(env[k]) == 'object' then + $.core.v1.envVar.withName(k) + + { valueFrom: env[k] } + else + $.core.v1.envVar.new(k, env[k]) + ) for k in std.objectFields(env) ]), + + withResourcesRequests(cpu, memory):: + self.resources.withRequests( + (if cpu != null + then { cpu: cpu } + else {}) + + (if memory != null + then { memory: memory } + else {}) + ), + + withResourcesLimits(cpu, memory):: + self.resources.withLimits( + (if cpu != null + then { cpu: cpu } + else {}) + + (if memory != null + then { memory: memory } + else {}) + ), }, containerPort+: { @@ -127,6 +160,11 @@ local d = import 'doc-util/main.libsonnet'; super.new(name) + super.spec.withSelector(selector) + super.spec.withPorts(ports), + '#newWithoutSelector'+: d.fn('newWithoutSelector works like `new`, but creates a Service without ports and selector', [ + d.arg('name', d.T.string), + ]), + newWithoutSelector(name):: + super.new(name), }, servicePort+:: { @@ -196,6 +234,20 @@ local d = import 'doc-util/main.libsonnet'; ]), fromSecret(name, secretName):: super.withName(name) + super.secret.withSecretName(secretName), + + '#fromCsi': d.fn('Creates a new volume of type `csi`', [ + d.arg('name', d.T.string), + d.arg('driver', d.T.string), + d.arg('volumeAttributes', d.T.object, {}), + ]), + fromCsi(name, driver, volumeAttributes={}):: + super.withName(name) + { + csi: { + driver: driver, + readOnly: true, + volumeAttributes: volumeAttributes + } + }, }, volumeMount+:: { diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_custom/list.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_custom/list.libsonnet similarity index 100% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_custom/list.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_custom/list.libsonnet diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_custom/mapContainers.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_custom/mapContainers.libsonnet similarity index 61% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_custom/mapContainers.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_custom/mapContainers.libsonnet index 66d4e44892a..bba2659bb6b 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_custom/mapContainers.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_custom/mapContainers.libsonnet @@ -13,36 +13,40 @@ local patch = { |||, [d.arg('f', d.T.func)] ), - mapContainers(f):: { + mapContainers(f, includeInitContainers=false):: { local podContainers = super.spec.template.spec.containers, + local podInitContainers = super.spec.template.spec.initContainers, spec+: { template+: { spec+: { containers: std.map(f, podContainers), + [if includeInitContainers then 'initContainers']: std.map(f, podInitContainers), }, }, }, }, '#mapContainersWithName': d.fn('`mapContainersWithName` is like `mapContainers`, but only applies to those containers in the `names` array', - [d.arg('names', d.T.array), d.arg('f', d.T.func)]), - mapContainersWithName(names, f):: + [d.arg('names', d.T.array), d.arg('f', d.T.func)]), + mapContainersWithName(names, f, 
includeInitContainers=false):: local nameSet = if std.type(names) == 'array' then std.set(names) else std.set([names]); local inNameSet(name) = std.length(std.setInter(nameSet, std.set([name]))) > 0; - self.mapContainers(function(c) if std.objectHas(c, 'name') && inNameSet(c.name) then f(c) else c), + self.mapContainers(function(c) if std.objectHas(c, 'name') && inNameSet(c.name) then f(c) else c, includeInitContainers), }; // batch.job and batch.cronJob have the podSpec at a different location local cronPatch = patch { - mapContainers(f):: { + mapContainers(f, includeInitContainers=false):: { local podContainers = super.spec.jobTemplate.spec.template.spec.containers, + local podInitContainers = super.spec.jobTemplate.spec.template.spec.initContainers, spec+: { jobTemplate+: { spec+: { template+: { spec+: { containers: std.map(f, podContainers), + [if includeInitContainers then 'initContainers']: std.map(f, podInitContainers), }, }, }, @@ -52,23 +56,25 @@ local cronPatch = patch { }; { - core+: { v1+: { - pod+: patch, - podTemplate+: patch, - replicationController+: patch, - } }, + core+: { + v1+: { + pod+: patch, + podTemplate+: patch, + replicationController+: patch, + }, + }, batch+: { v1+: { job+: patch, - }, - v1beta1+: { cronJob+: cronPatch, }, }, - apps+: { v1+: { - daemonSet+: patch, - deployment+: patch, - replicaSet+: patch, - statefulSet+: patch, - } }, + apps+: { + v1+: { + daemonSet+: patch, + deployment+: patch, + replicaSet+: patch, + statefulSet+: patch, + }, + }, } diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_custom/rbac.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_custom/rbac.libsonnet similarity index 95% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_custom/rbac.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_custom/rbac.libsonnet index 69327fb6116..ab3d1ab04c4 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_custom/rbac.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_custom/rbac.libsonnet @@ -37,7 +37,5 @@ local patch = { { rbac+: { v1+: patch, - v1alpha1+: patch, - v1beta1+: patch, }, } diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_custom/volumeMounts.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_custom/volumeMounts.libsonnet new file mode 100644 index 00000000000..9c911e8e14d --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_custom/volumeMounts.libsonnet @@ -0,0 +1,323 @@ +local d = import 'doc-util/main.libsonnet'; + +{ + local container = $.core.v1.container, + local volumeMount = $.core.v1.volumeMount, + local volume = $.core.v1.volume, + + local patch = { + local volumeMountDescription = + ||| + This helper function can be augmented with a `volumeMountsMixin`. For example, + passing "k.core.v1.volumeMount.withSubPath(subpath)" will result in a subpath + mixin. + |||, + + + '#configVolumeMount': d.fn( + ||| + `configVolumeMount` mounts a ConfigMap by `name` on `path`. + + If `containers` is specified as an array of container names it will only be mounted + to those containers, otherwise it will be mounted on all containers. + + This helper function can be augmented with a `volumeMixin`. 
For example, + passing "k.core.v1.volume.configMap.withDefaultMode(420)" will result in a + default mode mixin. + ||| + + volumeMountDescription, + [ + d.arg('name', d.T.string), + d.arg('path', d.T.string), + d.arg('volumeMountMixin', d.T.object), + d.arg('volumeMixin', d.T.object), + d.arg('containers', d.T.array), + ] + ), + configVolumeMount(name, path, volumeMountMixin={}, volumeMixin={}, containers=null, includeInitContainers=false):: + local addMount(c) = c + ( + if containers == null || std.member(containers, c.name) + then container.withVolumeMountsMixin( + volumeMount.new(name, path) + + volumeMountMixin, + ) + else {} + ); + local volumeMixins = [volume.fromConfigMap(name, name) + volumeMixin]; + + super.mapContainers(addMount, includeInitContainers=includeInitContainers) + + if std.objectHas(super.spec, 'template') + then super.spec.template.spec.withVolumesMixin(volumeMixins) + else super.spec.jobTemplate.spec.template.spec.withVolumesMixin(volumeMixins), + + + '#configMapVolumeMount': d.fn( + ||| + `configMapVolumeMount` mounts a `configMap` on `path`. It will + also add an annotation hash to ensure the pods are re-deployed when the config map + changes. + + If `containers` is specified as an array of container names it will only be mounted + to those containers, otherwise it will be mounted on all containers. + + This helper function can be augmented with a `volumeMixin`. For example, + passing "k.core.v1.volume.configMap.withDefaultMode(420)" will result in a + default mode mixin. + ||| + + volumeMountDescription, + [ + d.arg('configMap', d.T.object), + d.arg('path', d.T.string), + d.arg('volumeMountMixin', d.T.object), + d.arg('volumeMixin', d.T.object), + d.arg('containers', d.T.array), + ] + ), + configMapVolumeMount(configMap, path, volumeMountMixin={}, volumeMixin={}, containers=null, includeInitContainers=false):: + local name = configMap.metadata.name, + hash = std.md5(std.toString(configMap)); + local addMount(c) = c + ( + if containers == null || std.member(containers, c.name) + then container.withVolumeMountsMixin( + volumeMount.new(name, path) + + volumeMountMixin, + ) + else {} + ); + local volumeMixins = [volume.fromConfigMap(name, name) + volumeMixin]; + local annotations = { ['%s-hash' % name]: hash }; + + super.mapContainers(addMount, includeInitContainers=includeInitContainers) + + if std.objectHas(super.spec, 'template') + then + super.spec.template.spec.withVolumesMixin(volumeMixins) + + super.spec.template.metadata.withAnnotationsMixin(annotations) + else + super.spec.jobTemplate.spec.template.spec.withVolumesMixin(volumeMixins) + + super.spec.jobTemplate.spec.template.metadata.withAnnotationsMixin(annotations), + + + '#hostVolumeMount': d.fn( + ||| + `hostVolumeMount` mounts a `hostPath` on `path`. + + If `containers` is specified as an array of container names it will only be mounted + to those containers, otherwise it will be mounted on all containers. + + This helper function can be augmented with a `volumeMixin`. For example, + passing "k.core.v1.volume.hostPath.withType('Socket')" will result in a + socket type mixin. 
+ ||| + volumeMountDescription, + [ + d.arg('name', d.T.string), + d.arg('hostPath', d.T.string), + d.arg('path', d.T.string), + d.arg('readOnly', d.T.bool), + d.arg('volumeMountMixin', d.T.object), + d.arg('volumeMixin', d.T.object), + d.arg('containers', d.T.array), + ] + ), + hostVolumeMount(name, hostPath, path, readOnly=false, volumeMountMixin={}, volumeMixin={}, containers=null, includeInitContainers=false):: + local addMount(c) = c + ( + if containers == null || std.member(containers, c.name) + then container.withVolumeMountsMixin( + volumeMount.new(name, path, readOnly=readOnly) + + volumeMountMixin, + ) + else {} + ); + local volumeMixins = [volume.fromHostPath(name, hostPath) + volumeMixin]; + + super.mapContainers(addMount, includeInitContainers=includeInitContainers) + + if std.objectHas(super.spec, 'template') + then super.spec.template.spec.withVolumesMixin(volumeMixins) + else super.spec.jobTemplate.spec.template.spec.withVolumesMixin(volumeMixins), + + + '#pvcVolumeMount': d.fn( + ||| + `pvcVolumeMount` mounts a PersistentVolumeClaim by `name` on `path`. + + If `containers` is specified as an array of container names it will only be mounted + to those containers, otherwise it will be mounted on all containers. + + This helper function can be augmented with a `volumeMixin`. For example, + passing "k.core.v1.volume.persistentVolumeClaim.withReadOnly(true)" will result in a + mixin that forces all container mounts to be read-only. + ||| + volumeMountDescription, + [ + d.arg('name', d.T.string), + d.arg('path', d.T.string), + d.arg('readOnly', d.T.bool), + d.arg('volumeMountMixin', d.T.object), + d.arg('volumeMixin', d.T.object), + d.arg('containers', d.T.array), + ] + ), + pvcVolumeMount(name, path, readOnly=false, volumeMountMixin={}, volumeMixin={}, containers=null, includeInitContainers=false):: + local addMount(c) = c + ( + if containers == null || std.member(containers, c.name) + then container.withVolumeMountsMixin( + volumeMount.new(name, path, readOnly=readOnly) + + volumeMountMixin, + ) + else {} + ); + local volumeMixins = [volume.fromPersistentVolumeClaim(name, name) + volumeMixin]; + + super.mapContainers(addMount, includeInitContainers=includeInitContainers) + + if std.objectHas(super.spec, 'template') + then super.spec.template.spec.withVolumesMixin(volumeMixins) + else super.spec.jobTemplate.spec.template.spec.withVolumesMixin(volumeMixins), + + + '#secretVolumeMount': d.fn( + ||| + `secretVolumeMount` mounts a Secret by `name` into all containers on `path`. + + If `containers` is specified as an array of container names it will only be mounted + to those containers, otherwise it will be mounted on all containers. + + This helper function can be augmented with a `volumeMixin`. For example, + passing "k.core.v1.volume.secret.withOptional(true)" will result in a + mixin that allows the secret to be optional. 
+ ||| + volumeMountDescription, + [ + d.arg('name', d.T.string), + d.arg('path', d.T.string), + d.arg('defaultMode', d.T.string), + d.arg('volumeMountMixin', d.T.object), + d.arg('volumeMixin', d.T.object), + d.arg('containers', d.T.array), + ] + ), + secretVolumeMount(name, path, defaultMode=256, volumeMountMixin={}, volumeMixin={}, containers=null, includeInitContainers=false):: + local addMount(c) = c + ( + if containers == null || std.member(containers, c.name) + then container.withVolumeMountsMixin( + volumeMount.new(name, path) + + volumeMountMixin, + ) + else {} + ); + local volumeMixins = [ + volume.fromSecret(name, secretName=name) + + volume.secret.withDefaultMode(defaultMode) + + volumeMixin, + ]; + + super.mapContainers(addMount, includeInitContainers=includeInitContainers) + + if std.objectHas(super.spec, 'template') + then super.spec.template.spec.withVolumesMixin(volumeMixins) + else super.spec.jobTemplate.spec.template.spec.withVolumesMixin(volumeMixins), + + '#secretVolumeMountAnnotated': d.fn( + 'same as `secretVolumeMount`, adding an annotation to force redeploy on change.' + + volumeMountDescription, + [ + d.arg('name', d.T.string), + d.arg('path', d.T.string), + d.arg('defaultMode', d.T.string), + d.arg('volumeMountMixin', d.T.object), + d.arg('volumeMixin', d.T.object), + d.arg('containers', d.T.array), + ] + ), + secretVolumeMountAnnotated(name, path, defaultMode=256, volumeMountMixin={}, volumeMixin={}, containers=null, includeInitContainers=false):: + local annotations = { ['%s-secret-hash' % name]: std.md5(std.toString(name)) }; + + self.secretVolumeMount(name, path, defaultMode, volumeMountMixin, volumeMixin, containers) + + super.spec.template.metadata.withAnnotationsMixin(annotations), + + '#emptyVolumeMount': d.fn( + ||| + `emptyVolumeMount` mounts an empty volume by `name` into all containers on `path`. + + If `containers` is specified as an array of container names it will only be mounted + to those containers, otherwise it will be mounted on all containers. + + This helper function can be augmented with a `volumeMixin`. For example, + passing "k.core.v1.volume.emptyDir.withSizeLimit('100Mi')" will result in a + mixin that limits the size of the volume to 100Mi. + ||| + volumeMountDescription, + [ + d.arg('name', d.T.string), + d.arg('path', d.T.string), + d.arg('volumeMountMixin', d.T.object), + d.arg('volumeMixin', d.T.object), + d.arg('containers', d.T.array), + ] + ), + emptyVolumeMount(name, path, volumeMountMixin={}, volumeMixin={}, containers=null, includeInitContainers=false):: + local addMount(c) = c + ( + if containers == null || std.member(containers, c.name) + then container.withVolumeMountsMixin( + volumeMount.new(name, path) + + volumeMountMixin, + ) + else {} + ); + local volumeMixins = [volume.fromEmptyDir(name) + volumeMixin]; + + super.mapContainers(addMount, includeInitContainers=includeInitContainers) + + if std.objectHas(super.spec, 'template') + then super.spec.template.spec.withVolumesMixin(volumeMixins) + else super.spec.jobTemplate.spec.template.spec.withVolumesMixin(volumeMixins), + + '#csiVolumeMount': d.fn( + ||| + `csiVolumeMount` mounts a CSI volume by `name` into all containers on `path`. + If `containers` is specified as an array of container names it will only be mounted + to those containers, otherwise it will be mounted on all containers. + This helper function can be augmented with a `volumeMixin`. For example, + passing "k.core.v1.volume.csi.withReadOnly(false)" will result in a + mixin that makes the volume writeable. 
+ ||| + + volumeMountDescription, + [ + d.arg('name', d.T.string), + d.arg('path', d.T.string), + d.arg('driver', d.T.string), + d.arg('volumeAttributes', d.T.object, {}), + d.arg('volumeMountMixin', d.T.object), + d.arg('volumeMixin', d.T.object), + d.arg('containers', d.T.array), + ] + ), + csiVolumeMount(name, path, driver, volumeAttributes, volumeMountMixin={}, volumeMixin={}, containers=null, includeInitContainers=false):: + local addMount(c) = c + ( + if containers == null || std.member(containers, c.name) + then container.withVolumeMountsMixin( + volumeMount.new(name, path) + + volumeMountMixin, + ) + else {} + ); + local volumeMixins = [volume.fromCsi(name, driver, volumeAttributes) + volumeMixin]; + + super.mapContainers(addMount, includeInitContainers=includeInitContainers) + + if std.objectHas(super.spec, 'template') + then super.spec.template.spec.withVolumesMixin(volumeMixins) + else super.spec.jobTemplate.spec.template.spec.withVolumesMixin(volumeMixins), + }, + + batch+: { + v1+: { + job+: patch, + cronJob+: patch, + }, + }, + apps+: { + v1+: { + daemonSet+: patch, + deployment+: patch, + replicaSet+: patch, + statefulSet+: patch, + }, + }, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/admissionregistration/main.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/main.libsonnet similarity index 80% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/admissionregistration/main.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/main.libsonnet index 5c9c107733f..a64ea12d58a 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/admissionregistration/main.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/main.libsonnet @@ -2,5 +2,6 @@ local d = (import 'doc-util/main.libsonnet'), '#':: d.pkg(name='admissionregistration', url='', help=''), v1: (import 'v1/main.libsonnet'), + v1alpha1: (import 'v1alpha1/main.libsonnet'), v1beta1: (import 'v1beta1/main.libsonnet'), } diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/admissionregistration/v1/main.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1/main.libsonnet similarity index 91% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/admissionregistration/v1/main.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1/main.libsonnet index c98e455e884..3d4dad1ab2d 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/admissionregistration/v1/main.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1/main.libsonnet @@ -1,6 +1,7 @@ { local d = (import 'doc-util/main.libsonnet'), '#':: d.pkg(name='v1', url='', help=''), + matchCondition: (import 'matchCondition.libsonnet'), mutatingWebhook: (import 'mutatingWebhook.libsonnet'), mutatingWebhookConfiguration: (import 'mutatingWebhookConfiguration.libsonnet'), ruleWithOperations: (import 'ruleWithOperations.libsonnet'), diff --git 
a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1/matchCondition.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1/matchCondition.libsonnet new file mode 100644 index 00000000000..d28c501da10 --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1/matchCondition.libsonnet @@ -0,0 +1,10 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='matchCondition', url='', help='"MatchCondition represents a condition which must by fulfilled for a request to be sent to a webhook."'), + '#withExpression':: d.fn(help="\"Expression represents the expression which will be evaluated by CEL. Must evaluate to bool. CEL expressions have access to the contents of the AdmissionRequest and Authorizer, organized into CEL variables:\\n\\n'object' - The object from the incoming request. The value is null for DELETE requests. 'oldObject' - The existing object. The value is null for CREATE requests. 'request' - Attributes of the admission request(/pkg/apis/admission/types.go#AdmissionRequest). 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request.\\n See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz\\n'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the\\n request resource.\\nDocumentation on CEL: https://kubernetes.io/docs/reference/using-api/cel/\\n\\nRequired.\"", args=[d.arg(name='expression', type=d.T.string)]), + withExpression(expression): { expression: expression }, + '#withName':: d.fn(help="\"Name is an identifier for this match condition, used for strategic merging of MatchConditions, as well as providing an identifier for logging purposes. A good name should be descriptive of the associated expression. Name must be a qualified name consisting of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character (e.g. 'MyName', or 'my.name', or '123-abc', regex used for validation is '([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]') with an optional DNS subdomain prefix and '/' (e.g. 
'example.com/MyName')\\n\\nRequired.\"", args=[d.arg(name='name', type=d.T.string)]), + withName(name): { name: name }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/admissionregistration/v1/mutatingWebhook.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1/mutatingWebhook.libsonnet similarity index 86% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/admissionregistration/v1/mutatingWebhook.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1/mutatingWebhook.libsonnet index 1822f48700b..6bf8f54c1f9 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/admissionregistration/v1/mutatingWebhook.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1/mutatingWebhook.libsonnet @@ -47,6 +47,10 @@ withAdmissionReviewVersionsMixin(admissionReviewVersions): { admissionReviewVersions+: if std.isArray(v=admissionReviewVersions) then admissionReviewVersions else [admissionReviewVersions] }, '#withFailurePolicy':: d.fn(help='"FailurePolicy defines how unrecognized errors from the admission endpoint are handled - allowed values are Ignore or Fail. Defaults to Fail."', args=[d.arg(name='failurePolicy', type=d.T.string)]), withFailurePolicy(failurePolicy): { failurePolicy: failurePolicy }, + '#withMatchConditions':: d.fn(help='"MatchConditions is a list of conditions that must be met for a request to be sent to this webhook. Match conditions filter requests that have already been matched by the rules, namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests. There are a maximum of 64 match conditions allowed.\\n\\nThe exact matching logic is (in order):\\n 1. If ANY matchCondition evaluates to FALSE, the webhook is skipped.\\n 2. If ALL matchConditions evaluate to TRUE, the webhook is called.\\n 3. If any matchCondition evaluates to an error (but none are FALSE):\\n - If failurePolicy=Fail, reject the request\\n - If failurePolicy=Ignore, the error is ignored and the webhook is skipped\\n\\nThis is a beta feature and managed by the AdmissionWebhookMatchConditions feature gate."', args=[d.arg(name='matchConditions', type=d.T.array)]), + withMatchConditions(matchConditions): { matchConditions: if std.isArray(v=matchConditions) then matchConditions else [matchConditions] }, + '#withMatchConditionsMixin':: d.fn(help='"MatchConditions is a list of conditions that must be met for a request to be sent to this webhook. Match conditions filter requests that have already been matched by the rules, namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests. There are a maximum of 64 match conditions allowed.\\n\\nThe exact matching logic is (in order):\\n 1. If ANY matchCondition evaluates to FALSE, the webhook is skipped.\\n 2. If ALL matchConditions evaluate to TRUE, the webhook is called.\\n 3. 
If any matchCondition evaluates to an error (but none are FALSE):\\n - If failurePolicy=Fail, reject the request\\n - If failurePolicy=Ignore, the error is ignored and the webhook is skipped\\n\\nThis is a beta feature and managed by the AdmissionWebhookMatchConditions feature gate."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchConditions', type=d.T.array)]), + withMatchConditionsMixin(matchConditions): { matchConditions+: if std.isArray(v=matchConditions) then matchConditions else [matchConditions] }, '#withMatchPolicy':: d.fn(help='"matchPolicy defines how the \\"rules\\" list is used to match incoming requests. Allowed values are \\"Exact\\" or \\"Equivalent\\".\\n\\n- Exact: match a request only if it exactly matches a specified rule. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, but \\"rules\\" only included `apiGroups:[\\"apps\\"], apiVersions:[\\"v1\\"], resources: [\\"deployments\\"]`, a request to apps/v1beta1 or extensions/v1beta1 would not be sent to the webhook.\\n\\n- Equivalent: match a request if modifies a resource listed in rules, even via another API group or version. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, and \\"rules\\" only included `apiGroups:[\\"apps\\"], apiVersions:[\\"v1\\"], resources: [\\"deployments\\"]`, a request to apps/v1beta1 or extensions/v1beta1 would be converted to apps/v1 and sent to the webhook.\\n\\nDefaults to \\"Equivalent\\', args=[d.arg(name='matchPolicy', type=d.T.string)]), withMatchPolicy(matchPolicy): { matchPolicy: matchPolicy }, '#withName':: d.fn(help='"The name of the admission webhook. Name should be fully qualified, e.g., imagepolicy.kubernetes.io, where \\"imagepolicy\\" is the name of the webhook, and kubernetes.io is the name of the organization. Required."', args=[d.arg(name='name', type=d.T.string)]), diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/admissionregistration/v1/mutatingWebhookConfiguration.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1/mutatingWebhookConfiguration.libsonnet similarity index 84% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/admissionregistration/v1/mutatingWebhookConfiguration.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1/mutatingWebhookConfiguration.libsonnet index d8375bfd0e7..b3108c6fa60 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/admissionregistration/v1/mutatingWebhookConfiguration.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1/mutatingWebhookConfiguration.libsonnet @@ -3,12 +3,10 @@ '#':: d.pkg(name='mutatingWebhookConfiguration', url='', help='"MutatingWebhookConfiguration describes the configuration of and admission webhook that accept or reject and may change the object."'), '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. 
More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -19,21 +17,21 @@ withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. 
If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { metadata+: { generateName: generateName } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { metadata+: { labels+: labels } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. 
Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { metadata+: { namespace: namespace } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, @@ -41,9 +39,9 @@ withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. 
Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { metadata+: { uid: uid } }, }, '#new':: d.fn(help='new returns an instance of MutatingWebhookConfiguration', args=[d.arg(name='name', type=d.T.string)]), diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/admissionregistration/v1beta1/ruleWithOperations.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1/ruleWithOperations.libsonnet similarity index 100% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/admissionregistration/v1beta1/ruleWithOperations.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1/ruleWithOperations.libsonnet diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/admissionregistration/v1beta1/serviceReference.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1/serviceReference.libsonnet similarity index 100% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/admissionregistration/v1beta1/serviceReference.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1/serviceReference.libsonnet diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/admissionregistration/v1/validatingWebhook.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1/validatingWebhook.libsonnet similarity index 85% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/admissionregistration/v1/validatingWebhook.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1/validatingWebhook.libsonnet index ae3795634c8..911443e0935 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/admissionregistration/v1/validatingWebhook.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1/validatingWebhook.libsonnet @@ -47,6 +47,10 @@ withAdmissionReviewVersionsMixin(admissionReviewVersions): { admissionReviewVersions+: if std.isArray(v=admissionReviewVersions) then admissionReviewVersions else [admissionReviewVersions] }, '#withFailurePolicy':: d.fn(help='"FailurePolicy defines how unrecognized errors from the admission endpoint are handled - allowed values are Ignore or Fail. Defaults to Fail."', args=[d.arg(name='failurePolicy', type=d.T.string)]), withFailurePolicy(failurePolicy): { failurePolicy: failurePolicy }, + '#withMatchConditions':: d.fn(help='"MatchConditions is a list of conditions that must be met for a request to be sent to this webhook. Match conditions filter requests that have already been matched by the rules, namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests. There are a maximum of 64 match conditions allowed.\\n\\nThe exact matching logic is (in order):\\n 1. If ANY matchCondition evaluates to FALSE, the webhook is skipped.\\n 2. If ALL matchConditions evaluate to TRUE, the webhook is called.\\n 3. 
If any matchCondition evaluates to an error (but none are FALSE):\\n - If failurePolicy=Fail, reject the request\\n - If failurePolicy=Ignore, the error is ignored and the webhook is skipped\\n\\nThis is a beta feature and managed by the AdmissionWebhookMatchConditions feature gate."', args=[d.arg(name='matchConditions', type=d.T.array)]), + withMatchConditions(matchConditions): { matchConditions: if std.isArray(v=matchConditions) then matchConditions else [matchConditions] }, + '#withMatchConditionsMixin':: d.fn(help='"MatchConditions is a list of conditions that must be met for a request to be sent to this webhook. Match conditions filter requests that have already been matched by the rules, namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests. There are a maximum of 64 match conditions allowed.\\n\\nThe exact matching logic is (in order):\\n 1. If ANY matchCondition evaluates to FALSE, the webhook is skipped.\\n 2. If ALL matchConditions evaluate to TRUE, the webhook is called.\\n 3. If any matchCondition evaluates to an error (but none are FALSE):\\n - If failurePolicy=Fail, reject the request\\n - If failurePolicy=Ignore, the error is ignored and the webhook is skipped\\n\\nThis is a beta feature and managed by the AdmissionWebhookMatchConditions feature gate."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchConditions', type=d.T.array)]), + withMatchConditionsMixin(matchConditions): { matchConditions+: if std.isArray(v=matchConditions) then matchConditions else [matchConditions] }, '#withMatchPolicy':: d.fn(help='"matchPolicy defines how the \\"rules\\" list is used to match incoming requests. Allowed values are \\"Exact\\" or \\"Equivalent\\".\\n\\n- Exact: match a request only if it exactly matches a specified rule. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, but \\"rules\\" only included `apiGroups:[\\"apps\\"], apiVersions:[\\"v1\\"], resources: [\\"deployments\\"]`, a request to apps/v1beta1 or extensions/v1beta1 would not be sent to the webhook.\\n\\n- Equivalent: match a request if modifies a resource listed in rules, even via another API group or version. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, and \\"rules\\" only included `apiGroups:[\\"apps\\"], apiVersions:[\\"v1\\"], resources: [\\"deployments\\"]`, a request to apps/v1beta1 or extensions/v1beta1 would be converted to apps/v1 and sent to the webhook.\\n\\nDefaults to \\"Equivalent\\', args=[d.arg(name='matchPolicy', type=d.T.string)]), withMatchPolicy(matchPolicy): { matchPolicy: matchPolicy }, '#withName':: d.fn(help='"The name of the admission webhook. Name should be fully qualified, e.g., imagepolicy.kubernetes.io, where \\"imagepolicy\\" is the name of the webhook, and kubernetes.io is the name of the organization. 
Required."', args=[d.arg(name='name', type=d.T.string)]), diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/admissionregistration/v1/validatingWebhookConfiguration.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1/validatingWebhookConfiguration.libsonnet similarity index 84% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/admissionregistration/v1/validatingWebhookConfiguration.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1/validatingWebhookConfiguration.libsonnet index 3583a79bd8a..4814f4616f5 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/admissionregistration/v1/validatingWebhookConfiguration.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1/validatingWebhookConfiguration.libsonnet @@ -3,12 +3,10 @@ '#':: d.pkg(name='validatingWebhookConfiguration', url='', help='"ValidatingWebhookConfiguration describes the configuration of and admission webhook that accept or reject and object without changing it."'), '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. 
This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -19,21 +17,21 @@ withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. 
This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { metadata+: { generateName: generateName } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { metadata+: { labels+: labels } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". 
The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { metadata+: { namespace: namespace } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, @@ -41,9 +39,9 @@ withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. 
They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { metadata+: { uid: uid } }, }, '#new':: d.fn(help='new returns an instance of ValidatingWebhookConfiguration', args=[d.arg(name='name', type=d.T.string)]), diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/admissionregistration/v1beta1/webhookClientConfig.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1/webhookClientConfig.libsonnet similarity index 100% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/admissionregistration/v1beta1/webhookClientConfig.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1/webhookClientConfig.libsonnet diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1alpha1/auditAnnotation.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1alpha1/auditAnnotation.libsonnet new file mode 100644 index 00000000000..96c7387270c --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1alpha1/auditAnnotation.libsonnet @@ -0,0 +1,10 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='auditAnnotation', url='', help='"AuditAnnotation describes how to produce an audit annotation for an API request."'), + '#withKey':: d.fn(help='"key specifies the audit annotation key. The audit annotation keys of a ValidatingAdmissionPolicy must be unique. 
The key must be a qualified name ([A-Za-z0-9][-A-Za-z0-9_.]*) no more than 63 bytes in length.\\n\\nThe key is combined with the resource name of the ValidatingAdmissionPolicy to construct an audit annotation key: \\"{ValidatingAdmissionPolicy name}/{key}\\".\\n\\nIf an admission webhook uses the same resource name as this ValidatingAdmissionPolicy and the same audit annotation key, the annotation key will be identical. In this case, the first annotation written with the key will be included in the audit event and all subsequent annotations with the same key will be discarded.\\n\\nRequired."', args=[d.arg(name='key', type=d.T.string)]), + withKey(key): { key: key }, + '#withValueExpression':: d.fn(help='"valueExpression represents the expression which is evaluated by CEL to produce an audit annotation value. The expression must evaluate to either a string or null value. If the expression evaluates to a string, the audit annotation is included with the string value. If the expression evaluates to null or empty string the audit annotation will be omitted. The valueExpression may be no longer than 5kb in length. If the result of the valueExpression is more than 10kb in length, it will be truncated to 10kb.\\n\\nIf multiple ValidatingAdmissionPolicyBinding resources match an API request, then the valueExpression will be evaluated for each binding. All unique values produced by the valueExpressions will be joined together in a comma-separated list.\\n\\nRequired."', args=[d.arg(name='valueExpression', type=d.T.string)]), + withValueExpression(valueExpression): { valueExpression: valueExpression }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1alpha1/expressionWarning.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1alpha1/expressionWarning.libsonnet new file mode 100644 index 00000000000..895ad9dcdcb --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1alpha1/expressionWarning.libsonnet @@ -0,0 +1,10 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='expressionWarning', url='', help='"ExpressionWarning is a warning information that targets a specific expression."'), + '#withFieldRef':: d.fn(help='"The path to the field that refers the expression. For example, the reference to the expression of the first item of validations is \\"spec.validations[0].expression\\', args=[d.arg(name='fieldRef', type=d.T.string)]), + withFieldRef(fieldRef): { fieldRef: fieldRef }, + '#withWarning':: d.fn(help='"The content of type checking information in a human-readable form. 
Each line of the warning contains the type that the expression is checked against, followed by the type check error from the compiler."', args=[d.arg(name='warning', type=d.T.string)]), + withWarning(warning): { warning: warning }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1alpha1/main.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1alpha1/main.libsonnet new file mode 100644 index 00000000000..cd3fb220d90 --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1alpha1/main.libsonnet @@ -0,0 +1,19 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='v1alpha1', url='', help=''), + auditAnnotation: (import 'auditAnnotation.libsonnet'), + expressionWarning: (import 'expressionWarning.libsonnet'), + matchCondition: (import 'matchCondition.libsonnet'), + matchResources: (import 'matchResources.libsonnet'), + namedRuleWithOperations: (import 'namedRuleWithOperations.libsonnet'), + paramKind: (import 'paramKind.libsonnet'), + paramRef: (import 'paramRef.libsonnet'), + typeChecking: (import 'typeChecking.libsonnet'), + validatingAdmissionPolicy: (import 'validatingAdmissionPolicy.libsonnet'), + validatingAdmissionPolicyBinding: (import 'validatingAdmissionPolicyBinding.libsonnet'), + validatingAdmissionPolicyBindingSpec: (import 'validatingAdmissionPolicyBindingSpec.libsonnet'), + validatingAdmissionPolicySpec: (import 'validatingAdmissionPolicySpec.libsonnet'), + validatingAdmissionPolicyStatus: (import 'validatingAdmissionPolicyStatus.libsonnet'), + validation: (import 'validation.libsonnet'), + variable: (import 'variable.libsonnet'), +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1alpha1/matchCondition.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1alpha1/matchCondition.libsonnet new file mode 100644 index 00000000000..3873071158c --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1alpha1/matchCondition.libsonnet @@ -0,0 +1,10 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='matchCondition', url='', help=''), + '#withExpression':: d.fn(help="\"Expression represents the expression which will be evaluated by CEL. Must evaluate to bool. CEL expressions have access to the contents of the AdmissionRequest and Authorizer, organized into CEL variables:\\n\\n'object' - The object from the incoming request. The value is null for DELETE requests. 'oldObject' - The existing object. The value is null for CREATE requests. 'request' - Attributes of the admission request(/pkg/apis/admission/types.go#AdmissionRequest). 'authorizer' - A CEL Authorizer. 
May be used to perform authorization checks for the principal (user or service account) of the request.\\n See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz\\n'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the\\n request resource.\\nDocumentation on CEL: https://kubernetes.io/docs/reference/using-api/cel/\\n\\nRequired.\"", args=[d.arg(name='expression', type=d.T.string)]), + withExpression(expression): { expression: expression }, + '#withName':: d.fn(help="\"Name is an identifier for this match condition, used for strategic merging of MatchConditions, as well as providing an identifier for logging purposes. A good name should be descriptive of the associated expression. Name must be a qualified name consisting of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character (e.g. 'MyName', or 'my.name', or '123-abc', regex used for validation is '([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]') with an optional DNS subdomain prefix and '/' (e.g. 'example.com/MyName')\\n\\nRequired.\"", args=[d.arg(name='name', type=d.T.string)]), + withName(name): { name: name }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1alpha1/matchResources.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1alpha1/matchResources.libsonnet new file mode 100644 index 00000000000..0215d0c956c --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1alpha1/matchResources.libsonnet @@ -0,0 +1,38 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='matchResources', url='', help='"MatchResources decides whether to run the admission control policy on an object based on whether it meets the match criteria. The exclude rules take precedence over include rules (if a resource matches both, it is excluded)"'), + '#namespaceSelector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), + namespaceSelector: { + '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressions(matchExpressions): { namespaceSelector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } }, + '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressionsMixin(matchExpressions): { namespaceSelector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } }, + '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". 
The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabels(matchLabels): { namespaceSelector+: { matchLabels: matchLabels } }, + '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabelsMixin(matchLabels): { namespaceSelector+: { matchLabels+: matchLabels } }, + }, + '#objectSelector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), + objectSelector: { + '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressions(matchExpressions): { objectSelector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } }, + '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressionsMixin(matchExpressions): { objectSelector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } }, + '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabels(matchLabels): { objectSelector+: { matchLabels: matchLabels } }, + '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabelsMixin(matchLabels): { objectSelector+: { matchLabels+: matchLabels } }, + }, + '#withExcludeResourceRules':: d.fn(help='"ExcludeResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy should not care about. The exclude rules take precedence over include rules (if a resource matches both, it is excluded)"', args=[d.arg(name='excludeResourceRules', type=d.T.array)]), + withExcludeResourceRules(excludeResourceRules): { excludeResourceRules: if std.isArray(v=excludeResourceRules) then excludeResourceRules else [excludeResourceRules] }, + '#withExcludeResourceRulesMixin':: d.fn(help='"ExcludeResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy should not care about. 
The exclude rules take precedence over include rules (if a resource matches both, it is excluded)"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='excludeResourceRules', type=d.T.array)]), + withExcludeResourceRulesMixin(excludeResourceRules): { excludeResourceRules+: if std.isArray(v=excludeResourceRules) then excludeResourceRules else [excludeResourceRules] }, + '#withMatchPolicy':: d.fn(help='"matchPolicy defines how the \\"MatchResources\\" list is used to match incoming requests. Allowed values are \\"Exact\\" or \\"Equivalent\\".\\n\\n- Exact: match a request only if it exactly matches a specified rule. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, but \\"rules\\" only included `apiGroups:[\\"apps\\"], apiVersions:[\\"v1\\"], resources: [\\"deployments\\"]`, a request to apps/v1beta1 or extensions/v1beta1 would not be sent to the ValidatingAdmissionPolicy.\\n\\n- Equivalent: match a request if modifies a resource listed in rules, even via another API group or version. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, and \\"rules\\" only included `apiGroups:[\\"apps\\"], apiVersions:[\\"v1\\"], resources: [\\"deployments\\"]`, a request to apps/v1beta1 or extensions/v1beta1 would be converted to apps/v1 and sent to the ValidatingAdmissionPolicy.\\n\\nDefaults to \\"Equivalent\\', args=[d.arg(name='matchPolicy', type=d.T.string)]), + withMatchPolicy(matchPolicy): { matchPolicy: matchPolicy }, + '#withResourceRules':: d.fn(help='"ResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy matches. The policy cares about an operation if it matches _any_ Rule."', args=[d.arg(name='resourceRules', type=d.T.array)]), + withResourceRules(resourceRules): { resourceRules: if std.isArray(v=resourceRules) then resourceRules else [resourceRules] }, + '#withResourceRulesMixin':: d.fn(help='"ResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy matches. The policy cares about an operation if it matches _any_ Rule."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='resourceRules', type=d.T.array)]), + withResourceRulesMixin(resourceRules): { resourceRules+: if std.isArray(v=resourceRules) then resourceRules else [resourceRules] }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1alpha1/namedRuleWithOperations.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1alpha1/namedRuleWithOperations.libsonnet new file mode 100644 index 00000000000..a1726a57081 --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1alpha1/namedRuleWithOperations.libsonnet @@ -0,0 +1,28 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='namedRuleWithOperations', url='', help='"NamedRuleWithOperations is a tuple of Operations and Resources with ResourceNames."'), + '#withApiGroups':: d.fn(help="\"APIGroups is the API groups the resources belong to. '*' is all groups. If '*' is present, the length of the slice must be one. 
Required.\"", args=[d.arg(name='apiGroups', type=d.T.array)]), + withApiGroups(apiGroups): { apiGroups: if std.isArray(v=apiGroups) then apiGroups else [apiGroups] }, + '#withApiGroupsMixin':: d.fn(help="\"APIGroups is the API groups the resources belong to. '*' is all groups. If '*' is present, the length of the slice must be one. Required.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='apiGroups', type=d.T.array)]), + withApiGroupsMixin(apiGroups): { apiGroups+: if std.isArray(v=apiGroups) then apiGroups else [apiGroups] }, + '#withApiVersions':: d.fn(help="\"APIVersions is the API versions the resources belong to. '*' is all versions. If '*' is present, the length of the slice must be one. Required.\"", args=[d.arg(name='apiVersions', type=d.T.array)]), + withApiVersions(apiVersions): { apiVersions: if std.isArray(v=apiVersions) then apiVersions else [apiVersions] }, + '#withApiVersionsMixin':: d.fn(help="\"APIVersions is the API versions the resources belong to. '*' is all versions. If '*' is present, the length of the slice must be one. Required.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='apiVersions', type=d.T.array)]), + withApiVersionsMixin(apiVersions): { apiVersions+: if std.isArray(v=apiVersions) then apiVersions else [apiVersions] }, + '#withOperations':: d.fn(help="\"Operations is the operations the admission hook cares about - CREATE, UPDATE, DELETE, CONNECT or * for all of those operations and any future admission operations that are added. If '*' is present, the length of the slice must be one. Required.\"", args=[d.arg(name='operations', type=d.T.array)]), + withOperations(operations): { operations: if std.isArray(v=operations) then operations else [operations] }, + '#withOperationsMixin':: d.fn(help="\"Operations is the operations the admission hook cares about - CREATE, UPDATE, DELETE, CONNECT or * for all of those operations and any future admission operations that are added. If '*' is present, the length of the slice must be one. Required.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='operations', type=d.T.array)]), + withOperationsMixin(operations): { operations+: if std.isArray(v=operations) then operations else [operations] }, + '#withResourceNames':: d.fn(help='"ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed."', args=[d.arg(name='resourceNames', type=d.T.array)]), + withResourceNames(resourceNames): { resourceNames: if std.isArray(v=resourceNames) then resourceNames else [resourceNames] }, + '#withResourceNamesMixin':: d.fn(help='"ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='resourceNames', type=d.T.array)]), + withResourceNamesMixin(resourceNames): { resourceNames+: if std.isArray(v=resourceNames) then resourceNames else [resourceNames] }, + '#withResources':: d.fn(help="\"Resources is a list of resources this rule applies to.\\n\\nFor example: 'pods' means pods. 'pods/log' means the log subresource of pods. '*' means all resources, but not subresources. 'pods/*' means all subresources of pods. '*/scale' means all scale subresources. 
'*/*' means all resources and their subresources.\\n\\nIf wildcard is present, the validation rule will ensure resources do not overlap with each other.\\n\\nDepending on the enclosing object, subresources might not be allowed. Required.\"", args=[d.arg(name='resources', type=d.T.array)]), + withResources(resources): { resources: if std.isArray(v=resources) then resources else [resources] }, + '#withResourcesMixin':: d.fn(help="\"Resources is a list of resources this rule applies to.\\n\\nFor example: 'pods' means pods. 'pods/log' means the log subresource of pods. '*' means all resources, but not subresources. 'pods/*' means all subresources of pods. '*/scale' means all scale subresources. '*/*' means all resources and their subresources.\\n\\nIf wildcard is present, the validation rule will ensure resources do not overlap with each other.\\n\\nDepending on the enclosing object, subresources might not be allowed. Required.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='resources', type=d.T.array)]), + withResourcesMixin(resources): { resources+: if std.isArray(v=resources) then resources else [resources] }, + '#withScope':: d.fn(help='"scope specifies the scope of this rule. Valid values are \\"Cluster\\", \\"Namespaced\\", and \\"*\\" \\"Cluster\\" means that only cluster-scoped resources will match this rule. Namespace API objects are cluster-scoped. \\"Namespaced\\" means that only namespaced resources will match this rule. \\"*\\" means that there are no scope restrictions. Subresources match the scope of their parent resource. Default is \\"*\\"."', args=[d.arg(name='scope', type=d.T.string)]), + withScope(scope): { scope: scope }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1alpha1/paramKind.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1alpha1/paramKind.libsonnet new file mode 100644 index 00000000000..11a349422a7 --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1alpha1/paramKind.libsonnet @@ -0,0 +1,8 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='paramKind', url='', help='"ParamKind is a tuple of Group Kind and Version."'), + '#withKind':: d.fn(help='"Kind is the API kind the resources belong to. 
Required."', args=[d.arg(name='kind', type=d.T.string)]), + withKind(kind): { kind: kind }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1beta1/podDisruptionBudgetSpec.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1alpha1/paramRef.libsonnet similarity index 51% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1beta1/podDisruptionBudgetSpec.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1alpha1/paramRef.libsonnet index 84917662145..9781cdd7115 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1beta1/podDisruptionBudgetSpec.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1alpha1/paramRef.libsonnet @@ -1,6 +1,6 @@ { local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='podDisruptionBudgetSpec', url='', help='"PodDisruptionBudgetSpec is a description of a PodDisruptionBudget."'), + '#':: d.pkg(name='paramRef', url='', help='"ParamRef describes how to locate the params to be used as input to expressions of rules applied by a policy binding."'), '#selector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), selector: { '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), @@ -12,10 +12,12 @@ '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), withMatchLabelsMixin(matchLabels): { selector+: { matchLabels+: matchLabels } }, }, - '#withMaxUnavailable':: d.fn(help='"IntOrString is a type that can hold an int32 or a string. When used in JSON or YAML marshalling and unmarshalling, it produces or consumes the inner type. This allows you to have, for example, a JSON field that can accept a name or number."', args=[d.arg(name='maxUnavailable', type=d.T.string)]), - withMaxUnavailable(maxUnavailable): { maxUnavailable: maxUnavailable }, - '#withMinAvailable':: d.fn(help='"IntOrString is a type that can hold an int32 or a string. When used in JSON or YAML marshalling and unmarshalling, it produces or consumes the inner type. This allows you to have, for example, a JSON field that can accept a name or number."', args=[d.arg(name='minAvailable', type=d.T.string)]), - withMinAvailable(minAvailable): { minAvailable: minAvailable }, + '#withName':: d.fn(help='"`name` is the name of the resource being referenced.\\n\\n`name` and `selector` are mutually exclusive properties. If one is set, the other must be unset."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { name: name }, + '#withNamespace':: d.fn(help='"namespace is the namespace of the referenced resource. 
Allows limiting the search for params to a specific namespace. Applies to both `name` and `selector` fields.\\n\\nA per-namespace parameter may be used by specifying a namespace-scoped `paramKind` in the policy and leaving this field empty.\\n\\n- If `paramKind` is cluster-scoped, this field MUST be unset. Setting this field results in a configuration error.\\n\\n- If `paramKind` is namespace-scoped, the namespace of the object being evaluated for admission will be used when this field is left unset. Take care that if this is left empty the binding must not match any cluster-scoped resources, which will result in an error."', args=[d.arg(name='namespace', type=d.T.string)]), + withNamespace(namespace): { namespace: namespace }, + '#withParameterNotFoundAction':: d.fn(help='"`parameterNotFoundAction` controls the behavior of the binding when the resource exists, and name or selector is valid, but there are no parameters matched by the binding. If the value is set to `Allow`, then no matched parameters will be treated as successful validation by the binding. If set to `Deny`, then no matched parameters will be subject to the `failurePolicy` of the policy.\\n\\nAllowed values are `Allow` or `Deny` Default to `Deny`"', args=[d.arg(name='parameterNotFoundAction', type=d.T.string)]), + withParameterNotFoundAction(parameterNotFoundAction): { parameterNotFoundAction: parameterNotFoundAction }, '#mixin': 'ignore', mixin: self, } diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1alpha1/typeChecking.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1alpha1/typeChecking.libsonnet new file mode 100644 index 00000000000..1af60568c08 --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1alpha1/typeChecking.libsonnet @@ -0,0 +1,10 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='typeChecking', url='', help='"TypeChecking contains results of type checking the expressions in the ValidatingAdmissionPolicy"'), + '#withExpressionWarnings':: d.fn(help='"The type checking warnings for each expression."', args=[d.arg(name='expressionWarnings', type=d.T.array)]), + withExpressionWarnings(expressionWarnings): { expressionWarnings: if std.isArray(v=expressionWarnings) then expressionWarnings else [expressionWarnings] }, + '#withExpressionWarningsMixin':: d.fn(help='"The type checking warnings for each expression."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='expressionWarnings', type=d.T.array)]), + withExpressionWarningsMixin(expressionWarnings): { expressionWarnings+: if std.isArray(v=expressionWarnings) then expressionWarnings else [expressionWarnings] }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1alpha1/validatingAdmissionPolicy.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1alpha1/validatingAdmissionPolicy.libsonnet new file mode 100644 index 00000000000..a1b1a4fe407 --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1alpha1/validatingAdmissionPolicy.libsonnet @@ -0,0 +1,117 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: 
d.pkg(name='validatingAdmissionPolicy', url='', help='"ValidatingAdmissionPolicy describes the definition of an admission validation policy that accepts or rejects an object without changing it."'), + '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), + metadata: { + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + withAnnotations(annotations): { metadata+: { annotations: annotations } }, + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, + '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), + withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, + '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), + withDeletionGracePeriodSeconds(deletionGracePeriodSeconds): { metadata+: { deletionGracePeriodSeconds: deletionGracePeriodSeconds } }, + '#withDeletionTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='deletionTimestamp', type=d.T.string)]), + withDeletionTimestamp(deletionTimestamp): { metadata+: { deletionTimestamp: deletionTimestamp } }, + '#withFinalizers':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. 
Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."', args=[d.arg(name='finalizers', type=d.T.array)]), + withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, + '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), + withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + withGenerateName(generateName): { metadata+: { generateName: generateName } }, + '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), + withGeneration(generation): { metadata+: { generation: generation } }, + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), + withLabels(labels): { metadata+: { labels: labels } }, + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + withLabelsMixin(labels): { metadata+: { labels+: labels } }, + '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), + withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, + '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), + withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { metadata+: { name: name } }, + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + withNamespace(namespace): { metadata+: { namespace: namespace } }, + '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), + withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, + '#withOwnerReferencesMixin':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. 
If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ownerReferences', type=d.T.array)]), + withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, + '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), + withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), + withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), + withUid(uid): { metadata+: { uid: uid } }, + }, + '#new':: d.fn(help='new returns an instance of ValidatingAdmissionPolicy', args=[d.arg(name='name', type=d.T.string)]), + new(name): { + apiVersion: 'admissionregistration.k8s.io/v1alpha1', + kind: 'ValidatingAdmissionPolicy', + } + self.metadata.withName(name=name), + '#spec':: d.obj(help='"ValidatingAdmissionPolicySpec is the specification of the desired behavior of the AdmissionPolicy."'), + spec: { + '#matchConstraints':: d.obj(help='"MatchResources decides whether to run the admission control policy on an object based on whether it meets the match criteria. The exclude rules take precedence over include rules (if a resource matches both, it is excluded)"'), + matchConstraints: { + '#namespaceSelector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), + namespaceSelector: { + '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressions(matchExpressions): { spec+: { matchConstraints+: { namespaceSelector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } } }, + '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. 
The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressionsMixin(matchExpressions): { spec+: { matchConstraints+: { namespaceSelector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } } }, + '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabels(matchLabels): { spec+: { matchConstraints+: { namespaceSelector+: { matchLabels: matchLabels } } } }, + '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabelsMixin(matchLabels): { spec+: { matchConstraints+: { namespaceSelector+: { matchLabels+: matchLabels } } } }, + }, + '#objectSelector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), + objectSelector: { + '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressions(matchExpressions): { spec+: { matchConstraints+: { objectSelector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } } }, + '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressionsMixin(matchExpressions): { spec+: { matchConstraints+: { objectSelector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } } }, + '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabels(matchLabels): { spec+: { matchConstraints+: { objectSelector+: { matchLabels: matchLabels } } } }, + '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". 
The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabelsMixin(matchLabels): { spec+: { matchConstraints+: { objectSelector+: { matchLabels+: matchLabels } } } }, + }, + '#withExcludeResourceRules':: d.fn(help='"ExcludeResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy should not care about. The exclude rules take precedence over include rules (if a resource matches both, it is excluded)"', args=[d.arg(name='excludeResourceRules', type=d.T.array)]), + withExcludeResourceRules(excludeResourceRules): { spec+: { matchConstraints+: { excludeResourceRules: if std.isArray(v=excludeResourceRules) then excludeResourceRules else [excludeResourceRules] } } }, + '#withExcludeResourceRulesMixin':: d.fn(help='"ExcludeResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy should not care about. The exclude rules take precedence over include rules (if a resource matches both, it is excluded)"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='excludeResourceRules', type=d.T.array)]), + withExcludeResourceRulesMixin(excludeResourceRules): { spec+: { matchConstraints+: { excludeResourceRules+: if std.isArray(v=excludeResourceRules) then excludeResourceRules else [excludeResourceRules] } } }, + '#withMatchPolicy':: d.fn(help='"matchPolicy defines how the \\"MatchResources\\" list is used to match incoming requests. Allowed values are \\"Exact\\" or \\"Equivalent\\".\\n\\n- Exact: match a request only if it exactly matches a specified rule. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, but \\"rules\\" only included `apiGroups:[\\"apps\\"], apiVersions:[\\"v1\\"], resources: [\\"deployments\\"]`, a request to apps/v1beta1 or extensions/v1beta1 would not be sent to the ValidatingAdmissionPolicy.\\n\\n- Equivalent: match a request if modifies a resource listed in rules, even via another API group or version. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, and \\"rules\\" only included `apiGroups:[\\"apps\\"], apiVersions:[\\"v1\\"], resources: [\\"deployments\\"]`, a request to apps/v1beta1 or extensions/v1beta1 would be converted to apps/v1 and sent to the ValidatingAdmissionPolicy.\\n\\nDefaults to \\"Equivalent\\', args=[d.arg(name='matchPolicy', type=d.T.string)]), + withMatchPolicy(matchPolicy): { spec+: { matchConstraints+: { matchPolicy: matchPolicy } } }, + '#withResourceRules':: d.fn(help='"ResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy matches. The policy cares about an operation if it matches _any_ Rule."', args=[d.arg(name='resourceRules', type=d.T.array)]), + withResourceRules(resourceRules): { spec+: { matchConstraints+: { resourceRules: if std.isArray(v=resourceRules) then resourceRules else [resourceRules] } } }, + '#withResourceRulesMixin':: d.fn(help='"ResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy matches. 
The policy cares about an operation if it matches _any_ Rule."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='resourceRules', type=d.T.array)]), + withResourceRulesMixin(resourceRules): { spec+: { matchConstraints+: { resourceRules+: if std.isArray(v=resourceRules) then resourceRules else [resourceRules] } } }, + }, + '#paramKind':: d.obj(help='"ParamKind is a tuple of Group Kind and Version."'), + paramKind: { + '#withApiVersion':: d.fn(help='"APIVersion is the API group version the resources belong to. In format of \\"group/version\\". Required."', args=[d.arg(name='apiVersion', type=d.T.string)]), + withApiVersion(apiVersion): { spec+: { paramKind+: { apiVersion: apiVersion } } }, + '#withKind':: d.fn(help='"Kind is the API kind the resources belong to. Required."', args=[d.arg(name='kind', type=d.T.string)]), + withKind(kind): { spec+: { paramKind+: { kind: kind } } }, + }, + '#withAuditAnnotations':: d.fn(help='"auditAnnotations contains CEL expressions which are used to produce audit annotations for the audit event of the API request. validations and auditAnnotations may not both be empty; a least one of validations or auditAnnotations is required."', args=[d.arg(name='auditAnnotations', type=d.T.array)]), + withAuditAnnotations(auditAnnotations): { spec+: { auditAnnotations: if std.isArray(v=auditAnnotations) then auditAnnotations else [auditAnnotations] } }, + '#withAuditAnnotationsMixin':: d.fn(help='"auditAnnotations contains CEL expressions which are used to produce audit annotations for the audit event of the API request. validations and auditAnnotations may not both be empty; a least one of validations or auditAnnotations is required."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='auditAnnotations', type=d.T.array)]), + withAuditAnnotationsMixin(auditAnnotations): { spec+: { auditAnnotations+: if std.isArray(v=auditAnnotations) then auditAnnotations else [auditAnnotations] } }, + '#withFailurePolicy':: d.fn(help='"failurePolicy defines how to handle failures for the admission policy. Failures can occur from CEL expression parse errors, type check errors, runtime errors and invalid or mis-configured policy definitions or bindings.\\n\\nA policy is invalid if spec.paramKind refers to a non-existent Kind. A binding is invalid if spec.paramRef.name refers to a non-existent resource.\\n\\nfailurePolicy does not define how validations that evaluate to false are handled.\\n\\nWhen failurePolicy is set to Fail, ValidatingAdmissionPolicyBinding validationActions define how failures are enforced.\\n\\nAllowed values are Ignore or Fail. Defaults to Fail."', args=[d.arg(name='failurePolicy', type=d.T.string)]), + withFailurePolicy(failurePolicy): { spec+: { failurePolicy: failurePolicy } }, + '#withMatchConditions':: d.fn(help='"MatchConditions is a list of conditions that must be met for a request to be validated. Match conditions filter requests that have already been matched by the rules, namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests. There are a maximum of 64 match conditions allowed.\\n\\nIf a parameter object is provided, it can be accessed via the `params` handle in the same manner as validation expressions.\\n\\nThe exact matching logic is (in order):\\n 1. If ANY matchCondition evaluates to FALSE, the policy is skipped.\\n 2. If ALL matchConditions evaluate to TRUE, the policy is evaluated.\\n 3. 
If any matchCondition evaluates to an error (but none are FALSE):\\n - If failurePolicy=Fail, reject the request\\n - If failurePolicy=Ignore, the policy is skipped"', args=[d.arg(name='matchConditions', type=d.T.array)]), + withMatchConditions(matchConditions): { spec+: { matchConditions: if std.isArray(v=matchConditions) then matchConditions else [matchConditions] } }, + '#withMatchConditionsMixin':: d.fn(help='"MatchConditions is a list of conditions that must be met for a request to be validated. Match conditions filter requests that have already been matched by the rules, namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests. There are a maximum of 64 match conditions allowed.\\n\\nIf a parameter object is provided, it can be accessed via the `params` handle in the same manner as validation expressions.\\n\\nThe exact matching logic is (in order):\\n 1. If ANY matchCondition evaluates to FALSE, the policy is skipped.\\n 2. If ALL matchConditions evaluate to TRUE, the policy is evaluated.\\n 3. If any matchCondition evaluates to an error (but none are FALSE):\\n - If failurePolicy=Fail, reject the request\\n - If failurePolicy=Ignore, the policy is skipped"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchConditions', type=d.T.array)]), + withMatchConditionsMixin(matchConditions): { spec+: { matchConditions+: if std.isArray(v=matchConditions) then matchConditions else [matchConditions] } }, + '#withValidations':: d.fn(help='"Validations contain CEL expressions which is used to apply the validation. Validations and AuditAnnotations may not both be empty; a minimum of one Validations or AuditAnnotations is required."', args=[d.arg(name='validations', type=d.T.array)]), + withValidations(validations): { spec+: { validations: if std.isArray(v=validations) then validations else [validations] } }, + '#withValidationsMixin':: d.fn(help='"Validations contain CEL expressions which is used to apply the validation. Validations and AuditAnnotations may not both be empty; a minimum of one Validations or AuditAnnotations is required."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='validations', type=d.T.array)]), + withValidationsMixin(validations): { spec+: { validations+: if std.isArray(v=validations) then validations else [validations] } }, + '#withVariables':: d.fn(help='"Variables contain definitions of variables that can be used in composition of other expressions. Each variable is defined as a named CEL expression. The variables defined here will be available under `variables` in other expressions of the policy except MatchConditions because MatchConditions are evaluated before the rest of the policy.\\n\\nThe expression of a variable can refer to other variables defined earlier in the list but not those after. Thus, Variables must be sorted by the order of first appearance and acyclic."', args=[d.arg(name='variables', type=d.T.array)]), + withVariables(variables): { spec+: { variables: if std.isArray(v=variables) then variables else [variables] } }, + '#withVariablesMixin':: d.fn(help='"Variables contain definitions of variables that can be used in composition of other expressions. Each variable is defined as a named CEL expression. 
The variables defined here will be available under `variables` in other expressions of the policy except MatchConditions because MatchConditions are evaluated before the rest of the policy.\\n\\nThe expression of a variable can refer to other variables defined earlier in the list but not those after. Thus, Variables must be sorted by the order of first appearance and acyclic."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='variables', type=d.T.array)]), + withVariablesMixin(variables): { spec+: { variables+: if std.isArray(v=variables) then variables else [variables] } }, + }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1alpha1/validatingAdmissionPolicyBinding.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1alpha1/validatingAdmissionPolicyBinding.libsonnet new file mode 100644 index 00000000000..3a6143e95a8 --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1alpha1/validatingAdmissionPolicyBinding.libsonnet @@ -0,0 +1,118 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='validatingAdmissionPolicyBinding', url='', help="\"ValidatingAdmissionPolicyBinding binds the ValidatingAdmissionPolicy with paramerized resources. ValidatingAdmissionPolicyBinding and parameter CRDs together define how cluster administrators configure policies for clusters.\\n\\nFor a given admission request, each binding will cause its policy to be evaluated N times, where N is 1 for policies/bindings that don't use params, otherwise N is the number of parameters selected by the binding.\\n\\nThe CEL expressions of a policy must have a computed CEL cost below the maximum CEL budget. Each evaluation of the policy is given an independent CEL cost budget. Adding/removing policies, bindings, or params can not affect whether a given (policy, binding, param) combination is within its own CEL budget.\""), + '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), + metadata: { + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + withAnnotations(annotations): { metadata+: { annotations: annotations } }, + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, + '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. 
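[Editorial sketch, not part of the patch] The generated v1alpha1 `validatingAdmissionPolicy` object above exposes chainable `with*` setters that deep-merge into `spec` (e.g. `spec+: { matchConstraints+: { ... } }`). A minimal usage sketch follows; the import path, the `new(name)` constructor, the resource rule contents and the CEL expression are illustrative assumptions, not taken from this diff:

```jsonnet
// Sketch only: assumes the vendored library is imported by its package path
// and that validatingAdmissionPolicy provides the usual new(name) constructor.
local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';
local policy = k.admissionregistration.v1alpha1.validatingAdmissionPolicy;

policy.new('replica-limit')
+ policy.spec.withFailurePolicy('Fail')
+ policy.spec.matchConstraints.withResourceRules([{
    // Illustrative NamedRuleWithOperations entry.
    apiGroups: ['apps'],
    apiVersions: ['v1'],
    operations: ['CREATE', 'UPDATE'],
    resources: ['deployments'],
  }])
+ policy.spec.withValidations([{
    expression: 'object.spec.replicas <= 5',
    message: 'replicas must be no more than 5',
  }])
```

Because each setter only touches its own nested key, the calls can be combined in any order with plain object addition.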
Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), + withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, + '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), + withDeletionGracePeriodSeconds(deletionGracePeriodSeconds): { metadata+: { deletionGracePeriodSeconds: deletionGracePeriodSeconds } }, + '#withDeletionTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='deletionTimestamp', type=d.T.string)]), + withDeletionTimestamp(deletionTimestamp): { metadata+: { deletionTimestamp: deletionTimestamp } }, + '#withFinalizers':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."', args=[d.arg(name='finalizers', type=d.T.array)]), + withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, + '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), + withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. 
If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + withGenerateName(generateName): { metadata+: { generateName: generateName } }, + '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), + withGeneration(generation): { metadata+: { generation: generation } }, + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), + withLabels(labels): { metadata+: { labels: labels } }, + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + withLabelsMixin(labels): { metadata+: { labels+: labels } }, + '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), + withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, + '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), + withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { metadata+: { name: name } }, + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + withNamespace(namespace): { metadata+: { namespace: namespace } }, + '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), + withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, + '#withOwnerReferencesMixin':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ownerReferences', type=d.T.array)]), + withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, + '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), + withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), + withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), + withUid(uid): { metadata+: { uid: uid } }, + }, + '#new':: d.fn(help='new returns an instance of ValidatingAdmissionPolicyBinding', args=[d.arg(name='name', type=d.T.string)]), + new(name): { + apiVersion: 'admissionregistration.k8s.io/v1alpha1', + kind: 'ValidatingAdmissionPolicyBinding', + } + self.metadata.withName(name=name), + '#spec':: d.obj(help='"ValidatingAdmissionPolicyBindingSpec is the specification of the ValidatingAdmissionPolicyBinding."'), + spec: { + '#matchResources':: d.obj(help='"MatchResources decides whether to run the admission control policy on an object based on whether it meets the match criteria. The exclude rules take precedence over include rules (if a resource matches both, it is excluded)"'), + matchResources: { + '#namespaceSelector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), + namespaceSelector: { + '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressions(matchExpressions): { spec+: { matchResources+: { namespaceSelector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } } }, + '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressionsMixin(matchExpressions): { spec+: { matchResources+: { namespaceSelector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } } }, + '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabels(matchLabels): { spec+: { matchResources+: { namespaceSelector+: { matchLabels: matchLabels } } } }, + '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabelsMixin(matchLabels): { spec+: { matchResources+: { namespaceSelector+: { matchLabels+: matchLabels } } } }, + }, + '#objectSelector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), + objectSelector: { + '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. 
The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressions(matchExpressions): { spec+: { matchResources+: { objectSelector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } } }, + '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressionsMixin(matchExpressions): { spec+: { matchResources+: { objectSelector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } } }, + '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabels(matchLabels): { spec+: { matchResources+: { objectSelector+: { matchLabels: matchLabels } } } }, + '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabelsMixin(matchLabels): { spec+: { matchResources+: { objectSelector+: { matchLabels+: matchLabels } } } }, + }, + '#withExcludeResourceRules':: d.fn(help='"ExcludeResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy should not care about. The exclude rules take precedence over include rules (if a resource matches both, it is excluded)"', args=[d.arg(name='excludeResourceRules', type=d.T.array)]), + withExcludeResourceRules(excludeResourceRules): { spec+: { matchResources+: { excludeResourceRules: if std.isArray(v=excludeResourceRules) then excludeResourceRules else [excludeResourceRules] } } }, + '#withExcludeResourceRulesMixin':: d.fn(help='"ExcludeResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy should not care about. The exclude rules take precedence over include rules (if a resource matches both, it is excluded)"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='excludeResourceRules', type=d.T.array)]), + withExcludeResourceRulesMixin(excludeResourceRules): { spec+: { matchResources+: { excludeResourceRules+: if std.isArray(v=excludeResourceRules) then excludeResourceRules else [excludeResourceRules] } } }, + '#withMatchPolicy':: d.fn(help='"matchPolicy defines how the \\"MatchResources\\" list is used to match incoming requests. Allowed values are \\"Exact\\" or \\"Equivalent\\".\\n\\n- Exact: match a request only if it exactly matches a specified rule. 
For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, but \\"rules\\" only included `apiGroups:[\\"apps\\"], apiVersions:[\\"v1\\"], resources: [\\"deployments\\"]`, a request to apps/v1beta1 or extensions/v1beta1 would not be sent to the ValidatingAdmissionPolicy.\\n\\n- Equivalent: match a request if modifies a resource listed in rules, even via another API group or version. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, and \\"rules\\" only included `apiGroups:[\\"apps\\"], apiVersions:[\\"v1\\"], resources: [\\"deployments\\"]`, a request to apps/v1beta1 or extensions/v1beta1 would be converted to apps/v1 and sent to the ValidatingAdmissionPolicy.\\n\\nDefaults to \\"Equivalent\\', args=[d.arg(name='matchPolicy', type=d.T.string)]), + withMatchPolicy(matchPolicy): { spec+: { matchResources+: { matchPolicy: matchPolicy } } }, + '#withResourceRules':: d.fn(help='"ResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy matches. The policy cares about an operation if it matches _any_ Rule."', args=[d.arg(name='resourceRules', type=d.T.array)]), + withResourceRules(resourceRules): { spec+: { matchResources+: { resourceRules: if std.isArray(v=resourceRules) then resourceRules else [resourceRules] } } }, + '#withResourceRulesMixin':: d.fn(help='"ResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy matches. The policy cares about an operation if it matches _any_ Rule."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='resourceRules', type=d.T.array)]), + withResourceRulesMixin(resourceRules): { spec+: { matchResources+: { resourceRules+: if std.isArray(v=resourceRules) then resourceRules else [resourceRules] } } }, + }, + '#paramRef':: d.obj(help='"ParamRef describes how to locate the params to be used as input to expressions of rules applied by a policy binding."'), + paramRef: { + '#selector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), + selector: { + '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressions(matchExpressions): { spec+: { paramRef+: { selector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } } }, + '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressionsMixin(matchExpressions): { spec+: { paramRef+: { selector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } } }, + '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". 
The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabels(matchLabels): { spec+: { paramRef+: { selector+: { matchLabels: matchLabels } } } }, + '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabelsMixin(matchLabels): { spec+: { paramRef+: { selector+: { matchLabels+: matchLabels } } } }, + }, + '#withName':: d.fn(help='"`name` is the name of the resource being referenced.\\n\\n`name` and `selector` are mutually exclusive properties. If one is set, the other must be unset."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { spec+: { paramRef+: { name: name } } }, + '#withNamespace':: d.fn(help='"namespace is the namespace of the referenced resource. Allows limiting the search for params to a specific namespace. Applies to both `name` and `selector` fields.\\n\\nA per-namespace parameter may be used by specifying a namespace-scoped `paramKind` in the policy and leaving this field empty.\\n\\n- If `paramKind` is cluster-scoped, this field MUST be unset. Setting this field results in a configuration error.\\n\\n- If `paramKind` is namespace-scoped, the namespace of the object being evaluated for admission will be used when this field is left unset. Take care that if this is left empty the binding must not match any cluster-scoped resources, which will result in an error."', args=[d.arg(name='namespace', type=d.T.string)]), + withNamespace(namespace): { spec+: { paramRef+: { namespace: namespace } } }, + '#withParameterNotFoundAction':: d.fn(help='"`parameterNotFoundAction` controls the behavior of the binding when the resource exists, and name or selector is valid, but there are no parameters matched by the binding. If the value is set to `Allow`, then no matched parameters will be treated as successful validation by the binding. If set to `Deny`, then no matched parameters will be subject to the `failurePolicy` of the policy.\\n\\nAllowed values are `Allow` or `Deny` Default to `Deny`"', args=[d.arg(name='parameterNotFoundAction', type=d.T.string)]), + withParameterNotFoundAction(parameterNotFoundAction): { spec+: { paramRef+: { parameterNotFoundAction: parameterNotFoundAction } } }, + }, + '#withPolicyName':: d.fn(help='"PolicyName references a ValidatingAdmissionPolicy name which the ValidatingAdmissionPolicyBinding binds to. If the referenced resource does not exist, this binding is considered invalid and will be ignored Required."', args=[d.arg(name='policyName', type=d.T.string)]), + withPolicyName(policyName): { spec+: { policyName: policyName } }, + '#withValidationActions':: d.fn(help="\"validationActions declares how Validations of the referenced ValidatingAdmissionPolicy are enforced. If a validation evaluates to false it is always enforced according to these actions.\\n\\nFailures defined by the ValidatingAdmissionPolicy's FailurePolicy are enforced according to these actions only if the FailurePolicy is set to Fail, otherwise the failures are ignored. This includes compilation errors, runtime errors and misconfigurations of the policy.\\n\\nvalidationActions is declared as a set of action values. Order does not matter. 
validationActions may not contain duplicates of the same action.\\n\\nThe supported actions values are:\\n\\n\\\"Deny\\\" specifies that a validation failure results in a denied request.\\n\\n\\\"Warn\\\" specifies that a validation failure is reported to the request client in HTTP Warning headers, with a warning code of 299. Warnings can be sent both for allowed or denied admission responses.\\n\\n\\\"Audit\\\" specifies that a validation failure is included in the published audit event for the request. The audit event will contain a `validation.policy.admission.k8s.io/validation_failure` audit annotation with a value containing the details of the validation failures, formatted as a JSON list of objects, each with the following fields: - message: The validation failure message string - policy: The resource name of the ValidatingAdmissionPolicy - binding: The resource name of the ValidatingAdmissionPolicyBinding - expressionIndex: The index of the failed validations in the ValidatingAdmissionPolicy - validationActions: The enforcement actions enacted for the validation failure Example audit annotation: `\\\"validation.policy.admission.k8s.io/validation_failure\\\": \\\"[{\\\"message\\\": \\\"Invalid value\\\", {\\\"policy\\\": \\\"policy.example.com\\\", {\\\"binding\\\": \\\"policybinding.example.com\\\", {\\\"expressionIndex\\\": \\\"1\\\", {\\\"validationActions\\\": [\\\"Audit\\\"]}]\\\"`\\n\\nClients should expect to handle additional values by ignoring any values not recognized.\\n\\n\\\"Deny\\\" and \\\"Warn\\\" may not be used together since this combination needlessly duplicates the validation failure both in the API response body and the HTTP warning headers.\\n\\nRequired.\"", args=[d.arg(name='validationActions', type=d.T.array)]), + withValidationActions(validationActions): { spec+: { validationActions: if std.isArray(v=validationActions) then validationActions else [validationActions] } }, + '#withValidationActionsMixin':: d.fn(help="\"validationActions declares how Validations of the referenced ValidatingAdmissionPolicy are enforced. If a validation evaluates to false it is always enforced according to these actions.\\n\\nFailures defined by the ValidatingAdmissionPolicy's FailurePolicy are enforced according to these actions only if the FailurePolicy is set to Fail, otherwise the failures are ignored. This includes compilation errors, runtime errors and misconfigurations of the policy.\\n\\nvalidationActions is declared as a set of action values. Order does not matter. validationActions may not contain duplicates of the same action.\\n\\nThe supported actions values are:\\n\\n\\\"Deny\\\" specifies that a validation failure results in a denied request.\\n\\n\\\"Warn\\\" specifies that a validation failure is reported to the request client in HTTP Warning headers, with a warning code of 299. Warnings can be sent both for allowed or denied admission responses.\\n\\n\\\"Audit\\\" specifies that a validation failure is included in the published audit event for the request. 
The audit event will contain a `validation.policy.admission.k8s.io/validation_failure` audit annotation with a value containing the details of the validation failures, formatted as a JSON list of objects, each with the following fields: - message: The validation failure message string - policy: The resource name of the ValidatingAdmissionPolicy - binding: The resource name of the ValidatingAdmissionPolicyBinding - expressionIndex: The index of the failed validations in the ValidatingAdmissionPolicy - validationActions: The enforcement actions enacted for the validation failure Example audit annotation: `\\\"validation.policy.admission.k8s.io/validation_failure\\\": \\\"[{\\\"message\\\": \\\"Invalid value\\\", {\\\"policy\\\": \\\"policy.example.com\\\", {\\\"binding\\\": \\\"policybinding.example.com\\\", {\\\"expressionIndex\\\": \\\"1\\\", {\\\"validationActions\\\": [\\\"Audit\\\"]}]\\\"`\\n\\nClients should expect to handle additional values by ignoring any values not recognized.\\n\\n\\\"Deny\\\" and \\\"Warn\\\" may not be used together since this combination needlessly duplicates the validation failure both in the API response body and the HTTP warning headers.\\n\\nRequired.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='validationActions', type=d.T.array)]), + withValidationActionsMixin(validationActions): { spec+: { validationActions+: if std.isArray(v=validationActions) then validationActions else [validationActions] } }, + }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1alpha1/validatingAdmissionPolicyBindingSpec.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1alpha1/validatingAdmissionPolicyBindingSpec.libsonnet new file mode 100644 index 00000000000..c08ef317d61 --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1alpha1/validatingAdmissionPolicyBindingSpec.libsonnet @@ -0,0 +1,67 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='validatingAdmissionPolicyBindingSpec', url='', help='"ValidatingAdmissionPolicyBindingSpec is the specification of the ValidatingAdmissionPolicyBinding."'), + '#matchResources':: d.obj(help='"MatchResources decides whether to run the admission control policy on an object based on whether it meets the match criteria. The exclude rules take precedence over include rules (if a resource matches both, it is excluded)"'), + matchResources: { + '#namespaceSelector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), + namespaceSelector: { + '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressions(matchExpressions): { matchResources+: { namespaceSelector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } }, + '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. 
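[Editorial sketch, not part of the patch] The `validatingAdmissionPolicyBinding` file just added pairs a policy with the resources and namespaces it should apply to; `new(name)`, `spec.withPolicyName`, `spec.withValidationActions` and the `spec.matchResources` selectors are all visible in the generated code above. A hedged sketch of how the binding might be composed (names and labels are illustrative assumptions):

```jsonnet
// Sketch only: binds a policy named 'replica-limit' to namespaces labelled
// environment=test and enforces failures by denying the request.
local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';
local binding = k.admissionregistration.v1alpha1.validatingAdmissionPolicyBinding;

binding.new('replica-limit-test')
+ binding.spec.withPolicyName('replica-limit')
+ binding.spec.withValidationActions(['Deny'])
+ binding.spec.matchResources.namespaceSelector.withMatchLabels({ environment: 'test' })
```

Per the docstring above, `validationActions` is required and `Deny` and `Warn` may not be combined, since that would duplicate the failure in both the response body and the HTTP warning headers.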
The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressionsMixin(matchExpressions): { matchResources+: { namespaceSelector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } }, + '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabels(matchLabels): { matchResources+: { namespaceSelector+: { matchLabels: matchLabels } } }, + '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabelsMixin(matchLabels): { matchResources+: { namespaceSelector+: { matchLabels+: matchLabels } } }, + }, + '#objectSelector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), + objectSelector: { + '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressions(matchExpressions): { matchResources+: { objectSelector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } }, + '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressionsMixin(matchExpressions): { matchResources+: { objectSelector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } }, + '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabels(matchLabels): { matchResources+: { objectSelector+: { matchLabels: matchLabels } } }, + '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". 
The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabelsMixin(matchLabels): { matchResources+: { objectSelector+: { matchLabels+: matchLabels } } }, + }, + '#withExcludeResourceRules':: d.fn(help='"ExcludeResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy should not care about. The exclude rules take precedence over include rules (if a resource matches both, it is excluded)"', args=[d.arg(name='excludeResourceRules', type=d.T.array)]), + withExcludeResourceRules(excludeResourceRules): { matchResources+: { excludeResourceRules: if std.isArray(v=excludeResourceRules) then excludeResourceRules else [excludeResourceRules] } }, + '#withExcludeResourceRulesMixin':: d.fn(help='"ExcludeResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy should not care about. The exclude rules take precedence over include rules (if a resource matches both, it is excluded)"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='excludeResourceRules', type=d.T.array)]), + withExcludeResourceRulesMixin(excludeResourceRules): { matchResources+: { excludeResourceRules+: if std.isArray(v=excludeResourceRules) then excludeResourceRules else [excludeResourceRules] } }, + '#withMatchPolicy':: d.fn(help='"matchPolicy defines how the \\"MatchResources\\" list is used to match incoming requests. Allowed values are \\"Exact\\" or \\"Equivalent\\".\\n\\n- Exact: match a request only if it exactly matches a specified rule. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, but \\"rules\\" only included `apiGroups:[\\"apps\\"], apiVersions:[\\"v1\\"], resources: [\\"deployments\\"]`, a request to apps/v1beta1 or extensions/v1beta1 would not be sent to the ValidatingAdmissionPolicy.\\n\\n- Equivalent: match a request if modifies a resource listed in rules, even via another API group or version. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, and \\"rules\\" only included `apiGroups:[\\"apps\\"], apiVersions:[\\"v1\\"], resources: [\\"deployments\\"]`, a request to apps/v1beta1 or extensions/v1beta1 would be converted to apps/v1 and sent to the ValidatingAdmissionPolicy.\\n\\nDefaults to \\"Equivalent\\', args=[d.arg(name='matchPolicy', type=d.T.string)]), + withMatchPolicy(matchPolicy): { matchResources+: { matchPolicy: matchPolicy } }, + '#withResourceRules':: d.fn(help='"ResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy matches. The policy cares about an operation if it matches _any_ Rule."', args=[d.arg(name='resourceRules', type=d.T.array)]), + withResourceRules(resourceRules): { matchResources+: { resourceRules: if std.isArray(v=resourceRules) then resourceRules else [resourceRules] } }, + '#withResourceRulesMixin':: d.fn(help='"ResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy matches. 
The policy cares about an operation if it matches _any_ Rule."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='resourceRules', type=d.T.array)]), + withResourceRulesMixin(resourceRules): { matchResources+: { resourceRules+: if std.isArray(v=resourceRules) then resourceRules else [resourceRules] } }, + }, + '#paramRef':: d.obj(help='"ParamRef describes how to locate the params to be used as input to expressions of rules applied by a policy binding."'), + paramRef: { + '#selector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), + selector: { + '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressions(matchExpressions): { paramRef+: { selector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } }, + '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressionsMixin(matchExpressions): { paramRef+: { selector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } }, + '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabels(matchLabels): { paramRef+: { selector+: { matchLabels: matchLabels } } }, + '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabelsMixin(matchLabels): { paramRef+: { selector+: { matchLabels+: matchLabels } } }, + }, + '#withName':: d.fn(help='"`name` is the name of the resource being referenced.\\n\\n`name` and `selector` are mutually exclusive properties. If one is set, the other must be unset."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { paramRef+: { name: name } }, + '#withNamespace':: d.fn(help='"namespace is the namespace of the referenced resource. Allows limiting the search for params to a specific namespace. Applies to both `name` and `selector` fields.\\n\\nA per-namespace parameter may be used by specifying a namespace-scoped `paramKind` in the policy and leaving this field empty.\\n\\n- If `paramKind` is cluster-scoped, this field MUST be unset. Setting this field results in a configuration error.\\n\\n- If `paramKind` is namespace-scoped, the namespace of the object being evaluated for admission will be used when this field is left unset. 
Take care that if this is left empty the binding must not match any cluster-scoped resources, which will result in an error."', args=[d.arg(name='namespace', type=d.T.string)]), + withNamespace(namespace): { paramRef+: { namespace: namespace } }, + '#withParameterNotFoundAction':: d.fn(help='"`parameterNotFoundAction` controls the behavior of the binding when the resource exists, and name or selector is valid, but there are no parameters matched by the binding. If the value is set to `Allow`, then no matched parameters will be treated as successful validation by the binding. If set to `Deny`, then no matched parameters will be subject to the `failurePolicy` of the policy.\\n\\nAllowed values are `Allow` or `Deny` Default to `Deny`"', args=[d.arg(name='parameterNotFoundAction', type=d.T.string)]), + withParameterNotFoundAction(parameterNotFoundAction): { paramRef+: { parameterNotFoundAction: parameterNotFoundAction } }, + }, + '#withPolicyName':: d.fn(help='"PolicyName references a ValidatingAdmissionPolicy name which the ValidatingAdmissionPolicyBinding binds to. If the referenced resource does not exist, this binding is considered invalid and will be ignored Required."', args=[d.arg(name='policyName', type=d.T.string)]), + withPolicyName(policyName): { policyName: policyName }, + '#withValidationActions':: d.fn(help="\"validationActions declares how Validations of the referenced ValidatingAdmissionPolicy are enforced. If a validation evaluates to false it is always enforced according to these actions.\\n\\nFailures defined by the ValidatingAdmissionPolicy's FailurePolicy are enforced according to these actions only if the FailurePolicy is set to Fail, otherwise the failures are ignored. This includes compilation errors, runtime errors and misconfigurations of the policy.\\n\\nvalidationActions is declared as a set of action values. Order does not matter. validationActions may not contain duplicates of the same action.\\n\\nThe supported actions values are:\\n\\n\\\"Deny\\\" specifies that a validation failure results in a denied request.\\n\\n\\\"Warn\\\" specifies that a validation failure is reported to the request client in HTTP Warning headers, with a warning code of 299. Warnings can be sent both for allowed or denied admission responses.\\n\\n\\\"Audit\\\" specifies that a validation failure is included in the published audit event for the request. 
The audit event will contain a `validation.policy.admission.k8s.io/validation_failure` audit annotation with a value containing the details of the validation failures, formatted as a JSON list of objects, each with the following fields: - message: The validation failure message string - policy: The resource name of the ValidatingAdmissionPolicy - binding: The resource name of the ValidatingAdmissionPolicyBinding - expressionIndex: The index of the failed validations in the ValidatingAdmissionPolicy - validationActions: The enforcement actions enacted for the validation failure Example audit annotation: `\\\"validation.policy.admission.k8s.io/validation_failure\\\": \\\"[{\\\"message\\\": \\\"Invalid value\\\", {\\\"policy\\\": \\\"policy.example.com\\\", {\\\"binding\\\": \\\"policybinding.example.com\\\", {\\\"expressionIndex\\\": \\\"1\\\", {\\\"validationActions\\\": [\\\"Audit\\\"]}]\\\"`\\n\\nClients should expect to handle additional values by ignoring any values not recognized.\\n\\n\\\"Deny\\\" and \\\"Warn\\\" may not be used together since this combination needlessly duplicates the validation failure both in the API response body and the HTTP warning headers.\\n\\nRequired.\"", args=[d.arg(name='validationActions', type=d.T.array)]), + withValidationActions(validationActions): { validationActions: if std.isArray(v=validationActions) then validationActions else [validationActions] }, + '#withValidationActionsMixin':: d.fn(help="\"validationActions declares how Validations of the referenced ValidatingAdmissionPolicy are enforced. If a validation evaluates to false it is always enforced according to these actions.\\n\\nFailures defined by the ValidatingAdmissionPolicy's FailurePolicy are enforced according to these actions only if the FailurePolicy is set to Fail, otherwise the failures are ignored. This includes compilation errors, runtime errors and misconfigurations of the policy.\\n\\nvalidationActions is declared as a set of action values. Order does not matter. validationActions may not contain duplicates of the same action.\\n\\nThe supported actions values are:\\n\\n\\\"Deny\\\" specifies that a validation failure results in a denied request.\\n\\n\\\"Warn\\\" specifies that a validation failure is reported to the request client in HTTP Warning headers, with a warning code of 299. Warnings can be sent both for allowed or denied admission responses.\\n\\n\\\"Audit\\\" specifies that a validation failure is included in the published audit event for the request. 
The audit event will contain a `validation.policy.admission.k8s.io/validation_failure` audit annotation with a value containing the details of the validation failures, formatted as a JSON list of objects, each with the following fields: - message: The validation failure message string - policy: The resource name of the ValidatingAdmissionPolicy - binding: The resource name of the ValidatingAdmissionPolicyBinding - expressionIndex: The index of the failed validations in the ValidatingAdmissionPolicy - validationActions: The enforcement actions enacted for the validation failure Example audit annotation: `\\\"validation.policy.admission.k8s.io/validation_failure\\\": \\\"[{\\\"message\\\": \\\"Invalid value\\\", {\\\"policy\\\": \\\"policy.example.com\\\", {\\\"binding\\\": \\\"policybinding.example.com\\\", {\\\"expressionIndex\\\": \\\"1\\\", {\\\"validationActions\\\": [\\\"Audit\\\"]}]\\\"`\\n\\nClients should expect to handle additional values by ignoring any values not recognized.\\n\\n\\\"Deny\\\" and \\\"Warn\\\" may not be used together since this combination needlessly duplicates the validation failure both in the API response body and the HTTP warning headers.\\n\\nRequired.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='validationActions', type=d.T.array)]), + withValidationActionsMixin(validationActions): { validationActions+: if std.isArray(v=validationActions) then validationActions else [validationActions] }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1alpha1/validatingAdmissionPolicySpec.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1alpha1/validatingAdmissionPolicySpec.libsonnet new file mode 100644 index 00000000000..3bc96c0cd22 --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1alpha1/validatingAdmissionPolicySpec.libsonnet @@ -0,0 +1,66 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='validatingAdmissionPolicySpec', url='', help='"ValidatingAdmissionPolicySpec is the specification of the desired behavior of the AdmissionPolicy."'), + '#matchConstraints':: d.obj(help='"MatchResources decides whether to run the admission control policy on an object based on whether it meets the match criteria. The exclude rules take precedence over include rules (if a resource matches both, it is excluded)"'), + matchConstraints: { + '#namespaceSelector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), + namespaceSelector: { + '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressions(matchExpressions): { matchConstraints+: { namespaceSelector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } }, + '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. 
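[Editorial sketch, not part of the patch] Unlike the full-object helpers, the `validatingAdmissionPolicyBindingSpec` setters write at the top level (`{ policyName: ... }`, `{ validationActions: ... }`) rather than under a `spec+:` wrapper, so they are useful for building a bare spec fragment to merge into an existing manifest. A minimal sketch, assuming the spec object is exposed under the same version package as the kinds above:

```jsonnet
// Sketch only: builds just the spec fields and attaches them to an object
// defined elsewhere; the names are illustrative assumptions.
local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';
local bindingSpec = k.admissionregistration.v1alpha1.validatingAdmissionPolicyBindingSpec;

{
  spec: bindingSpec.withPolicyName('replica-limit')
    + bindingSpec.withValidationActions(['Warn', 'Audit']),
}
```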
The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressionsMixin(matchExpressions): { matchConstraints+: { namespaceSelector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } }, + '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabels(matchLabels): { matchConstraints+: { namespaceSelector+: { matchLabels: matchLabels } } }, + '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabelsMixin(matchLabels): { matchConstraints+: { namespaceSelector+: { matchLabels+: matchLabels } } }, + }, + '#objectSelector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), + objectSelector: { + '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressions(matchExpressions): { matchConstraints+: { objectSelector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } }, + '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressionsMixin(matchExpressions): { matchConstraints+: { objectSelector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } }, + '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabels(matchLabels): { matchConstraints+: { objectSelector+: { matchLabels: matchLabels } } }, + '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". 
The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabelsMixin(matchLabels): { matchConstraints+: { objectSelector+: { matchLabels+: matchLabels } } }, + }, + '#withExcludeResourceRules':: d.fn(help='"ExcludeResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy should not care about. The exclude rules take precedence over include rules (if a resource matches both, it is excluded)"', args=[d.arg(name='excludeResourceRules', type=d.T.array)]), + withExcludeResourceRules(excludeResourceRules): { matchConstraints+: { excludeResourceRules: if std.isArray(v=excludeResourceRules) then excludeResourceRules else [excludeResourceRules] } }, + '#withExcludeResourceRulesMixin':: d.fn(help='"ExcludeResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy should not care about. The exclude rules take precedence over include rules (if a resource matches both, it is excluded)"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='excludeResourceRules', type=d.T.array)]), + withExcludeResourceRulesMixin(excludeResourceRules): { matchConstraints+: { excludeResourceRules+: if std.isArray(v=excludeResourceRules) then excludeResourceRules else [excludeResourceRules] } }, + '#withMatchPolicy':: d.fn(help='"matchPolicy defines how the \\"MatchResources\\" list is used to match incoming requests. Allowed values are \\"Exact\\" or \\"Equivalent\\".\\n\\n- Exact: match a request only if it exactly matches a specified rule. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, but \\"rules\\" only included `apiGroups:[\\"apps\\"], apiVersions:[\\"v1\\"], resources: [\\"deployments\\"]`, a request to apps/v1beta1 or extensions/v1beta1 would not be sent to the ValidatingAdmissionPolicy.\\n\\n- Equivalent: match a request if modifies a resource listed in rules, even via another API group or version. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, and \\"rules\\" only included `apiGroups:[\\"apps\\"], apiVersions:[\\"v1\\"], resources: [\\"deployments\\"]`, a request to apps/v1beta1 or extensions/v1beta1 would be converted to apps/v1 and sent to the ValidatingAdmissionPolicy.\\n\\nDefaults to \\"Equivalent\\', args=[d.arg(name='matchPolicy', type=d.T.string)]), + withMatchPolicy(matchPolicy): { matchConstraints+: { matchPolicy: matchPolicy } }, + '#withResourceRules':: d.fn(help='"ResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy matches. The policy cares about an operation if it matches _any_ Rule."', args=[d.arg(name='resourceRules', type=d.T.array)]), + withResourceRules(resourceRules): { matchConstraints+: { resourceRules: if std.isArray(v=resourceRules) then resourceRules else [resourceRules] } }, + '#withResourceRulesMixin':: d.fn(help='"ResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy matches. 
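For reference, a minimal Jsonnet sketch of how the generated matchConstraints selector helpers above compose; the top-level import path and the label values are illustrative assumptions, not part of this vendored patch:

local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';
local spec = k.admissionregistration.v1alpha1.validatingAdmissionPolicySpec;

// Each with* call returns a fragment that deep-merges under matchConstraints,
// so independent selector settings can be combined with `+`.
spec.matchConstraints.namespaceSelector.withMatchLabels({ environment: 'prod' })
+ spec.matchConstraints.objectSelector.withMatchExpressions([
  // plain LabelSelectorRequirement object
  { key: 'app.kubernetes.io/name', operator: 'Exists' },
])
// evaluates to { matchConstraints: { namespaceSelector: { ... }, objectSelector: { ... } } }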
The policy cares about an operation if it matches _any_ Rule."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='resourceRules', type=d.T.array)]), + withResourceRulesMixin(resourceRules): { matchConstraints+: { resourceRules+: if std.isArray(v=resourceRules) then resourceRules else [resourceRules] } }, + }, + '#paramKind':: d.obj(help='"ParamKind is a tuple of Group Kind and Version."'), + paramKind: { + '#withApiVersion':: d.fn(help='"APIVersion is the API group version the resources belong to. In format of \\"group/version\\". Required."', args=[d.arg(name='apiVersion', type=d.T.string)]), + withApiVersion(apiVersion): { paramKind+: { apiVersion: apiVersion } }, + '#withKind':: d.fn(help='"Kind is the API kind the resources belong to. Required."', args=[d.arg(name='kind', type=d.T.string)]), + withKind(kind): { paramKind+: { kind: kind } }, + }, + '#withAuditAnnotations':: d.fn(help='"auditAnnotations contains CEL expressions which are used to produce audit annotations for the audit event of the API request. validations and auditAnnotations may not both be empty; a least one of validations or auditAnnotations is required."', args=[d.arg(name='auditAnnotations', type=d.T.array)]), + withAuditAnnotations(auditAnnotations): { auditAnnotations: if std.isArray(v=auditAnnotations) then auditAnnotations else [auditAnnotations] }, + '#withAuditAnnotationsMixin':: d.fn(help='"auditAnnotations contains CEL expressions which are used to produce audit annotations for the audit event of the API request. validations and auditAnnotations may not both be empty; a least one of validations or auditAnnotations is required."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='auditAnnotations', type=d.T.array)]), + withAuditAnnotationsMixin(auditAnnotations): { auditAnnotations+: if std.isArray(v=auditAnnotations) then auditAnnotations else [auditAnnotations] }, + '#withFailurePolicy':: d.fn(help='"failurePolicy defines how to handle failures for the admission policy. Failures can occur from CEL expression parse errors, type check errors, runtime errors and invalid or mis-configured policy definitions or bindings.\\n\\nA policy is invalid if spec.paramKind refers to a non-existent Kind. A binding is invalid if spec.paramRef.name refers to a non-existent resource.\\n\\nfailurePolicy does not define how validations that evaluate to false are handled.\\n\\nWhen failurePolicy is set to Fail, ValidatingAdmissionPolicyBinding validationActions define how failures are enforced.\\n\\nAllowed values are Ignore or Fail. Defaults to Fail."', args=[d.arg(name='failurePolicy', type=d.T.string)]), + withFailurePolicy(failurePolicy): { failurePolicy: failurePolicy }, + '#withMatchConditions':: d.fn(help='"MatchConditions is a list of conditions that must be met for a request to be validated. Match conditions filter requests that have already been matched by the rules, namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests. There are a maximum of 64 match conditions allowed.\\n\\nIf a parameter object is provided, it can be accessed via the `params` handle in the same manner as validation expressions.\\n\\nThe exact matching logic is (in order):\\n 1. If ANY matchCondition evaluates to FALSE, the policy is skipped.\\n 2. If ALL matchConditions evaluate to TRUE, the policy is evaluated.\\n 3. 
If any matchCondition evaluates to an error (but none are FALSE):\\n - If failurePolicy=Fail, reject the request\\n - If failurePolicy=Ignore, the policy is skipped"', args=[d.arg(name='matchConditions', type=d.T.array)]), + withMatchConditions(matchConditions): { matchConditions: if std.isArray(v=matchConditions) then matchConditions else [matchConditions] }, + '#withMatchConditionsMixin':: d.fn(help='"MatchConditions is a list of conditions that must be met for a request to be validated. Match conditions filter requests that have already been matched by the rules, namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests. There are a maximum of 64 match conditions allowed.\\n\\nIf a parameter object is provided, it can be accessed via the `params` handle in the same manner as validation expressions.\\n\\nThe exact matching logic is (in order):\\n 1. If ANY matchCondition evaluates to FALSE, the policy is skipped.\\n 2. If ALL matchConditions evaluate to TRUE, the policy is evaluated.\\n 3. If any matchCondition evaluates to an error (but none are FALSE):\\n - If failurePolicy=Fail, reject the request\\n - If failurePolicy=Ignore, the policy is skipped"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchConditions', type=d.T.array)]), + withMatchConditionsMixin(matchConditions): { matchConditions+: if std.isArray(v=matchConditions) then matchConditions else [matchConditions] }, + '#withValidations':: d.fn(help='"Validations contain CEL expressions which is used to apply the validation. Validations and AuditAnnotations may not both be empty; a minimum of one Validations or AuditAnnotations is required."', args=[d.arg(name='validations', type=d.T.array)]), + withValidations(validations): { validations: if std.isArray(v=validations) then validations else [validations] }, + '#withValidationsMixin':: d.fn(help='"Validations contain CEL expressions which is used to apply the validation. Validations and AuditAnnotations may not both be empty; a minimum of one Validations or AuditAnnotations is required."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='validations', type=d.T.array)]), + withValidationsMixin(validations): { validations+: if std.isArray(v=validations) then validations else [validations] }, + '#withVariables':: d.fn(help='"Variables contain definitions of variables that can be used in composition of other expressions. Each variable is defined as a named CEL expression. The variables defined here will be available under `variables` in other expressions of the policy except MatchConditions because MatchConditions are evaluated before the rest of the policy.\\n\\nThe expression of a variable can refer to other variables defined earlier in the list but not those after. Thus, Variables must be sorted by the order of first appearance and acyclic."', args=[d.arg(name='variables', type=d.T.array)]), + withVariables(variables): { variables: if std.isArray(v=variables) then variables else [variables] }, + '#withVariablesMixin':: d.fn(help='"Variables contain definitions of variables that can be used in composition of other expressions. Each variable is defined as a named CEL expression. 
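A sketch of the spec-level helpers above (failurePolicy, paramKind, matchConditions), again assuming the vendored import path resolves as below; the parameter group/kind and the condition are made-up examples:

local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';
local spec = k.admissionregistration.v1alpha1.validatingAdmissionPolicySpec;

spec.withFailurePolicy('Fail')
+ spec.paramKind.withApiVersion('rules.example.com/v1')
+ spec.paramKind.withKind('ReplicaLimit')
+ spec.withMatchConditions([
  // MatchCondition entries are plain objects: a name plus a CEL expression
  // that must evaluate to true for the policy to be evaluated at all.
  { name: 'exclude-kubelet-requests', expression: "!('system:nodes' in request.userInfo.groups)" },
])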
The variables defined here will be available under `variables` in other expressions of the policy except MatchConditions because MatchConditions are evaluated before the rest of the policy.\\n\\nThe expression of a variable can refer to other variables defined earlier in the list but not those after. Thus, Variables must be sorted by the order of first appearance and acyclic."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='variables', type=d.T.array)]), + withVariablesMixin(variables): { variables+: if std.isArray(v=variables) then variables else [variables] }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1alpha1/validatingAdmissionPolicyStatus.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1alpha1/validatingAdmissionPolicyStatus.libsonnet new file mode 100644 index 00000000000..a9130f09b74 --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1alpha1/validatingAdmissionPolicyStatus.libsonnet @@ -0,0 +1,19 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='validatingAdmissionPolicyStatus', url='', help='"ValidatingAdmissionPolicyStatus represents the status of a ValidatingAdmissionPolicy."'), + '#typeChecking':: d.obj(help='"TypeChecking contains results of type checking the expressions in the ValidatingAdmissionPolicy"'), + typeChecking: { + '#withExpressionWarnings':: d.fn(help='"The type checking warnings for each expression."', args=[d.arg(name='expressionWarnings', type=d.T.array)]), + withExpressionWarnings(expressionWarnings): { typeChecking+: { expressionWarnings: if std.isArray(v=expressionWarnings) then expressionWarnings else [expressionWarnings] } }, + '#withExpressionWarningsMixin':: d.fn(help='"The type checking warnings for each expression."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='expressionWarnings', type=d.T.array)]), + withExpressionWarningsMixin(expressionWarnings): { typeChecking+: { expressionWarnings+: if std.isArray(v=expressionWarnings) then expressionWarnings else [expressionWarnings] } }, + }, + '#withConditions':: d.fn(help="\"The conditions represent the latest available observations of a policy's current state.\"", args=[d.arg(name='conditions', type=d.T.array)]), + withConditions(conditions): { conditions: if std.isArray(v=conditions) then conditions else [conditions] }, + '#withConditionsMixin':: d.fn(help="\"The conditions represent the latest available observations of a policy's current state.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='conditions', type=d.T.array)]), + withConditionsMixin(conditions): { conditions+: if std.isArray(v=conditions) then conditions else [conditions] }, + '#withObservedGeneration':: d.fn(help='"The generation observed by the controller."', args=[d.arg(name='observedGeneration', type=d.T.integer)]), + withObservedGeneration(observedGeneration): { observedGeneration: observedGeneration }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1alpha1/validation.libsonnet 
b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1alpha1/validation.libsonnet new file mode 100644 index 00000000000..db493d390af --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1alpha1/validation.libsonnet @@ -0,0 +1,14 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='validation', url='', help='"Validation specifies the CEL expression which is used to apply the validation."'), + '#withExpression':: d.fn(help="\"Expression represents the expression which will be evaluated by CEL. ref: https://github.com/google/cel-spec CEL expressions have access to the contents of the API request/response, organized into CEL variables as well as some other useful variables:\\n\\n- 'object' - The object from the incoming request. The value is null for DELETE requests. - 'oldObject' - The existing object. The value is null for CREATE requests. - 'request' - Attributes of the API request([ref](/pkg/apis/admission/types.go#AdmissionRequest)). - 'params' - Parameter resource referred to by the policy binding being evaluated. Only populated if the policy has a ParamKind. - 'namespaceObject' - The namespace object that the incoming object belongs to. The value is null for cluster-scoped resources. - 'variables' - Map of composited variables, from its name to its lazily evaluated value.\\n For example, a variable named 'foo' can be accessed as 'variables.foo'.\\n- 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request.\\n See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz\\n- 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the\\n request resource.\\n\\nThe `apiVersion`, `kind`, `metadata.name` and `metadata.generateName` are always accessible from the root of the object. No other metadata properties are accessible.\\n\\nOnly property names of the form `[a-zA-Z_.-/][a-zA-Z0-9_.-/]*` are accessible. Accessible property names are escaped according to the following rules when accessed in the expression: - '__' escapes to '__underscores__' - '.' escapes to '__dot__' - '-' escapes to '__dash__' - '/' escapes to '__slash__' - Property names that exactly match a CEL RESERVED keyword escape to '__{keyword}__'. The keywords are:\\n\\t \\\"true\\\", \\\"false\\\", \\\"null\\\", \\\"in\\\", \\\"as\\\", \\\"break\\\", \\\"const\\\", \\\"continue\\\", \\\"else\\\", \\\"for\\\", \\\"function\\\", \\\"if\\\",\\n\\t \\\"import\\\", \\\"let\\\", \\\"loop\\\", \\\"package\\\", \\\"namespace\\\", \\\"return\\\".\\nExamples:\\n - Expression accessing a property named \\\"namespace\\\": {\\\"Expression\\\": \\\"object.__namespace__ \u003e 0\\\"}\\n - Expression accessing a property named \\\"x-prop\\\": {\\\"Expression\\\": \\\"object.x__dash__prop \u003e 0\\\"}\\n - Expression accessing a property named \\\"redact__d\\\": {\\\"Expression\\\": \\\"object.redact__underscores__d \u003e 0\\\"}\\n\\nEquality on arrays with list type of 'set' or 'map' ignores element order, i.e. [1, 2] == [2, 1]. 
Concatenation on arrays with x-kubernetes-list-type use the semantics of the list type:\\n - 'set': `X + Y` performs a union where the array positions of all elements in `X` are preserved and\\n non-intersecting elements in `Y` are appended, retaining their partial order.\\n - 'map': `X + Y` performs a merge where the array positions of all keys in `X` are preserved but the values\\n are overwritten by values in `Y` when the key sets of `X` and `Y` intersect. Elements in `Y` with\\n non-intersecting keys are appended, retaining their partial order.\\nRequired.\"", args=[d.arg(name='expression', type=d.T.string)]), + withExpression(expression): { expression: expression }, + '#withMessage':: d.fn(help='"Message represents the message displayed when validation fails. The message is required if the Expression contains line breaks. The message must not contain line breaks. If unset, the message is \\"failed rule: {Rule}\\". e.g. \\"must be a URL with the host matching spec.host\\" If the Expression contains line breaks. Message is required. The message must not contain line breaks. If unset, the message is \\"failed Expression: {Expression}\\"."', args=[d.arg(name='message', type=d.T.string)]), + withMessage(message): { message: message }, + '#withMessageExpression':: d.fn(help="\"messageExpression declares a CEL expression that evaluates to the validation failure message that is returned when this rule fails. Since messageExpression is used as a failure message, it must evaluate to a string. If both message and messageExpression are present on a validation, then messageExpression will be used if validation fails. If messageExpression results in a runtime error, the runtime error is logged, and the validation failure message is produced as if the messageExpression field were unset. If messageExpression evaluates to an empty string, a string with only spaces, or a string that contains line breaks, then the validation failure message will also be produced as if the messageExpression field were unset, and the fact that messageExpression produced an empty string/string with only spaces/string with line breaks will be logged. messageExpression has access to all the same variables as the `expression` except for 'authorizer' and 'authorizer.requestResource'. Example: \\\"object.x must be less than max (\\\"+string(params.max)+\\\")\\", args=[d.arg(name='messageExpression', type=d.T.string)]), + withMessageExpression(messageExpression): { messageExpression: messageExpression }, + '#withReason':: d.fn(help='"Reason represents a machine-readable description of why this validation failed. If this is the first validation in the list to fail, this reason, as well as the corresponding HTTP response code, are used in the HTTP response to the client. The currently supported reasons are: \\"Unauthorized\\", \\"Forbidden\\", \\"Invalid\\", \\"RequestEntityTooLarge\\". 
If not set, StatusReasonInvalid is used in the response to the client."', args=[d.arg(name='reason', type=d.T.string)]), + withReason(reason): { reason: reason }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1alpha1/variable.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1alpha1/variable.libsonnet new file mode 100644 index 00000000000..98f05ca7b2f --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1alpha1/variable.libsonnet @@ -0,0 +1,10 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='variable', url='', help='"Variable is the definition of a variable that is used for composition."'), + '#withExpression':: d.fn(help='"Expression is the expression that will be evaluated as the value of the variable. The CEL expression has access to the same identifiers as the CEL expressions in Validation."', args=[d.arg(name='expression', type=d.T.string)]), + withExpression(expression): { expression: expression }, + '#withName':: d.fn(help='"Name is the name of the variable. The name must be a valid CEL identifier and unique among all variables. The variable can be accessed in other expressions through `variables` For example, if name is \\"foo\\", the variable will be available as `variables.foo`"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { name: name }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1beta1/auditAnnotation.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1beta1/auditAnnotation.libsonnet new file mode 100644 index 00000000000..96c7387270c --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1beta1/auditAnnotation.libsonnet @@ -0,0 +1,10 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='auditAnnotation', url='', help='"AuditAnnotation describes how to produce an audit annotation for an API request."'), + '#withKey':: d.fn(help='"key specifies the audit annotation key. The audit annotation keys of a ValidatingAdmissionPolicy must be unique. The key must be a qualified name ([A-Za-z0-9][-A-Za-z0-9_.]*) no more than 63 bytes in length.\\n\\nThe key is combined with the resource name of the ValidatingAdmissionPolicy to construct an audit annotation key: \\"{ValidatingAdmissionPolicy name}/{key}\\".\\n\\nIf an admission webhook uses the same resource name as this ValidatingAdmissionPolicy and the same audit annotation key, the annotation key will be identical. In this case, the first annotation written with the key will be included in the audit event and all subsequent annotations with the same key will be discarded.\\n\\nRequired."', args=[d.arg(name='key', type=d.T.string)]), + withKey(key): { key: key }, + '#withValueExpression':: d.fn(help='"valueExpression represents the expression which is evaluated by CEL to produce an audit annotation value. The expression must evaluate to either a string or null value. If the expression evaluates to a string, the audit annotation is included with the string value. 
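A sketch tying the validation and variable objects above into a policy spec; the import path, names, and expressions are illustrative assumptions:

local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';
local vap = k.admissionregistration.v1alpha1;

vap.validatingAdmissionPolicySpec.withVariables([
  vap.variable.withName('replicas')
  + vap.variable.withExpression('object.spec.replicas'),
])
+ vap.validatingAdmissionPolicySpec.withValidations([
  // Validations can reference earlier variables via the `variables` handle.
  vap.validation.withExpression('variables.replicas <= 5')
  + vap.validation.withMessage('replicas must be at most 5')
  + vap.validation.withReason('Invalid'),
])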
If the expression evaluates to null or empty string the audit annotation will be omitted. The valueExpression may be no longer than 5kb in length. If the result of the valueExpression is more than 10kb in length, it will be truncated to 10kb.\\n\\nIf multiple ValidatingAdmissionPolicyBinding resources match an API request, then the valueExpression will be evaluated for each binding. All unique values produced by the valueExpressions will be joined together in a comma-separated list.\\n\\nRequired."', args=[d.arg(name='valueExpression', type=d.T.string)]), + withValueExpression(valueExpression): { valueExpression: valueExpression }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1beta1/expressionWarning.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1beta1/expressionWarning.libsonnet new file mode 100644 index 00000000000..895ad9dcdcb --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1beta1/expressionWarning.libsonnet @@ -0,0 +1,10 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='expressionWarning', url='', help='"ExpressionWarning is a warning information that targets a specific expression."'), + '#withFieldRef':: d.fn(help='"The path to the field that refers the expression. For example, the reference to the expression of the first item of validations is \\"spec.validations[0].expression\\', args=[d.arg(name='fieldRef', type=d.T.string)]), + withFieldRef(fieldRef): { fieldRef: fieldRef }, + '#withWarning':: d.fn(help='"The content of type checking information in a human-readable form. 
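A sketch of the auditAnnotation helpers above (import path assumed; the key and expression are illustrative). The resulting fragment is the shape a policy spec's withAuditAnnotations() expects in its array:

local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';
local aa = k.admissionregistration.v1beta1.auditAnnotation;

// Evaluates to { key: 'replica-count', valueExpression: '...' }.
aa.withKey('replica-count')
+ aa.withValueExpression("'replicas=' + string(object.spec.replicas)")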
Each line of the warning contains the type that the expression is checked against, followed by the type check error from the compiler."', args=[d.arg(name='warning', type=d.T.string)]), + withWarning(warning): { warning: warning }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1beta1/main.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1beta1/main.libsonnet new file mode 100644 index 00000000000..4a6b9fd4b56 --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1beta1/main.libsonnet @@ -0,0 +1,19 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='v1beta1', url='', help=''), + auditAnnotation: (import 'auditAnnotation.libsonnet'), + expressionWarning: (import 'expressionWarning.libsonnet'), + matchCondition: (import 'matchCondition.libsonnet'), + matchResources: (import 'matchResources.libsonnet'), + namedRuleWithOperations: (import 'namedRuleWithOperations.libsonnet'), + paramKind: (import 'paramKind.libsonnet'), + paramRef: (import 'paramRef.libsonnet'), + typeChecking: (import 'typeChecking.libsonnet'), + validatingAdmissionPolicy: (import 'validatingAdmissionPolicy.libsonnet'), + validatingAdmissionPolicyBinding: (import 'validatingAdmissionPolicyBinding.libsonnet'), + validatingAdmissionPolicyBindingSpec: (import 'validatingAdmissionPolicyBindingSpec.libsonnet'), + validatingAdmissionPolicySpec: (import 'validatingAdmissionPolicySpec.libsonnet'), + validatingAdmissionPolicyStatus: (import 'validatingAdmissionPolicyStatus.libsonnet'), + validation: (import 'validation.libsonnet'), + variable: (import 'variable.libsonnet'), +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1beta1/matchCondition.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1beta1/matchCondition.libsonnet new file mode 100644 index 00000000000..8d196606489 --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1beta1/matchCondition.libsonnet @@ -0,0 +1,10 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='matchCondition', url='', help='"MatchCondition represents a condition which must be fulfilled for a request to be sent to a webhook."'), + '#withExpression':: d.fn(help="\"Expression represents the expression which will be evaluated by CEL. Must evaluate to bool. CEL expressions have access to the contents of the AdmissionRequest and Authorizer, organized into CEL variables:\\n\\n'object' - The object from the incoming request. The value is null for DELETE requests. 'oldObject' - The existing object. The value is null for CREATE requests. 'request' - Attributes of the admission request(/pkg/apis/admission/types.go#AdmissionRequest). 'authorizer' - A CEL Authorizer. 
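The v1beta1 main.libsonnet above simply re-exports the per-object files, so the same helpers are reachable through the group/version index; a sketch, assuming the top-level import path below:

local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';
local admission = k.admissionregistration.v1beta1;

// Same object as importing matchCondition.libsonnet directly.
admission.matchCondition.withName('only-authenticated')
+ admission.matchCondition.withExpression("request.userInfo.username != 'system:anonymous'")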
May be used to perform authorization checks for the principal (user or service account) of the request.\\n See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz\\n'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the\\n request resource.\\nDocumentation on CEL: https://kubernetes.io/docs/reference/using-api/cel/\\n\\nRequired.\"", args=[d.arg(name='expression', type=d.T.string)]), + withExpression(expression): { expression: expression }, + '#withName':: d.fn(help="\"Name is an identifier for this match condition, used for strategic merging of MatchConditions, as well as providing an identifier for logging purposes. A good name should be descriptive of the associated expression. Name must be a qualified name consisting of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character (e.g. 'MyName', or 'my.name', or '123-abc', regex used for validation is '([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]') with an optional DNS subdomain prefix and '/' (e.g. 'example.com/MyName')\\n\\nRequired.\"", args=[d.arg(name='name', type=d.T.string)]), + withName(name): { name: name }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1beta1/matchResources.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1beta1/matchResources.libsonnet new file mode 100644 index 00000000000..0215d0c956c --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1beta1/matchResources.libsonnet @@ -0,0 +1,38 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='matchResources', url='', help='"MatchResources decides whether to run the admission control policy on an object based on whether it meets the match criteria. The exclude rules take precedence over include rules (if a resource matches both, it is excluded)"'), + '#namespaceSelector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), + namespaceSelector: { + '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressions(matchExpressions): { namespaceSelector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } }, + '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressionsMixin(matchExpressions): { namespaceSelector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } }, + '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". 
The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabels(matchLabels): { namespaceSelector+: { matchLabels: matchLabels } }, + '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabelsMixin(matchLabels): { namespaceSelector+: { matchLabels+: matchLabels } }, + }, + '#objectSelector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), + objectSelector: { + '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressions(matchExpressions): { objectSelector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } }, + '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressionsMixin(matchExpressions): { objectSelector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } }, + '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabels(matchLabels): { objectSelector+: { matchLabels: matchLabels } }, + '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabelsMixin(matchLabels): { objectSelector+: { matchLabels+: matchLabels } }, + }, + '#withExcludeResourceRules':: d.fn(help='"ExcludeResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy should not care about. The exclude rules take precedence over include rules (if a resource matches both, it is excluded)"', args=[d.arg(name='excludeResourceRules', type=d.T.array)]), + withExcludeResourceRules(excludeResourceRules): { excludeResourceRules: if std.isArray(v=excludeResourceRules) then excludeResourceRules else [excludeResourceRules] }, + '#withExcludeResourceRulesMixin':: d.fn(help='"ExcludeResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy should not care about. 
The exclude rules take precedence over include rules (if a resource matches both, it is excluded)"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='excludeResourceRules', type=d.T.array)]), + withExcludeResourceRulesMixin(excludeResourceRules): { excludeResourceRules+: if std.isArray(v=excludeResourceRules) then excludeResourceRules else [excludeResourceRules] }, + '#withMatchPolicy':: d.fn(help='"matchPolicy defines how the \\"MatchResources\\" list is used to match incoming requests. Allowed values are \\"Exact\\" or \\"Equivalent\\".\\n\\n- Exact: match a request only if it exactly matches a specified rule. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, but \\"rules\\" only included `apiGroups:[\\"apps\\"], apiVersions:[\\"v1\\"], resources: [\\"deployments\\"]`, a request to apps/v1beta1 or extensions/v1beta1 would not be sent to the ValidatingAdmissionPolicy.\\n\\n- Equivalent: match a request if modifies a resource listed in rules, even via another API group or version. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, and \\"rules\\" only included `apiGroups:[\\"apps\\"], apiVersions:[\\"v1\\"], resources: [\\"deployments\\"]`, a request to apps/v1beta1 or extensions/v1beta1 would be converted to apps/v1 and sent to the ValidatingAdmissionPolicy.\\n\\nDefaults to \\"Equivalent\\', args=[d.arg(name='matchPolicy', type=d.T.string)]), + withMatchPolicy(matchPolicy): { matchPolicy: matchPolicy }, + '#withResourceRules':: d.fn(help='"ResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy matches. The policy cares about an operation if it matches _any_ Rule."', args=[d.arg(name='resourceRules', type=d.T.array)]), + withResourceRules(resourceRules): { resourceRules: if std.isArray(v=resourceRules) then resourceRules else [resourceRules] }, + '#withResourceRulesMixin':: d.fn(help='"ResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy matches. The policy cares about an operation if it matches _any_ Rule."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='resourceRules', type=d.T.array)]), + withResourceRulesMixin(resourceRules): { resourceRules+: if std.isArray(v=resourceRules) then resourceRules else [resourceRules] }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1beta1/namedRuleWithOperations.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1beta1/namedRuleWithOperations.libsonnet new file mode 100644 index 00000000000..a1726a57081 --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1beta1/namedRuleWithOperations.libsonnet @@ -0,0 +1,28 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='namedRuleWithOperations', url='', help='"NamedRuleWithOperations is a tuple of Operations and Resources with ResourceNames."'), + '#withApiGroups':: d.fn(help="\"APIGroups is the API groups the resources belong to. '*' is all groups. If '*' is present, the length of the slice must be one. 
Required.\"", args=[d.arg(name='apiGroups', type=d.T.array)]), + withApiGroups(apiGroups): { apiGroups: if std.isArray(v=apiGroups) then apiGroups else [apiGroups] }, + '#withApiGroupsMixin':: d.fn(help="\"APIGroups is the API groups the resources belong to. '*' is all groups. If '*' is present, the length of the slice must be one. Required.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='apiGroups', type=d.T.array)]), + withApiGroupsMixin(apiGroups): { apiGroups+: if std.isArray(v=apiGroups) then apiGroups else [apiGroups] }, + '#withApiVersions':: d.fn(help="\"APIVersions is the API versions the resources belong to. '*' is all versions. If '*' is present, the length of the slice must be one. Required.\"", args=[d.arg(name='apiVersions', type=d.T.array)]), + withApiVersions(apiVersions): { apiVersions: if std.isArray(v=apiVersions) then apiVersions else [apiVersions] }, + '#withApiVersionsMixin':: d.fn(help="\"APIVersions is the API versions the resources belong to. '*' is all versions. If '*' is present, the length of the slice must be one. Required.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='apiVersions', type=d.T.array)]), + withApiVersionsMixin(apiVersions): { apiVersions+: if std.isArray(v=apiVersions) then apiVersions else [apiVersions] }, + '#withOperations':: d.fn(help="\"Operations is the operations the admission hook cares about - CREATE, UPDATE, DELETE, CONNECT or * for all of those operations and any future admission operations that are added. If '*' is present, the length of the slice must be one. Required.\"", args=[d.arg(name='operations', type=d.T.array)]), + withOperations(operations): { operations: if std.isArray(v=operations) then operations else [operations] }, + '#withOperationsMixin':: d.fn(help="\"Operations is the operations the admission hook cares about - CREATE, UPDATE, DELETE, CONNECT or * for all of those operations and any future admission operations that are added. If '*' is present, the length of the slice must be one. Required.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='operations', type=d.T.array)]), + withOperationsMixin(operations): { operations+: if std.isArray(v=operations) then operations else [operations] }, + '#withResourceNames':: d.fn(help='"ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed."', args=[d.arg(name='resourceNames', type=d.T.array)]), + withResourceNames(resourceNames): { resourceNames: if std.isArray(v=resourceNames) then resourceNames else [resourceNames] }, + '#withResourceNamesMixin':: d.fn(help='"ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='resourceNames', type=d.T.array)]), + withResourceNamesMixin(resourceNames): { resourceNames+: if std.isArray(v=resourceNames) then resourceNames else [resourceNames] }, + '#withResources':: d.fn(help="\"Resources is a list of resources this rule applies to.\\n\\nFor example: 'pods' means pods. 'pods/log' means the log subresource of pods. '*' means all resources, but not subresources. 'pods/*' means all subresources of pods. '*/scale' means all scale subresources. 
'*/*' means all resources and their subresources.\\n\\nIf wildcard is present, the validation rule will ensure resources do not overlap with each other.\\n\\nDepending on the enclosing object, subresources might not be allowed. Required.\"", args=[d.arg(name='resources', type=d.T.array)]), + withResources(resources): { resources: if std.isArray(v=resources) then resources else [resources] }, + '#withResourcesMixin':: d.fn(help="\"Resources is a list of resources this rule applies to.\\n\\nFor example: 'pods' means pods. 'pods/log' means the log subresource of pods. '*' means all resources, but not subresources. 'pods/*' means all subresources of pods. '*/scale' means all scale subresources. '*/*' means all resources and their subresources.\\n\\nIf wildcard is present, the validation rule will ensure resources do not overlap with each other.\\n\\nDepending on the enclosing object, subresources might not be allowed. Required.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='resources', type=d.T.array)]), + withResourcesMixin(resources): { resources+: if std.isArray(v=resources) then resources else [resources] }, + '#withScope':: d.fn(help='"scope specifies the scope of this rule. Valid values are \\"Cluster\\", \\"Namespaced\\", and \\"*\\" \\"Cluster\\" means that only cluster-scoped resources will match this rule. Namespace API objects are cluster-scoped. \\"Namespaced\\" means that only namespaced resources will match this rule. \\"*\\" means that there are no scope restrictions. Subresources match the scope of their parent resource. Default is \\"*\\"."', args=[d.arg(name='scope', type=d.T.string)]), + withScope(scope): { scope: scope }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1beta1/paramKind.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1beta1/paramKind.libsonnet new file mode 100644 index 00000000000..11a349422a7 --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1beta1/paramKind.libsonnet @@ -0,0 +1,8 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='paramKind', url='', help='"ParamKind is a tuple of Group Kind and Version."'), + '#withKind':: d.fn(help='"Kind is the API kind the resources belong to. Required."', args=[d.arg(name='kind', type=d.T.string)]), + withKind(kind): { kind: kind }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1beta1/paramRef.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1beta1/paramRef.libsonnet new file mode 100644 index 00000000000..51da556e5c7 --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1beta1/paramRef.libsonnet @@ -0,0 +1,23 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='paramRef', url='', help='"ParamRef describes how to locate the params to be used as input to expressions of rules applied by a policy binding."'), + '#selector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. 
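A sketch combining the matchResources and namedRuleWithOperations helpers above; the import path, API groups, operations, and labels are illustrative assumptions:

local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';
local admission = k.admissionregistration.v1beta1;

admission.matchResources.withResourceRules([
  admission.namedRuleWithOperations.withApiGroups(['apps'])
  + admission.namedRuleWithOperations.withApiVersions(['v1'])
  + admission.namedRuleWithOperations.withOperations(['CREATE', 'UPDATE'])
  + admission.namedRuleWithOperations.withResources(['deployments']),
])
+ admission.matchResources.namespaceSelector.withMatchLabels({ environment: 'prod' })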
A null label selector matches no objects."'), + selector: { + '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressions(matchExpressions): { selector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } }, + '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressionsMixin(matchExpressions): { selector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } }, + '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabels(matchLabels): { selector+: { matchLabels: matchLabels } }, + '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabelsMixin(matchLabels): { selector+: { matchLabels+: matchLabels } }, + }, + '#withName':: d.fn(help='"name is the name of the resource being referenced.\\n\\nOne of `name` or `selector` must be set, but `name` and `selector` are mutually exclusive properties. If one is set, the other must be unset.\\n\\nA single parameter used for all admission requests can be configured by setting the `name` field, leaving `selector` blank, and setting namespace if `paramKind` is namespace-scoped."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { name: name }, + '#withNamespace':: d.fn(help='"namespace is the namespace of the referenced resource. Allows limiting the search for params to a specific namespace. Applies to both `name` and `selector` fields.\\n\\nA per-namespace parameter may be used by specifying a namespace-scoped `paramKind` in the policy and leaving this field empty.\\n\\n- If `paramKind` is cluster-scoped, this field MUST be unset. Setting this field results in a configuration error.\\n\\n- If `paramKind` is namespace-scoped, the namespace of the object being evaluated for admission will be used when this field is left unset. Take care that if this is left empty the binding must not match any cluster-scoped resources, which will result in an error."', args=[d.arg(name='namespace', type=d.T.string)]), + withNamespace(namespace): { namespace: namespace }, + '#withParameterNotFoundAction':: d.fn(help='"`parameterNotFoundAction` controls the behavior of the binding when the resource exists, and name or selector is valid, but there are no parameters matched by the binding. If the value is set to `Allow`, then no matched parameters will be treated as successful validation by the binding. 
If set to `Deny`, then no matched parameters will be subject to the `failurePolicy` of the policy.\\n\\nAllowed values are `Allow` or `Deny`\\n\\nRequired"', args=[d.arg(name='parameterNotFoundAction', type=d.T.string)]), + withParameterNotFoundAction(parameterNotFoundAction): { parameterNotFoundAction: parameterNotFoundAction }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1beta1/typeChecking.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1beta1/typeChecking.libsonnet new file mode 100644 index 00000000000..1af60568c08 --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1beta1/typeChecking.libsonnet @@ -0,0 +1,10 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='typeChecking', url='', help='"TypeChecking contains results of type checking the expressions in the ValidatingAdmissionPolicy"'), + '#withExpressionWarnings':: d.fn(help='"The type checking warnings for each expression."', args=[d.arg(name='expressionWarnings', type=d.T.array)]), + withExpressionWarnings(expressionWarnings): { expressionWarnings: if std.isArray(v=expressionWarnings) then expressionWarnings else [expressionWarnings] }, + '#withExpressionWarningsMixin':: d.fn(help='"The type checking warnings for each expression."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='expressionWarnings', type=d.T.array)]), + withExpressionWarningsMixin(expressionWarnings): { expressionWarnings+: if std.isArray(v=expressionWarnings) then expressionWarnings else [expressionWarnings] }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1beta1/validatingAdmissionPolicy.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1beta1/validatingAdmissionPolicy.libsonnet new file mode 100644 index 00000000000..789e7e9adf2 --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1beta1/validatingAdmissionPolicy.libsonnet @@ -0,0 +1,117 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='validatingAdmissionPolicy', url='', help='"ValidatingAdmissionPolicy describes the definition of an admission validation policy that accepts or rejects an object without changing it."'), + '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), + metadata: { + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + withAnnotations(annotations): { metadata+: { annotations: annotations } }, + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. 
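A sketch of the paramRef helpers above, as they would typically be composed into a ValidatingAdmissionPolicyBinding spec; the import path and resource names are illustrative assumptions:

local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';
local admission = k.admissionregistration.v1beta1;

// Evaluates to { name: ..., namespace: ..., parameterNotFoundAction: 'Deny' }.
admission.paramRef.withName('replica-limit-prod')
+ admission.paramRef.withNamespace('default')
+ admission.paramRef.withParameterNotFoundAction('Deny')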
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, + '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), + withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, + '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), + withDeletionGracePeriodSeconds(deletionGracePeriodSeconds): { metadata+: { deletionGracePeriodSeconds: deletionGracePeriodSeconds } }, + '#withDeletionTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='deletionTimestamp', type=d.T.string)]), + withDeletionTimestamp(deletionTimestamp): { metadata+: { deletionTimestamp: deletionTimestamp } }, + '#withFinalizers':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."', args=[d.arg(name='finalizers', type=d.T.array)]), + withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, + '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. 
Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), + withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + withGenerateName(generateName): { metadata+: { generateName: generateName } }, + '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), + withGeneration(generation): { metadata+: { generation: generation } }, + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), + withLabels(labels): { metadata+: { labels: labels } }, + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + withLabelsMixin(labels): { metadata+: { labels+: labels } }, + '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), + withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, + '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". 
The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), + withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { metadata+: { name: name } }, + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + withNamespace(namespace): { metadata+: { namespace: namespace } }, + '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), + withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, + '#withOwnerReferencesMixin':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ownerReferences', type=d.T.array)]), + withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, + '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), + withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), + withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), + withUid(uid): { metadata+: { uid: uid } }, + }, + '#new':: d.fn(help='new returns an instance of ValidatingAdmissionPolicy', args=[d.arg(name='name', type=d.T.string)]), + new(name): { + apiVersion: 'admissionregistration.k8s.io/v1beta1', + kind: 'ValidatingAdmissionPolicy', + } + self.metadata.withName(name=name), + '#spec':: d.obj(help='"ValidatingAdmissionPolicySpec is the specification of the desired behavior of the AdmissionPolicy."'), + spec: { + '#matchConstraints':: d.obj(help='"MatchResources decides whether to run the admission control policy on an object based on whether it meets the match criteria. The exclude rules take precedence over include rules (if a resource matches both, it is excluded)"'), + matchConstraints: { + '#namespaceSelector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), + namespaceSelector: { + '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressions(matchExpressions): { spec+: { matchConstraints+: { namespaceSelector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } } }, + '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressionsMixin(matchExpressions): { spec+: { matchConstraints+: { namespaceSelector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } } }, + '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabels(matchLabels): { spec+: { matchConstraints+: { namespaceSelector+: { matchLabels: matchLabels } } } }, + '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". 
The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabelsMixin(matchLabels): { spec+: { matchConstraints+: { namespaceSelector+: { matchLabels+: matchLabels } } } }, + }, + '#objectSelector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), + objectSelector: { + '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressions(matchExpressions): { spec+: { matchConstraints+: { objectSelector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } } }, + '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressionsMixin(matchExpressions): { spec+: { matchConstraints+: { objectSelector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } } }, + '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabels(matchLabels): { spec+: { matchConstraints+: { objectSelector+: { matchLabels: matchLabels } } } }, + '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabelsMixin(matchLabels): { spec+: { matchConstraints+: { objectSelector+: { matchLabels+: matchLabels } } } }, + }, + '#withExcludeResourceRules':: d.fn(help='"ExcludeResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy should not care about. The exclude rules take precedence over include rules (if a resource matches both, it is excluded)"', args=[d.arg(name='excludeResourceRules', type=d.T.array)]), + withExcludeResourceRules(excludeResourceRules): { spec+: { matchConstraints+: { excludeResourceRules: if std.isArray(v=excludeResourceRules) then excludeResourceRules else [excludeResourceRules] } } }, + '#withExcludeResourceRulesMixin':: d.fn(help='"ExcludeResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy should not care about. 
The exclude rules take precedence over include rules (if a resource matches both, it is excluded)"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='excludeResourceRules', type=d.T.array)]), + withExcludeResourceRulesMixin(excludeResourceRules): { spec+: { matchConstraints+: { excludeResourceRules+: if std.isArray(v=excludeResourceRules) then excludeResourceRules else [excludeResourceRules] } } }, + '#withMatchPolicy':: d.fn(help='"matchPolicy defines how the \\"MatchResources\\" list is used to match incoming requests. Allowed values are \\"Exact\\" or \\"Equivalent\\".\\n\\n- Exact: match a request only if it exactly matches a specified rule. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, but \\"rules\\" only included `apiGroups:[\\"apps\\"], apiVersions:[\\"v1\\"], resources: [\\"deployments\\"]`, a request to apps/v1beta1 or extensions/v1beta1 would not be sent to the ValidatingAdmissionPolicy.\\n\\n- Equivalent: match a request if modifies a resource listed in rules, even via another API group or version. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, and \\"rules\\" only included `apiGroups:[\\"apps\\"], apiVersions:[\\"v1\\"], resources: [\\"deployments\\"]`, a request to apps/v1beta1 or extensions/v1beta1 would be converted to apps/v1 and sent to the ValidatingAdmissionPolicy.\\n\\nDefaults to \\"Equivalent\\', args=[d.arg(name='matchPolicy', type=d.T.string)]), + withMatchPolicy(matchPolicy): { spec+: { matchConstraints+: { matchPolicy: matchPolicy } } }, + '#withResourceRules':: d.fn(help='"ResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy matches. The policy cares about an operation if it matches _any_ Rule."', args=[d.arg(name='resourceRules', type=d.T.array)]), + withResourceRules(resourceRules): { spec+: { matchConstraints+: { resourceRules: if std.isArray(v=resourceRules) then resourceRules else [resourceRules] } } }, + '#withResourceRulesMixin':: d.fn(help='"ResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy matches. The policy cares about an operation if it matches _any_ Rule."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='resourceRules', type=d.T.array)]), + withResourceRulesMixin(resourceRules): { spec+: { matchConstraints+: { resourceRules+: if std.isArray(v=resourceRules) then resourceRules else [resourceRules] } } }, + }, + '#paramKind':: d.obj(help='"ParamKind is a tuple of Group Kind and Version."'), + paramKind: { + '#withApiVersion':: d.fn(help='"APIVersion is the API group version the resources belong to. In format of \\"group/version\\". Required."', args=[d.arg(name='apiVersion', type=d.T.string)]), + withApiVersion(apiVersion): { spec+: { paramKind+: { apiVersion: apiVersion } } }, + '#withKind':: d.fn(help='"Kind is the API kind the resources belong to. Required."', args=[d.arg(name='kind', type=d.T.string)]), + withKind(kind): { spec+: { paramKind+: { kind: kind } } }, + }, + '#withAuditAnnotations':: d.fn(help='"auditAnnotations contains CEL expressions which are used to produce audit annotations for the audit event of the API request. 
validations and auditAnnotations may not both be empty; a least one of validations or auditAnnotations is required."', args=[d.arg(name='auditAnnotations', type=d.T.array)]), + withAuditAnnotations(auditAnnotations): { spec+: { auditAnnotations: if std.isArray(v=auditAnnotations) then auditAnnotations else [auditAnnotations] } }, + '#withAuditAnnotationsMixin':: d.fn(help='"auditAnnotations contains CEL expressions which are used to produce audit annotations for the audit event of the API request. validations and auditAnnotations may not both be empty; a least one of validations or auditAnnotations is required."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='auditAnnotations', type=d.T.array)]), + withAuditAnnotationsMixin(auditAnnotations): { spec+: { auditAnnotations+: if std.isArray(v=auditAnnotations) then auditAnnotations else [auditAnnotations] } }, + '#withFailurePolicy':: d.fn(help='"failurePolicy defines how to handle failures for the admission policy. Failures can occur from CEL expression parse errors, type check errors, runtime errors and invalid or mis-configured policy definitions or bindings.\\n\\nA policy is invalid if spec.paramKind refers to a non-existent Kind. A binding is invalid if spec.paramRef.name refers to a non-existent resource.\\n\\nfailurePolicy does not define how validations that evaluate to false are handled.\\n\\nWhen failurePolicy is set to Fail, ValidatingAdmissionPolicyBinding validationActions define how failures are enforced.\\n\\nAllowed values are Ignore or Fail. Defaults to Fail."', args=[d.arg(name='failurePolicy', type=d.T.string)]), + withFailurePolicy(failurePolicy): { spec+: { failurePolicy: failurePolicy } }, + '#withMatchConditions':: d.fn(help='"MatchConditions is a list of conditions that must be met for a request to be validated. Match conditions filter requests that have already been matched by the rules, namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests. There are a maximum of 64 match conditions allowed.\\n\\nIf a parameter object is provided, it can be accessed via the `params` handle in the same manner as validation expressions.\\n\\nThe exact matching logic is (in order):\\n 1. If ANY matchCondition evaluates to FALSE, the policy is skipped.\\n 2. If ALL matchConditions evaluate to TRUE, the policy is evaluated.\\n 3. If any matchCondition evaluates to an error (but none are FALSE):\\n - If failurePolicy=Fail, reject the request\\n - If failurePolicy=Ignore, the policy is skipped"', args=[d.arg(name='matchConditions', type=d.T.array)]), + withMatchConditions(matchConditions): { spec+: { matchConditions: if std.isArray(v=matchConditions) then matchConditions else [matchConditions] } }, + '#withMatchConditionsMixin':: d.fn(help='"MatchConditions is a list of conditions that must be met for a request to be validated. Match conditions filter requests that have already been matched by the rules, namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests. There are a maximum of 64 match conditions allowed.\\n\\nIf a parameter object is provided, it can be accessed via the `params` handle in the same manner as validation expressions.\\n\\nThe exact matching logic is (in order):\\n 1. If ANY matchCondition evaluates to FALSE, the policy is skipped.\\n 2. If ALL matchConditions evaluate to TRUE, the policy is evaluated.\\n 3. 
If any matchCondition evaluates to an error (but none are FALSE):\\n - If failurePolicy=Fail, reject the request\\n - If failurePolicy=Ignore, the policy is skipped"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchConditions', type=d.T.array)]), + withMatchConditionsMixin(matchConditions): { spec+: { matchConditions+: if std.isArray(v=matchConditions) then matchConditions else [matchConditions] } }, + '#withValidations':: d.fn(help='"Validations contain CEL expressions which is used to apply the validation. Validations and AuditAnnotations may not both be empty; a minimum of one Validations or AuditAnnotations is required."', args=[d.arg(name='validations', type=d.T.array)]), + withValidations(validations): { spec+: { validations: if std.isArray(v=validations) then validations else [validations] } }, + '#withValidationsMixin':: d.fn(help='"Validations contain CEL expressions which is used to apply the validation. Validations and AuditAnnotations may not both be empty; a minimum of one Validations or AuditAnnotations is required."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='validations', type=d.T.array)]), + withValidationsMixin(validations): { spec+: { validations+: if std.isArray(v=validations) then validations else [validations] } }, + '#withVariables':: d.fn(help='"Variables contain definitions of variables that can be used in composition of other expressions. Each variable is defined as a named CEL expression. The variables defined here will be available under `variables` in other expressions of the policy except MatchConditions because MatchConditions are evaluated before the rest of the policy.\\n\\nThe expression of a variable can refer to other variables defined earlier in the list but not those after. Thus, Variables must be sorted by the order of first appearance and acyclic."', args=[d.arg(name='variables', type=d.T.array)]), + withVariables(variables): { spec+: { variables: if std.isArray(v=variables) then variables else [variables] } }, + '#withVariablesMixin':: d.fn(help='"Variables contain definitions of variables that can be used in composition of other expressions. Each variable is defined as a named CEL expression. The variables defined here will be available under `variables` in other expressions of the policy except MatchConditions because MatchConditions are evaluated before the rest of the policy.\\n\\nThe expression of a variable can refer to other variables defined earlier in the list but not those after. 
Thus, Variables must be sorted by the order of first appearance and acyclic."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='variables', type=d.T.array)]), + withVariablesMixin(variables): { spec+: { variables+: if std.isArray(v=variables) then variables else [variables] } }, + }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1beta1/validatingAdmissionPolicyBinding.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1beta1/validatingAdmissionPolicyBinding.libsonnet new file mode 100644 index 00000000000..12276863e67 --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1beta1/validatingAdmissionPolicyBinding.libsonnet @@ -0,0 +1,118 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='validatingAdmissionPolicyBinding', url='', help="\"ValidatingAdmissionPolicyBinding binds the ValidatingAdmissionPolicy with paramerized resources. ValidatingAdmissionPolicyBinding and parameter CRDs together define how cluster administrators configure policies for clusters.\\n\\nFor a given admission request, each binding will cause its policy to be evaluated N times, where N is 1 for policies/bindings that don't use params, otherwise N is the number of parameters selected by the binding.\\n\\nThe CEL expressions of a policy must have a computed CEL cost below the maximum CEL budget. Each evaluation of the policy is given an independent CEL cost budget. Adding/removing policies, bindings, or params can not affect whether a given (policy, binding, param) combination is within its own CEL budget.\""), + '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), + metadata: { + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + withAnnotations(annotations): { metadata+: { annotations: annotations } }, + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, + '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), + withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, + '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. 
Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), + withDeletionGracePeriodSeconds(deletionGracePeriodSeconds): { metadata+: { deletionGracePeriodSeconds: deletionGracePeriodSeconds } }, + '#withDeletionTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='deletionTimestamp', type=d.T.string)]), + withDeletionTimestamp(deletionTimestamp): { metadata+: { deletionTimestamp: deletionTimestamp } }, + '#withFinalizers':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."', args=[d.arg(name='finalizers', type=d.T.array)]), + withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, + '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), + withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. 
The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + withGenerateName(generateName): { metadata+: { generateName: generateName } }, + '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), + withGeneration(generation): { metadata+: { generation: generation } }, + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), + withLabels(labels): { metadata+: { labels: labels } }, + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + withLabelsMixin(labels): { metadata+: { labels+: labels } }, + '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), + withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, + '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), + withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { metadata+: { name: name } }, + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + withNamespace(namespace): { metadata+: { namespace: namespace } }, + '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), + withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, + '#withOwnerReferencesMixin':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ownerReferences', type=d.T.array)]), + withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, + '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), + withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), + withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), + withUid(uid): { metadata+: { uid: uid } }, + }, + '#new':: d.fn(help='new returns an instance of ValidatingAdmissionPolicyBinding', args=[d.arg(name='name', type=d.T.string)]), + new(name): { + apiVersion: 'admissionregistration.k8s.io/v1beta1', + kind: 'ValidatingAdmissionPolicyBinding', + } + self.metadata.withName(name=name), + '#spec':: d.obj(help='"ValidatingAdmissionPolicyBindingSpec is the specification of the ValidatingAdmissionPolicyBinding."'), + spec: { + '#matchResources':: d.obj(help='"MatchResources decides whether to run the admission control policy on an object based on whether it meets the match criteria. The exclude rules take precedence over include rules (if a resource matches both, it is excluded)"'), + matchResources: { + '#namespaceSelector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), + namespaceSelector: { + '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressions(matchExpressions): { spec+: { matchResources+: { namespaceSelector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } } }, + '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressionsMixin(matchExpressions): { spec+: { matchResources+: { namespaceSelector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } } }, + '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabels(matchLabels): { spec+: { matchResources+: { namespaceSelector+: { matchLabels: matchLabels } } } }, + '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabelsMixin(matchLabels): { spec+: { matchResources+: { namespaceSelector+: { matchLabels+: matchLabels } } } }, + }, + '#objectSelector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), + objectSelector: { + '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. 
The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressions(matchExpressions): { spec+: { matchResources+: { objectSelector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } } }, + '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressionsMixin(matchExpressions): { spec+: { matchResources+: { objectSelector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } } }, + '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabels(matchLabels): { spec+: { matchResources+: { objectSelector+: { matchLabels: matchLabels } } } }, + '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabelsMixin(matchLabels): { spec+: { matchResources+: { objectSelector+: { matchLabels+: matchLabels } } } }, + }, + '#withExcludeResourceRules':: d.fn(help='"ExcludeResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy should not care about. The exclude rules take precedence over include rules (if a resource matches both, it is excluded)"', args=[d.arg(name='excludeResourceRules', type=d.T.array)]), + withExcludeResourceRules(excludeResourceRules): { spec+: { matchResources+: { excludeResourceRules: if std.isArray(v=excludeResourceRules) then excludeResourceRules else [excludeResourceRules] } } }, + '#withExcludeResourceRulesMixin':: d.fn(help='"ExcludeResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy should not care about. The exclude rules take precedence over include rules (if a resource matches both, it is excluded)"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='excludeResourceRules', type=d.T.array)]), + withExcludeResourceRulesMixin(excludeResourceRules): { spec+: { matchResources+: { excludeResourceRules+: if std.isArray(v=excludeResourceRules) then excludeResourceRules else [excludeResourceRules] } } }, + '#withMatchPolicy':: d.fn(help='"matchPolicy defines how the \\"MatchResources\\" list is used to match incoming requests. Allowed values are \\"Exact\\" or \\"Equivalent\\".\\n\\n- Exact: match a request only if it exactly matches a specified rule. 
For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, but \\"rules\\" only included `apiGroups:[\\"apps\\"], apiVersions:[\\"v1\\"], resources: [\\"deployments\\"]`, a request to apps/v1beta1 or extensions/v1beta1 would not be sent to the ValidatingAdmissionPolicy.\\n\\n- Equivalent: match a request if modifies a resource listed in rules, even via another API group or version. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, and \\"rules\\" only included `apiGroups:[\\"apps\\"], apiVersions:[\\"v1\\"], resources: [\\"deployments\\"]`, a request to apps/v1beta1 or extensions/v1beta1 would be converted to apps/v1 and sent to the ValidatingAdmissionPolicy.\\n\\nDefaults to \\"Equivalent\\', args=[d.arg(name='matchPolicy', type=d.T.string)]), + withMatchPolicy(matchPolicy): { spec+: { matchResources+: { matchPolicy: matchPolicy } } }, + '#withResourceRules':: d.fn(help='"ResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy matches. The policy cares about an operation if it matches _any_ Rule."', args=[d.arg(name='resourceRules', type=d.T.array)]), + withResourceRules(resourceRules): { spec+: { matchResources+: { resourceRules: if std.isArray(v=resourceRules) then resourceRules else [resourceRules] } } }, + '#withResourceRulesMixin':: d.fn(help='"ResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy matches. The policy cares about an operation if it matches _any_ Rule."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='resourceRules', type=d.T.array)]), + withResourceRulesMixin(resourceRules): { spec+: { matchResources+: { resourceRules+: if std.isArray(v=resourceRules) then resourceRules else [resourceRules] } } }, + }, + '#paramRef':: d.obj(help='"ParamRef describes how to locate the params to be used as input to expressions of rules applied by a policy binding."'), + paramRef: { + '#selector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), + selector: { + '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressions(matchExpressions): { spec+: { paramRef+: { selector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } } }, + '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressionsMixin(matchExpressions): { spec+: { paramRef+: { selector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } } }, + '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". 
The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabels(matchLabels): { spec+: { paramRef+: { selector+: { matchLabels: matchLabels } } } }, + '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabelsMixin(matchLabels): { spec+: { paramRef+: { selector+: { matchLabels+: matchLabels } } } }, + }, + '#withName':: d.fn(help='"name is the name of the resource being referenced.\\n\\nOne of `name` or `selector` must be set, but `name` and `selector` are mutually exclusive properties. If one is set, the other must be unset.\\n\\nA single parameter used for all admission requests can be configured by setting the `name` field, leaving `selector` blank, and setting namespace if `paramKind` is namespace-scoped."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { spec+: { paramRef+: { name: name } } }, + '#withNamespace':: d.fn(help='"namespace is the namespace of the referenced resource. Allows limiting the search for params to a specific namespace. Applies to both `name` and `selector` fields.\\n\\nA per-namespace parameter may be used by specifying a namespace-scoped `paramKind` in the policy and leaving this field empty.\\n\\n- If `paramKind` is cluster-scoped, this field MUST be unset. Setting this field results in a configuration error.\\n\\n- If `paramKind` is namespace-scoped, the namespace of the object being evaluated for admission will be used when this field is left unset. Take care that if this is left empty the binding must not match any cluster-scoped resources, which will result in an error."', args=[d.arg(name='namespace', type=d.T.string)]), + withNamespace(namespace): { spec+: { paramRef+: { namespace: namespace } } }, + '#withParameterNotFoundAction':: d.fn(help='"`parameterNotFoundAction` controls the behavior of the binding when the resource exists, and name or selector is valid, but there are no parameters matched by the binding. If the value is set to `Allow`, then no matched parameters will be treated as successful validation by the binding. If set to `Deny`, then no matched parameters will be subject to the `failurePolicy` of the policy.\\n\\nAllowed values are `Allow` or `Deny`\\n\\nRequired"', args=[d.arg(name='parameterNotFoundAction', type=d.T.string)]), + withParameterNotFoundAction(parameterNotFoundAction): { spec+: { paramRef+: { parameterNotFoundAction: parameterNotFoundAction } } }, + }, + '#withPolicyName':: d.fn(help='"PolicyName references a ValidatingAdmissionPolicy name which the ValidatingAdmissionPolicyBinding binds to. If the referenced resource does not exist, this binding is considered invalid and will be ignored Required."', args=[d.arg(name='policyName', type=d.T.string)]), + withPolicyName(policyName): { spec+: { policyName: policyName } }, + '#withValidationActions':: d.fn(help="\"validationActions declares how Validations of the referenced ValidatingAdmissionPolicy are enforced. 
If a validation evaluates to false it is always enforced according to these actions.\\n\\nFailures defined by the ValidatingAdmissionPolicy's FailurePolicy are enforced according to these actions only if the FailurePolicy is set to Fail, otherwise the failures are ignored. This includes compilation errors, runtime errors and misconfigurations of the policy.\\n\\nvalidationActions is declared as a set of action values. Order does not matter. validationActions may not contain duplicates of the same action.\\n\\nThe supported actions values are:\\n\\n\\\"Deny\\\" specifies that a validation failure results in a denied request.\\n\\n\\\"Warn\\\" specifies that a validation failure is reported to the request client in HTTP Warning headers, with a warning code of 299. Warnings can be sent both for allowed or denied admission responses.\\n\\n\\\"Audit\\\" specifies that a validation failure is included in the published audit event for the request. The audit event will contain a `validation.policy.admission.k8s.io/validation_failure` audit annotation with a value containing the details of the validation failures, formatted as a JSON list of objects, each with the following fields: - message: The validation failure message string - policy: The resource name of the ValidatingAdmissionPolicy - binding: The resource name of the ValidatingAdmissionPolicyBinding - expressionIndex: The index of the failed validations in the ValidatingAdmissionPolicy - validationActions: The enforcement actions enacted for the validation failure Example audit annotation: `\\\"validation.policy.admission.k8s.io/validation_failure\\\": \\\"[{\\\"message\\\": \\\"Invalid value\\\", {\\\"policy\\\": \\\"policy.example.com\\\", {\\\"binding\\\": \\\"policybinding.example.com\\\", {\\\"expressionIndex\\\": \\\"1\\\", {\\\"validationActions\\\": [\\\"Audit\\\"]}]\\\"`\\n\\nClients should expect to handle additional values by ignoring any values not recognized.\\n\\n\\\"Deny\\\" and \\\"Warn\\\" may not be used together since this combination needlessly duplicates the validation failure both in the API response body and the HTTP warning headers.\\n\\nRequired.\"", args=[d.arg(name='validationActions', type=d.T.array)]), + withValidationActions(validationActions): { spec+: { validationActions: if std.isArray(v=validationActions) then validationActions else [validationActions] } }, + '#withValidationActionsMixin':: d.fn(help="\"validationActions declares how Validations of the referenced ValidatingAdmissionPolicy are enforced. If a validation evaluates to false it is always enforced according to these actions.\\n\\nFailures defined by the ValidatingAdmissionPolicy's FailurePolicy are enforced according to these actions only if the FailurePolicy is set to Fail, otherwise the failures are ignored. This includes compilation errors, runtime errors and misconfigurations of the policy.\\n\\nvalidationActions is declared as a set of action values. Order does not matter. validationActions may not contain duplicates of the same action.\\n\\nThe supported actions values are:\\n\\n\\\"Deny\\\" specifies that a validation failure results in a denied request.\\n\\n\\\"Warn\\\" specifies that a validation failure is reported to the request client in HTTP Warning headers, with a warning code of 299. Warnings can be sent both for allowed or denied admission responses.\\n\\n\\\"Audit\\\" specifies that a validation failure is included in the published audit event for the request. 
The audit event will contain a `validation.policy.admission.k8s.io/validation_failure` audit annotation with a value containing the details of the validation failures, formatted as a JSON list of objects, each with the following fields: - message: The validation failure message string - policy: The resource name of the ValidatingAdmissionPolicy - binding: The resource name of the ValidatingAdmissionPolicyBinding - expressionIndex: The index of the failed validations in the ValidatingAdmissionPolicy - validationActions: The enforcement actions enacted for the validation failure Example audit annotation: `\\\"validation.policy.admission.k8s.io/validation_failure\\\": \\\"[{\\\"message\\\": \\\"Invalid value\\\", {\\\"policy\\\": \\\"policy.example.com\\\", {\\\"binding\\\": \\\"policybinding.example.com\\\", {\\\"expressionIndex\\\": \\\"1\\\", {\\\"validationActions\\\": [\\\"Audit\\\"]}]\\\"`\\n\\nClients should expect to handle additional values by ignoring any values not recognized.\\n\\n\\\"Deny\\\" and \\\"Warn\\\" may not be used together since this combination needlessly duplicates the validation failure both in the API response body and the HTTP warning headers.\\n\\nRequired.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='validationActions', type=d.T.array)]), + withValidationActionsMixin(validationActions): { spec+: { validationActions+: if std.isArray(v=validationActions) then validationActions else [validationActions] } }, + }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1beta1/validatingAdmissionPolicyBindingSpec.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1beta1/validatingAdmissionPolicyBindingSpec.libsonnet new file mode 100644 index 00000000000..e1ebe62488f --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1beta1/validatingAdmissionPolicyBindingSpec.libsonnet @@ -0,0 +1,67 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='validatingAdmissionPolicyBindingSpec', url='', help='"ValidatingAdmissionPolicyBindingSpec is the specification of the ValidatingAdmissionPolicyBinding."'), + '#matchResources':: d.obj(help='"MatchResources decides whether to run the admission control policy on an object based on whether it meets the match criteria. The exclude rules take precedence over include rules (if a resource matches both, it is excluded)"'), + matchResources: { + '#namespaceSelector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), + namespaceSelector: { + '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressions(matchExpressions): { matchResources+: { namespaceSelector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } }, + '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. 
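The binding-side helpers added above follow the same pattern. In the sketch below (same assumed import path, illustrative names), Deny is combined with Audit rather than Warn, since the help text above notes that Deny and Warn may not be used together:

local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';
local binding = k.admissionregistration.v1beta1.validatingAdmissionPolicyBinding;

// Bind the (assumed) 'replica-limit' policy to test namespaces and
// enforce failures as hard denials that are also recorded in audit events.
binding.new('replica-limit-binding')
+ binding.spec.withPolicyName('replica-limit')
+ binding.spec.withValidationActions(['Deny', 'Audit'])
+ binding.spec.matchResources.namespaceSelector.withMatchLabels({ environment: 'test' })

The validatingAdmissionPolicyBindingSpec variant introduced just above exposes the same fields without the spec+: wrapper (for example withPolicyName(policyName): { policyName: policyName }), so its output is meant to be nested under spec by the caller rather than applied to a full ValidatingAdmissionPolicyBinding object.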
The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressionsMixin(matchExpressions): { matchResources+: { namespaceSelector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } }, + '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabels(matchLabels): { matchResources+: { namespaceSelector+: { matchLabels: matchLabels } } }, + '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabelsMixin(matchLabels): { matchResources+: { namespaceSelector+: { matchLabels+: matchLabels } } }, + }, + '#objectSelector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), + objectSelector: { + '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressions(matchExpressions): { matchResources+: { objectSelector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } }, + '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressionsMixin(matchExpressions): { matchResources+: { objectSelector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } }, + '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabels(matchLabels): { matchResources+: { objectSelector+: { matchLabels: matchLabels } } }, + '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". 
The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabelsMixin(matchLabels): { matchResources+: { objectSelector+: { matchLabels+: matchLabels } } }, + }, + '#withExcludeResourceRules':: d.fn(help='"ExcludeResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy should not care about. The exclude rules take precedence over include rules (if a resource matches both, it is excluded)"', args=[d.arg(name='excludeResourceRules', type=d.T.array)]), + withExcludeResourceRules(excludeResourceRules): { matchResources+: { excludeResourceRules: if std.isArray(v=excludeResourceRules) then excludeResourceRules else [excludeResourceRules] } }, + '#withExcludeResourceRulesMixin':: d.fn(help='"ExcludeResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy should not care about. The exclude rules take precedence over include rules (if a resource matches both, it is excluded)"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='excludeResourceRules', type=d.T.array)]), + withExcludeResourceRulesMixin(excludeResourceRules): { matchResources+: { excludeResourceRules+: if std.isArray(v=excludeResourceRules) then excludeResourceRules else [excludeResourceRules] } }, + '#withMatchPolicy':: d.fn(help='"matchPolicy defines how the \\"MatchResources\\" list is used to match incoming requests. Allowed values are \\"Exact\\" or \\"Equivalent\\".\\n\\n- Exact: match a request only if it exactly matches a specified rule. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, but \\"rules\\" only included `apiGroups:[\\"apps\\"], apiVersions:[\\"v1\\"], resources: [\\"deployments\\"]`, a request to apps/v1beta1 or extensions/v1beta1 would not be sent to the ValidatingAdmissionPolicy.\\n\\n- Equivalent: match a request if modifies a resource listed in rules, even via another API group or version. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, and \\"rules\\" only included `apiGroups:[\\"apps\\"], apiVersions:[\\"v1\\"], resources: [\\"deployments\\"]`, a request to apps/v1beta1 or extensions/v1beta1 would be converted to apps/v1 and sent to the ValidatingAdmissionPolicy.\\n\\nDefaults to \\"Equivalent\\', args=[d.arg(name='matchPolicy', type=d.T.string)]), + withMatchPolicy(matchPolicy): { matchResources+: { matchPolicy: matchPolicy } }, + '#withResourceRules':: d.fn(help='"ResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy matches. The policy cares about an operation if it matches _any_ Rule."', args=[d.arg(name='resourceRules', type=d.T.array)]), + withResourceRules(resourceRules): { matchResources+: { resourceRules: if std.isArray(v=resourceRules) then resourceRules else [resourceRules] } }, + '#withResourceRulesMixin':: d.fn(help='"ResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy matches. 
The policy cares about an operation if it matches _any_ Rule."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='resourceRules', type=d.T.array)]), + withResourceRulesMixin(resourceRules): { matchResources+: { resourceRules+: if std.isArray(v=resourceRules) then resourceRules else [resourceRules] } }, + }, + '#paramRef':: d.obj(help='"ParamRef describes how to locate the params to be used as input to expressions of rules applied by a policy binding."'), + paramRef: { + '#selector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), + selector: { + '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressions(matchExpressions): { paramRef+: { selector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } }, + '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressionsMixin(matchExpressions): { paramRef+: { selector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } }, + '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabels(matchLabels): { paramRef+: { selector+: { matchLabels: matchLabels } } }, + '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabelsMixin(matchLabels): { paramRef+: { selector+: { matchLabels+: matchLabels } } }, + }, + '#withName':: d.fn(help='"name is the name of the resource being referenced.\\n\\nOne of `name` or `selector` must be set, but `name` and `selector` are mutually exclusive properties. If one is set, the other must be unset.\\n\\nA single parameter used for all admission requests can be configured by setting the `name` field, leaving `selector` blank, and setting namespace if `paramKind` is namespace-scoped."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { paramRef+: { name: name } }, + '#withNamespace':: d.fn(help='"namespace is the namespace of the referenced resource. Allows limiting the search for params to a specific namespace. Applies to both `name` and `selector` fields.\\n\\nA per-namespace parameter may be used by specifying a namespace-scoped `paramKind` in the policy and leaving this field empty.\\n\\n- If `paramKind` is cluster-scoped, this field MUST be unset. 
Setting this field results in a configuration error.\\n\\n- If `paramKind` is namespace-scoped, the namespace of the object being evaluated for admission will be used when this field is left unset. Take care that if this is left empty the binding must not match any cluster-scoped resources, which will result in an error."', args=[d.arg(name='namespace', type=d.T.string)]), + withNamespace(namespace): { paramRef+: { namespace: namespace } }, + '#withParameterNotFoundAction':: d.fn(help='"`parameterNotFoundAction` controls the behavior of the binding when the resource exists, and name or selector is valid, but there are no parameters matched by the binding. If the value is set to `Allow`, then no matched parameters will be treated as successful validation by the binding. If set to `Deny`, then no matched parameters will be subject to the `failurePolicy` of the policy.\\n\\nAllowed values are `Allow` or `Deny`\\n\\nRequired"', args=[d.arg(name='parameterNotFoundAction', type=d.T.string)]), + withParameterNotFoundAction(parameterNotFoundAction): { paramRef+: { parameterNotFoundAction: parameterNotFoundAction } }, + }, + '#withPolicyName':: d.fn(help='"PolicyName references a ValidatingAdmissionPolicy name which the ValidatingAdmissionPolicyBinding binds to. If the referenced resource does not exist, this binding is considered invalid and will be ignored Required."', args=[d.arg(name='policyName', type=d.T.string)]), + withPolicyName(policyName): { policyName: policyName }, + '#withValidationActions':: d.fn(help="\"validationActions declares how Validations of the referenced ValidatingAdmissionPolicy are enforced. If a validation evaluates to false it is always enforced according to these actions.\\n\\nFailures defined by the ValidatingAdmissionPolicy's FailurePolicy are enforced according to these actions only if the FailurePolicy is set to Fail, otherwise the failures are ignored. This includes compilation errors, runtime errors and misconfigurations of the policy.\\n\\nvalidationActions is declared as a set of action values. Order does not matter. validationActions may not contain duplicates of the same action.\\n\\nThe supported actions values are:\\n\\n\\\"Deny\\\" specifies that a validation failure results in a denied request.\\n\\n\\\"Warn\\\" specifies that a validation failure is reported to the request client in HTTP Warning headers, with a warning code of 299. Warnings can be sent both for allowed or denied admission responses.\\n\\n\\\"Audit\\\" specifies that a validation failure is included in the published audit event for the request. 
The audit event will contain a `validation.policy.admission.k8s.io/validation_failure` audit annotation with a value containing the details of the validation failures, formatted as a JSON list of objects, each with the following fields: - message: The validation failure message string - policy: The resource name of the ValidatingAdmissionPolicy - binding: The resource name of the ValidatingAdmissionPolicyBinding - expressionIndex: The index of the failed validations in the ValidatingAdmissionPolicy - validationActions: The enforcement actions enacted for the validation failure Example audit annotation: `\\\"validation.policy.admission.k8s.io/validation_failure\\\": \\\"[{\\\"message\\\": \\\"Invalid value\\\", {\\\"policy\\\": \\\"policy.example.com\\\", {\\\"binding\\\": \\\"policybinding.example.com\\\", {\\\"expressionIndex\\\": \\\"1\\\", {\\\"validationActions\\\": [\\\"Audit\\\"]}]\\\"`\\n\\nClients should expect to handle additional values by ignoring any values not recognized.\\n\\n\\\"Deny\\\" and \\\"Warn\\\" may not be used together since this combination needlessly duplicates the validation failure both in the API response body and the HTTP warning headers.\\n\\nRequired.\"", args=[d.arg(name='validationActions', type=d.T.array)]), + withValidationActions(validationActions): { validationActions: if std.isArray(v=validationActions) then validationActions else [validationActions] }, + '#withValidationActionsMixin':: d.fn(help="\"validationActions declares how Validations of the referenced ValidatingAdmissionPolicy are enforced. If a validation evaluates to false it is always enforced according to these actions.\\n\\nFailures defined by the ValidatingAdmissionPolicy's FailurePolicy are enforced according to these actions only if the FailurePolicy is set to Fail, otherwise the failures are ignored. This includes compilation errors, runtime errors and misconfigurations of the policy.\\n\\nvalidationActions is declared as a set of action values. Order does not matter. validationActions may not contain duplicates of the same action.\\n\\nThe supported actions values are:\\n\\n\\\"Deny\\\" specifies that a validation failure results in a denied request.\\n\\n\\\"Warn\\\" specifies that a validation failure is reported to the request client in HTTP Warning headers, with a warning code of 299. Warnings can be sent both for allowed or denied admission responses.\\n\\n\\\"Audit\\\" specifies that a validation failure is included in the published audit event for the request. 
The audit event will contain a `validation.policy.admission.k8s.io/validation_failure` audit annotation with a value containing the details of the validation failures, formatted as a JSON list of objects, each with the following fields: - message: The validation failure message string - policy: The resource name of the ValidatingAdmissionPolicy - binding: The resource name of the ValidatingAdmissionPolicyBinding - expressionIndex: The index of the failed validations in the ValidatingAdmissionPolicy - validationActions: The enforcement actions enacted for the validation failure Example audit annotation: `\\\"validation.policy.admission.k8s.io/validation_failure\\\": \\\"[{\\\"message\\\": \\\"Invalid value\\\", {\\\"policy\\\": \\\"policy.example.com\\\", {\\\"binding\\\": \\\"policybinding.example.com\\\", {\\\"expressionIndex\\\": \\\"1\\\", {\\\"validationActions\\\": [\\\"Audit\\\"]}]\\\"`\\n\\nClients should expect to handle additional values by ignoring any values not recognized.\\n\\n\\\"Deny\\\" and \\\"Warn\\\" may not be used together since this combination needlessly duplicates the validation failure both in the API response body and the HTTP warning headers.\\n\\nRequired.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='validationActions', type=d.T.array)]), + withValidationActionsMixin(validationActions): { validationActions+: if std.isArray(v=validationActions) then validationActions else [validationActions] }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1beta1/validatingAdmissionPolicySpec.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1beta1/validatingAdmissionPolicySpec.libsonnet new file mode 100644 index 00000000000..3bc96c0cd22 --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1beta1/validatingAdmissionPolicySpec.libsonnet @@ -0,0 +1,66 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='validatingAdmissionPolicySpec', url='', help='"ValidatingAdmissionPolicySpec is the specification of the desired behavior of the AdmissionPolicy."'), + '#matchConstraints':: d.obj(help='"MatchResources decides whether to run the admission control policy on an object based on whether it meets the match criteria. The exclude rules take precedence over include rules (if a resource matches both, it is excluded)"'), + matchConstraints: { + '#namespaceSelector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), + namespaceSelector: { + '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressions(matchExpressions): { matchConstraints+: { namespaceSelector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } }, + '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. 
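As a usage sketch only (not part of the vendored sources): the ValidatingAdmissionPolicyBindingSpec helpers above compose by plain object addition. The import path and the policy/parameter names below are assumptions for illustration.

local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';
local bindingSpec = k.admissionregistration.v1beta1.validatingAdmissionPolicyBindingSpec;

// Each with* helper returns a small object; summing them yields the spec fields.
bindingSpec.withPolicyName('replica-limit.example.com')      // hypothetical policy name
+ bindingSpec.withValidationActions(['Deny', 'Audit'])       // a scalar or an array is accepted
+ bindingSpec.paramRef.withName('replica-limit-params')      // hypothetical params resource
+ bindingSpec.paramRef.withParameterNotFoundAction('Deny')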
The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressionsMixin(matchExpressions): { matchConstraints+: { namespaceSelector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } }, + '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabels(matchLabels): { matchConstraints+: { namespaceSelector+: { matchLabels: matchLabels } } }, + '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabelsMixin(matchLabels): { matchConstraints+: { namespaceSelector+: { matchLabels+: matchLabels } } }, + }, + '#objectSelector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), + objectSelector: { + '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressions(matchExpressions): { matchConstraints+: { objectSelector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } }, + '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressionsMixin(matchExpressions): { matchConstraints+: { objectSelector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } }, + '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabels(matchLabels): { matchConstraints+: { objectSelector+: { matchLabels: matchLabels } } }, + '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". 
The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabelsMixin(matchLabels): { matchConstraints+: { objectSelector+: { matchLabels+: matchLabels } } }, + }, + '#withExcludeResourceRules':: d.fn(help='"ExcludeResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy should not care about. The exclude rules take precedence over include rules (if a resource matches both, it is excluded)"', args=[d.arg(name='excludeResourceRules', type=d.T.array)]), + withExcludeResourceRules(excludeResourceRules): { matchConstraints+: { excludeResourceRules: if std.isArray(v=excludeResourceRules) then excludeResourceRules else [excludeResourceRules] } }, + '#withExcludeResourceRulesMixin':: d.fn(help='"ExcludeResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy should not care about. The exclude rules take precedence over include rules (if a resource matches both, it is excluded)"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='excludeResourceRules', type=d.T.array)]), + withExcludeResourceRulesMixin(excludeResourceRules): { matchConstraints+: { excludeResourceRules+: if std.isArray(v=excludeResourceRules) then excludeResourceRules else [excludeResourceRules] } }, + '#withMatchPolicy':: d.fn(help='"matchPolicy defines how the \\"MatchResources\\" list is used to match incoming requests. Allowed values are \\"Exact\\" or \\"Equivalent\\".\\n\\n- Exact: match a request only if it exactly matches a specified rule. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, but \\"rules\\" only included `apiGroups:[\\"apps\\"], apiVersions:[\\"v1\\"], resources: [\\"deployments\\"]`, a request to apps/v1beta1 or extensions/v1beta1 would not be sent to the ValidatingAdmissionPolicy.\\n\\n- Equivalent: match a request if modifies a resource listed in rules, even via another API group or version. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, and \\"rules\\" only included `apiGroups:[\\"apps\\"], apiVersions:[\\"v1\\"], resources: [\\"deployments\\"]`, a request to apps/v1beta1 or extensions/v1beta1 would be converted to apps/v1 and sent to the ValidatingAdmissionPolicy.\\n\\nDefaults to \\"Equivalent\\', args=[d.arg(name='matchPolicy', type=d.T.string)]), + withMatchPolicy(matchPolicy): { matchConstraints+: { matchPolicy: matchPolicy } }, + '#withResourceRules':: d.fn(help='"ResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy matches. The policy cares about an operation if it matches _any_ Rule."', args=[d.arg(name='resourceRules', type=d.T.array)]), + withResourceRules(resourceRules): { matchConstraints+: { resourceRules: if std.isArray(v=resourceRules) then resourceRules else [resourceRules] } }, + '#withResourceRulesMixin':: d.fn(help='"ResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy matches. 
The policy cares about an operation if it matches _any_ Rule."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='resourceRules', type=d.T.array)]), + withResourceRulesMixin(resourceRules): { matchConstraints+: { resourceRules+: if std.isArray(v=resourceRules) then resourceRules else [resourceRules] } }, + }, + '#paramKind':: d.obj(help='"ParamKind is a tuple of Group Kind and Version."'), + paramKind: { + '#withApiVersion':: d.fn(help='"APIVersion is the API group version the resources belong to. In format of \\"group/version\\". Required."', args=[d.arg(name='apiVersion', type=d.T.string)]), + withApiVersion(apiVersion): { paramKind+: { apiVersion: apiVersion } }, + '#withKind':: d.fn(help='"Kind is the API kind the resources belong to. Required."', args=[d.arg(name='kind', type=d.T.string)]), + withKind(kind): { paramKind+: { kind: kind } }, + }, + '#withAuditAnnotations':: d.fn(help='"auditAnnotations contains CEL expressions which are used to produce audit annotations for the audit event of the API request. validations and auditAnnotations may not both be empty; a least one of validations or auditAnnotations is required."', args=[d.arg(name='auditAnnotations', type=d.T.array)]), + withAuditAnnotations(auditAnnotations): { auditAnnotations: if std.isArray(v=auditAnnotations) then auditAnnotations else [auditAnnotations] }, + '#withAuditAnnotationsMixin':: d.fn(help='"auditAnnotations contains CEL expressions which are used to produce audit annotations for the audit event of the API request. validations and auditAnnotations may not both be empty; a least one of validations or auditAnnotations is required."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='auditAnnotations', type=d.T.array)]), + withAuditAnnotationsMixin(auditAnnotations): { auditAnnotations+: if std.isArray(v=auditAnnotations) then auditAnnotations else [auditAnnotations] }, + '#withFailurePolicy':: d.fn(help='"failurePolicy defines how to handle failures for the admission policy. Failures can occur from CEL expression parse errors, type check errors, runtime errors and invalid or mis-configured policy definitions or bindings.\\n\\nA policy is invalid if spec.paramKind refers to a non-existent Kind. A binding is invalid if spec.paramRef.name refers to a non-existent resource.\\n\\nfailurePolicy does not define how validations that evaluate to false are handled.\\n\\nWhen failurePolicy is set to Fail, ValidatingAdmissionPolicyBinding validationActions define how failures are enforced.\\n\\nAllowed values are Ignore or Fail. Defaults to Fail."', args=[d.arg(name='failurePolicy', type=d.T.string)]), + withFailurePolicy(failurePolicy): { failurePolicy: failurePolicy }, + '#withMatchConditions':: d.fn(help='"MatchConditions is a list of conditions that must be met for a request to be validated. Match conditions filter requests that have already been matched by the rules, namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests. There are a maximum of 64 match conditions allowed.\\n\\nIf a parameter object is provided, it can be accessed via the `params` handle in the same manner as validation expressions.\\n\\nThe exact matching logic is (in order):\\n 1. If ANY matchCondition evaluates to FALSE, the policy is skipped.\\n 2. If ALL matchConditions evaluate to TRUE, the policy is evaluated.\\n 3. 
If any matchCondition evaluates to an error (but none are FALSE):\\n - If failurePolicy=Fail, reject the request\\n - If failurePolicy=Ignore, the policy is skipped"', args=[d.arg(name='matchConditions', type=d.T.array)]), + withMatchConditions(matchConditions): { matchConditions: if std.isArray(v=matchConditions) then matchConditions else [matchConditions] }, + '#withMatchConditionsMixin':: d.fn(help='"MatchConditions is a list of conditions that must be met for a request to be validated. Match conditions filter requests that have already been matched by the rules, namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests. There are a maximum of 64 match conditions allowed.\\n\\nIf a parameter object is provided, it can be accessed via the `params` handle in the same manner as validation expressions.\\n\\nThe exact matching logic is (in order):\\n 1. If ANY matchCondition evaluates to FALSE, the policy is skipped.\\n 2. If ALL matchConditions evaluate to TRUE, the policy is evaluated.\\n 3. If any matchCondition evaluates to an error (but none are FALSE):\\n - If failurePolicy=Fail, reject the request\\n - If failurePolicy=Ignore, the policy is skipped"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchConditions', type=d.T.array)]), + withMatchConditionsMixin(matchConditions): { matchConditions+: if std.isArray(v=matchConditions) then matchConditions else [matchConditions] }, + '#withValidations':: d.fn(help='"Validations contain CEL expressions which is used to apply the validation. Validations and AuditAnnotations may not both be empty; a minimum of one Validations or AuditAnnotations is required."', args=[d.arg(name='validations', type=d.T.array)]), + withValidations(validations): { validations: if std.isArray(v=validations) then validations else [validations] }, + '#withValidationsMixin':: d.fn(help='"Validations contain CEL expressions which is used to apply the validation. Validations and AuditAnnotations may not both be empty; a minimum of one Validations or AuditAnnotations is required."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='validations', type=d.T.array)]), + withValidationsMixin(validations): { validations+: if std.isArray(v=validations) then validations else [validations] }, + '#withVariables':: d.fn(help='"Variables contain definitions of variables that can be used in composition of other expressions. Each variable is defined as a named CEL expression. The variables defined here will be available under `variables` in other expressions of the policy except MatchConditions because MatchConditions are evaluated before the rest of the policy.\\n\\nThe expression of a variable can refer to other variables defined earlier in the list but not those after. Thus, Variables must be sorted by the order of first appearance and acyclic."', args=[d.arg(name='variables', type=d.T.array)]), + withVariables(variables): { variables: if std.isArray(v=variables) then variables else [variables] }, + '#withVariablesMixin':: d.fn(help='"Variables contain definitions of variables that can be used in composition of other expressions. Each variable is defined as a named CEL expression. 
The variables defined here will be available under `variables` in other expressions of the policy except MatchConditions because MatchConditions are evaluated before the rest of the policy.\\n\\nThe expression of a variable can refer to other variables defined earlier in the list but not those after. Thus, Variables must be sorted by the order of first appearance and acyclic."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='variables', type=d.T.array)]), + withVariablesMixin(variables): { variables+: if std.isArray(v=variables) then variables else [variables] }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1beta1/validatingAdmissionPolicyStatus.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1beta1/validatingAdmissionPolicyStatus.libsonnet new file mode 100644 index 00000000000..6d150d52f4a --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1beta1/validatingAdmissionPolicyStatus.libsonnet @@ -0,0 +1,19 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='validatingAdmissionPolicyStatus', url='', help='"ValidatingAdmissionPolicyStatus represents the status of an admission validation policy."'), + '#typeChecking':: d.obj(help='"TypeChecking contains results of type checking the expressions in the ValidatingAdmissionPolicy"'), + typeChecking: { + '#withExpressionWarnings':: d.fn(help='"The type checking warnings for each expression."', args=[d.arg(name='expressionWarnings', type=d.T.array)]), + withExpressionWarnings(expressionWarnings): { typeChecking+: { expressionWarnings: if std.isArray(v=expressionWarnings) then expressionWarnings else [expressionWarnings] } }, + '#withExpressionWarningsMixin':: d.fn(help='"The type checking warnings for each expression."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='expressionWarnings', type=d.T.array)]), + withExpressionWarningsMixin(expressionWarnings): { typeChecking+: { expressionWarnings+: if std.isArray(v=expressionWarnings) then expressionWarnings else [expressionWarnings] } }, + }, + '#withConditions':: d.fn(help="\"The conditions represent the latest available observations of a policy's current state.\"", args=[d.arg(name='conditions', type=d.T.array)]), + withConditions(conditions): { conditions: if std.isArray(v=conditions) then conditions else [conditions] }, + '#withConditionsMixin':: d.fn(help="\"The conditions represent the latest available observations of a policy's current state.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='conditions', type=d.T.array)]), + withConditionsMixin(conditions): { conditions+: if std.isArray(v=conditions) then conditions else [conditions] }, + '#withObservedGeneration':: d.fn(help='"The generation observed by the controller."', args=[d.arg(name='observedGeneration', type=d.T.integer)]), + withObservedGeneration(observedGeneration): { observedGeneration: observedGeneration }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1beta1/validation.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1beta1/validation.libsonnet 
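A minimal sketch of how the ValidatingAdmissionPolicySpec helpers above fit together, using the validation helper introduced just below; the import path, resource rule, and CEL expression are illustrative assumptions, not part of this patch.

local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';
local policySpec = k.admissionregistration.v1beta1.validatingAdmissionPolicySpec;
local validation = k.admissionregistration.v1beta1.validation;

policySpec.withFailurePolicy('Fail')
+ policySpec.matchConstraints.withResourceRules([{
    // NamedRuleWithOperations fields, written as a raw object for brevity.
    apiGroups: ['apps'],
    apiVersions: ['v1'],
    operations: ['CREATE', 'UPDATE'],
    resources: ['deployments'],
  }])
+ policySpec.withValidations([
    validation.withExpression('object.spec.replicas <= 5')
    + validation.withMessage('replicas must be at most 5'),
  ])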
new file mode 100644 index 00000000000..db493d390af --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1beta1/validation.libsonnet @@ -0,0 +1,14 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='validation', url='', help='"Validation specifies the CEL expression which is used to apply the validation."'), + '#withExpression':: d.fn(help="\"Expression represents the expression which will be evaluated by CEL. ref: https://github.com/google/cel-spec CEL expressions have access to the contents of the API request/response, organized into CEL variables as well as some other useful variables:\\n\\n- 'object' - The object from the incoming request. The value is null for DELETE requests. - 'oldObject' - The existing object. The value is null for CREATE requests. - 'request' - Attributes of the API request([ref](/pkg/apis/admission/types.go#AdmissionRequest)). - 'params' - Parameter resource referred to by the policy binding being evaluated. Only populated if the policy has a ParamKind. - 'namespaceObject' - The namespace object that the incoming object belongs to. The value is null for cluster-scoped resources. - 'variables' - Map of composited variables, from its name to its lazily evaluated value.\\n For example, a variable named 'foo' can be accessed as 'variables.foo'.\\n- 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request.\\n See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz\\n- 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the\\n request resource.\\n\\nThe `apiVersion`, `kind`, `metadata.name` and `metadata.generateName` are always accessible from the root of the object. No other metadata properties are accessible.\\n\\nOnly property names of the form `[a-zA-Z_.-/][a-zA-Z0-9_.-/]*` are accessible. Accessible property names are escaped according to the following rules when accessed in the expression: - '__' escapes to '__underscores__' - '.' escapes to '__dot__' - '-' escapes to '__dash__' - '/' escapes to '__slash__' - Property names that exactly match a CEL RESERVED keyword escape to '__{keyword}__'. The keywords are:\\n\\t \\\"true\\\", \\\"false\\\", \\\"null\\\", \\\"in\\\", \\\"as\\\", \\\"break\\\", \\\"const\\\", \\\"continue\\\", \\\"else\\\", \\\"for\\\", \\\"function\\\", \\\"if\\\",\\n\\t \\\"import\\\", \\\"let\\\", \\\"loop\\\", \\\"package\\\", \\\"namespace\\\", \\\"return\\\".\\nExamples:\\n - Expression accessing a property named \\\"namespace\\\": {\\\"Expression\\\": \\\"object.__namespace__ \u003e 0\\\"}\\n - Expression accessing a property named \\\"x-prop\\\": {\\\"Expression\\\": \\\"object.x__dash__prop \u003e 0\\\"}\\n - Expression accessing a property named \\\"redact__d\\\": {\\\"Expression\\\": \\\"object.redact__underscores__d \u003e 0\\\"}\\n\\nEquality on arrays with list type of 'set' or 'map' ignores element order, i.e. [1, 2] == [2, 1]. Concatenation on arrays with x-kubernetes-list-type use the semantics of the list type:\\n - 'set': `X + Y` performs a union where the array positions of all elements in `X` are preserved and\\n non-intersecting elements in `Y` are appended, retaining their partial order.\\n - 'map': `X + Y` performs a merge where the array positions of all keys in `X` are preserved but the values\\n are overwritten by values in `Y` when the key sets of `X` and `Y` intersect. 
Elements in `Y` with\\n non-intersecting keys are appended, retaining their partial order.\\nRequired.\"", args=[d.arg(name='expression', type=d.T.string)]), + withExpression(expression): { expression: expression }, + '#withMessage':: d.fn(help='"Message represents the message displayed when validation fails. The message is required if the Expression contains line breaks. The message must not contain line breaks. If unset, the message is \\"failed rule: {Rule}\\". e.g. \\"must be a URL with the host matching spec.host\\" If the Expression contains line breaks. Message is required. The message must not contain line breaks. If unset, the message is \\"failed Expression: {Expression}\\"."', args=[d.arg(name='message', type=d.T.string)]), + withMessage(message): { message: message }, + '#withMessageExpression':: d.fn(help="\"messageExpression declares a CEL expression that evaluates to the validation failure message that is returned when this rule fails. Since messageExpression is used as a failure message, it must evaluate to a string. If both message and messageExpression are present on a validation, then messageExpression will be used if validation fails. If messageExpression results in a runtime error, the runtime error is logged, and the validation failure message is produced as if the messageExpression field were unset. If messageExpression evaluates to an empty string, a string with only spaces, or a string that contains line breaks, then the validation failure message will also be produced as if the messageExpression field were unset, and the fact that messageExpression produced an empty string/string with only spaces/string with line breaks will be logged. messageExpression has access to all the same variables as the `expression` except for 'authorizer' and 'authorizer.requestResource'. Example: \\\"object.x must be less than max (\\\"+string(params.max)+\\\")\\", args=[d.arg(name='messageExpression', type=d.T.string)]), + withMessageExpression(messageExpression): { messageExpression: messageExpression }, + '#withReason':: d.fn(help='"Reason represents a machine-readable description of why this validation failed. If this is the first validation in the list to fail, this reason, as well as the corresponding HTTP response code, are used in the HTTP response to the client. The currently supported reasons are: \\"Unauthorized\\", \\"Forbidden\\", \\"Invalid\\", \\"RequestEntityTooLarge\\". If not set, StatusReasonInvalid is used in the response to the client."', args=[d.arg(name='reason', type=d.T.string)]), + withReason(reason): { reason: reason }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1beta1/variable.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1beta1/variable.libsonnet new file mode 100644 index 00000000000..084c27b79b3 --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/admissionregistration/v1beta1/variable.libsonnet @@ -0,0 +1,10 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='variable', url='', help='"Variable is the definition of a variable that is used for composition. A variable is defined as a named expression."'), + '#withExpression':: d.fn(help='"Expression is the expression that will be evaluated as the value of the variable. 
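A hedged sketch of a single Validation object built with the helpers above; the params-based expression and the reason value are placeholders.

local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';
local validation = k.admissionregistration.v1beta1.validation;

// 'object' and 'params' are the CEL identifiers described in the help text above.
validation.withExpression('object.spec.replicas <= params.maxReplicas')
+ validation.withMessageExpression("'replicas must be <= ' + string(params.maxReplicas)")
+ validation.withReason('Invalid')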
The CEL expression has access to the same identifiers as the CEL expressions in Validation."', args=[d.arg(name='expression', type=d.T.string)]), + withExpression(expression): { expression: expression }, + '#withName':: d.fn(help='"Name is the name of the variable. The name must be a valid CEL identifier and unique among all variables. The variable can be accessed in other expressions through `variables` For example, if name is \\"foo\\", the variable will be available as `variables.foo`"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { name: name }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apiregistration/main.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apiregistration/main.libsonnet similarity index 75% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apiregistration/main.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apiregistration/main.libsonnet index f4839e1e05b..9ec84c9aac9 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apiregistration/main.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apiregistration/main.libsonnet @@ -2,5 +2,4 @@ local d = (import 'doc-util/main.libsonnet'), '#':: d.pkg(name='apiregistration', url='', help=''), v1: (import 'v1/main.libsonnet'), - v1beta1: (import 'v1beta1/main.libsonnet'), } diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apiregistration/v1/apiService.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apiregistration/v1/apiService.libsonnet similarity index 86% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apiregistration/v1/apiService.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apiregistration/v1/apiService.libsonnet index e1671ea3b21..53413e2a246 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apiregistration/v1/apiService.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apiregistration/v1/apiService.libsonnet @@ -3,12 +3,10 @@ '#':: d.pkg(name='apiService', url='', help='"APIService represents a server for a particular GroupVersion. Name must be \\"version.group\\"."'), '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. 
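A matching sketch for the variable helper above, attached to a policy spec with withVariablesMixin; the variable name, label key, and import path are assumptions.

local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';
local policySpec = k.admissionregistration.v1beta1.validatingAdmissionPolicySpec;
local variable = k.admissionregistration.v1beta1.variable;

// Exposes variables.isProd to later validation expressions in the same policy.
policySpec.withVariablesMixin([
  variable.withName('isProd')
  + variable.withExpression("has(object.metadata.labels) && object.metadata.labels['env'] == 'prod'"),
])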
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -19,21 +17,21 @@ withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. 
Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { metadata+: { generateName: generateName } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. 
More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { metadata+: { labels+: labels } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. 
An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { metadata+: { namespace: namespace } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, @@ -41,9 +39,9 @@ withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { metadata+: { uid: uid } }, }, '#new':: d.fn(help='new returns an instance of APIService', args=[d.arg(name='name', type=d.T.string)]), @@ -66,7 +64,7 @@ withCaBundle(caBundle): { spec+: { caBundle: caBundle } }, '#withGroup':: d.fn(help='"Group is the API group name this server hosts"', args=[d.arg(name='group', type=d.T.string)]), withGroup(group): { spec+: { group: group } }, - '#withGroupPriorityMinimum':: d.fn(help="\"GroupPriorityMininum is the priority this group should have at least. Higher priority means that the group is preferred by clients over lower priority ones. Note that other versions of this group might specify even higher GroupPriorityMininum values such that the whole group gets a higher priority. The primary sort is based on GroupPriorityMinimum, ordered highest number to lowest (20 before 10). The secondary sort is based on the alphabetical comparison of the name of the object. (v1.bar before v1.foo) We'd recommend something like: *.k8s.io (except extensions) at 18000 and PaaSes (OpenShift, Deis) are recommended to be in the 2000s\"", args=[d.arg(name='groupPriorityMinimum', type=d.T.integer)]), + '#withGroupPriorityMinimum':: d.fn(help="\"GroupPriorityMinimum is the priority this group should have at least. Higher priority means that the group is preferred by clients over lower priority ones. Note that other versions of this group might specify even higher GroupPriorityMinimum values such that the whole group gets a higher priority. The primary sort is based on GroupPriorityMinimum, ordered highest number to lowest (20 before 10). The secondary sort is based on the alphabetical comparison of the name of the object. (v1.bar before v1.foo) We'd recommend something like: *.k8s.io (except extensions) at 18000 and PaaSes (OpenShift, Deis) are recommended to be in the 2000s\"", args=[d.arg(name='groupPriorityMinimum', type=d.T.integer)]), withGroupPriorityMinimum(groupPriorityMinimum): { spec+: { groupPriorityMinimum: groupPriorityMinimum } }, '#withInsecureSkipTLSVerify':: d.fn(help='"InsecureSkipTLSVerify disables TLS certificate verification when communicating with this server. This is strongly discouraged. 
You should use the CABundle instead."', args=[d.arg(name='insecureSkipTLSVerify', type=d.T.boolean)]), withInsecureSkipTLSVerify(insecureSkipTLSVerify): { spec+: { insecureSkipTLSVerify: insecureSkipTLSVerify } }, diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apiregistration/v1beta1/apiServiceCondition.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apiregistration/v1/apiServiceCondition.libsonnet similarity index 100% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apiregistration/v1beta1/apiServiceCondition.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apiregistration/v1/apiServiceCondition.libsonnet diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apiregistration/v1beta1/apiServiceSpec.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apiregistration/v1/apiServiceSpec.libsonnet similarity index 96% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apiregistration/v1beta1/apiServiceSpec.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apiregistration/v1/apiServiceSpec.libsonnet index 88d858e5edd..49513ec0e31 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apiregistration/v1beta1/apiServiceSpec.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apiregistration/v1/apiServiceSpec.libsonnet @@ -14,7 +14,7 @@ withCaBundle(caBundle): { caBundle: caBundle }, '#withGroup':: d.fn(help='"Group is the API group name this server hosts"', args=[d.arg(name='group', type=d.T.string)]), withGroup(group): { group: group }, - '#withGroupPriorityMinimum':: d.fn(help="\"GroupPriorityMininum is the priority this group should have at least. Higher priority means that the group is preferred by clients over lower priority ones. Note that other versions of this group might specify even higher GroupPriorityMininum values such that the whole group gets a higher priority. The primary sort is based on GroupPriorityMinimum, ordered highest number to lowest (20 before 10). The secondary sort is based on the alphabetical comparison of the name of the object. (v1.bar before v1.foo) We'd recommend something like: *.k8s.io (except extensions) at 18000 and PaaSes (OpenShift, Deis) are recommended to be in the 2000s\"", args=[d.arg(name='groupPriorityMinimum', type=d.T.integer)]), + '#withGroupPriorityMinimum':: d.fn(help="\"GroupPriorityMinimum is the priority this group should have at least. Higher priority means that the group is preferred by clients over lower priority ones. Note that other versions of this group might specify even higher GroupPriorityMinimum values such that the whole group gets a higher priority. The primary sort is based on GroupPriorityMinimum, ordered highest number to lowest (20 before 10). The secondary sort is based on the alphabetical comparison of the name of the object. 
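For the apiregistration helpers above, a brief sketch of registering an aggregated API; the group, version, namespace, service name, and priority values are placeholders, and the spec fields not shown in this hunk (version, versionPriority, service) are assumed to have the same generated helpers as the rest of the spec.

local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';
local apiService = k.apiregistration.v1.apiService;

apiService.new('v1alpha1.metrics.example.com')        // name must be "version.group"
+ apiService.spec.withGroup('metrics.example.com')
+ apiService.spec.withVersion('v1alpha1')
+ apiService.spec.withGroupPriorityMinimum(1000)
+ apiService.spec.withVersionPriority(15)
+ apiService.spec.service.withNamespace('monitoring')
+ apiService.spec.service.withName('metrics-server')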
(v1.bar before v1.foo) We'd recommend something like: *.k8s.io (except extensions) at 18000 and PaaSes (OpenShift, Deis) are recommended to be in the 2000s\"", args=[d.arg(name='groupPriorityMinimum', type=d.T.integer)]), withGroupPriorityMinimum(groupPriorityMinimum): { groupPriorityMinimum: groupPriorityMinimum }, '#withInsecureSkipTLSVerify':: d.fn(help='"InsecureSkipTLSVerify disables TLS certificate verification when communicating with this server. This is strongly discouraged. You should use the CABundle instead."', args=[d.arg(name='insecureSkipTLSVerify', type=d.T.boolean)]), withInsecureSkipTLSVerify(insecureSkipTLSVerify): { insecureSkipTLSVerify: insecureSkipTLSVerify }, diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apiregistration/v1beta1/apiServiceStatus.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apiregistration/v1/apiServiceStatus.libsonnet similarity index 100% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apiregistration/v1beta1/apiServiceStatus.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apiregistration/v1/apiServiceStatus.libsonnet diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apiregistration/v1/main.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apiregistration/v1/main.libsonnet similarity index 100% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apiregistration/v1/main.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apiregistration/v1/main.libsonnet diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apiregistration/v1beta1/serviceReference.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apiregistration/v1/serviceReference.libsonnet similarity index 100% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apiregistration/v1beta1/serviceReference.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apiregistration/v1/serviceReference.libsonnet diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apiserverinternal/main.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apiserverinternal/main.libsonnet similarity index 100% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apiserverinternal/main.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apiserverinternal/main.libsonnet diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apiserverinternal/v1alpha1/main.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apiserverinternal/v1alpha1/main.libsonnet similarity index 100% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apiserverinternal/v1alpha1/main.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apiserverinternal/v1alpha1/main.libsonnet diff --git 
a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apiserverinternal/v1alpha1/serverStorageVersion.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apiserverinternal/v1alpha1/serverStorageVersion.libsonnet similarity index 68% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apiserverinternal/v1alpha1/serverStorageVersion.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apiserverinternal/v1alpha1/serverStorageVersion.libsonnet index 52467bb3906..77f8c2fe6d2 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apiserverinternal/v1alpha1/serverStorageVersion.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apiserverinternal/v1alpha1/serverStorageVersion.libsonnet @@ -9,6 +9,10 @@ withDecodableVersionsMixin(decodableVersions): { decodableVersions+: if std.isArray(v=decodableVersions) then decodableVersions else [decodableVersions] }, '#withEncodingVersion':: d.fn(help='"The API server encodes the object to this version when persisting it in the backend (e.g., etcd)."', args=[d.arg(name='encodingVersion', type=d.T.string)]), withEncodingVersion(encodingVersion): { encodingVersion: encodingVersion }, + '#withServedVersions':: d.fn(help='"The API server can serve these versions. DecodableVersions must include all ServedVersions."', args=[d.arg(name='servedVersions', type=d.T.array)]), + withServedVersions(servedVersions): { servedVersions: if std.isArray(v=servedVersions) then servedVersions else [servedVersions] }, + '#withServedVersionsMixin':: d.fn(help='"The API server can serve these versions. 
DecodableVersions must include all ServedVersions."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='servedVersions', type=d.T.array)]), + withServedVersionsMixin(servedVersions): { servedVersions+: if std.isArray(v=servedVersions) then servedVersions else [servedVersions] }, '#mixin': 'ignore', mixin: self, } diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apiserverinternal/v1alpha1/storageVersion.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apiserverinternal/v1alpha1/storageVersion.libsonnet similarity index 83% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apiserverinternal/v1alpha1/storageVersion.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apiserverinternal/v1alpha1/storageVersion.libsonnet index 9dcc22785b1..ded412050d1 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apiserverinternal/v1alpha1/storageVersion.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apiserverinternal/v1alpha1/storageVersion.libsonnet @@ -1,14 +1,12 @@ { local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='storageVersion', url='', help='"\\n Storage version of a specific resource."'), + '#':: d.pkg(name='storageVersion', url='', help='"Storage version of a specific resource."'), '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -19,21 +17,21 @@ withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. 
The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { metadata+: { generateName: generateName } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { metadata+: { labels+: labels } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. 
This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { metadata+: { namespace: namespace } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. 
There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, @@ -41,9 +39,9 @@ withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { metadata+: { uid: uid } }, }, '#new':: d.fn(help='new returns an instance of StorageVersion', args=[d.arg(name='name', type=d.T.string)]), diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apiserverinternal/v1alpha1/storageVersionCondition.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apiserverinternal/v1alpha1/storageVersionCondition.libsonnet similarity index 100% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apiserverinternal/v1alpha1/storageVersionCondition.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apiserverinternal/v1alpha1/storageVersionCondition.libsonnet diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apiserverinternal/v1alpha1/storageVersionSpec.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apiserverinternal/v1alpha1/storageVersionSpec.libsonnet similarity index 100% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apiserverinternal/v1alpha1/storageVersionSpec.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apiserverinternal/v1alpha1/storageVersionSpec.libsonnet diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apiserverinternal/v1alpha1/storageVersionStatus.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apiserverinternal/v1alpha1/storageVersionStatus.libsonnet similarity index 100% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apiserverinternal/v1alpha1/storageVersionStatus.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apiserverinternal/v1alpha1/storageVersionStatus.libsonnet diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apps/main.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apps/main.libsonnet similarity index 100% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apps/main.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apps/main.libsonnet diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apps/v1/controllerRevision.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apps/v1/controllerRevision.libsonnet similarity index 71% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apps/v1/controllerRevision.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apps/v1/controllerRevision.libsonnet index 135fa851dd7..370461e3999 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apps/v1/controllerRevision.libsonnet +++ 
b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apps/v1/controllerRevision.libsonnet @@ -3,12 +3,10 @@ '#':: d.pkg(name='controllerRevision', url='', help='"ControllerRevision implements an immutable snapshot of state data. Clients are responsible for serializing and deserializing the objects that contain their internal state. Once a ControllerRevision has been successfully created, it can not be updated. The API Server will fail validation of all requests that attempt to mutate the Data field. ControllerRevisions may, however, be deleted. Note that, due to its use by both the DaemonSet and StatefulSet controllers for update and rollback, this object is beta. However, it may be subject to name and representation changes in future releases, and clients should not depend on its stability. It is primarily for internal use by controllers."'), '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. 
Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -19,21 +17,21 @@ withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { metadata+: { generateName: generateName } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { metadata+: { labels+: labels } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. 
Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { metadata+: { namespace: namespace } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, @@ -41,9 +39,9 @@ withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { metadata+: { uid: uid } }, }, '#new':: d.fn(help='new returns an instance of ControllerRevision', args=[d.arg(name='name', type=d.T.string)]), @@ -51,9 +49,9 @@ apiVersion: 'apps/v1', kind: 'ControllerRevision', } + self.metadata.withName(name=name), - '#withData':: d.fn(help="\"RawExtension is used to hold extensions in external versions.\\n\\nTo use this, make a field which has RawExtension as its type in your external, versioned struct, and Object in your internal struct. You also need to register your various plugin types.\\n\\n// Internal package: type MyAPIObject struct {\\n\\truntime.TypeMeta `json:\\\",inline\\\"`\\n\\tMyPlugin runtime.Object `json:\\\"myPlugin\\\"`\\n} type PluginA struct {\\n\\tAOption string `json:\\\"aOption\\\"`\\n}\\n\\n// External package: type MyAPIObject struct {\\n\\truntime.TypeMeta `json:\\\",inline\\\"`\\n\\tMyPlugin runtime.RawExtension `json:\\\"myPlugin\\\"`\\n} type PluginA struct {\\n\\tAOption string `json:\\\"aOption\\\"`\\n}\\n\\n// On the wire, the JSON will look something like this: {\\n\\t\\\"kind\\\":\\\"MyAPIObject\\\",\\n\\t\\\"apiVersion\\\":\\\"v1\\\",\\n\\t\\\"myPlugin\\\": {\\n\\t\\t\\\"kind\\\":\\\"PluginA\\\",\\n\\t\\t\\\"aOption\\\":\\\"foo\\\",\\n\\t},\\n}\\n\\nSo what happens? Decode first uses json or yaml to unmarshal the serialized data into your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked. The next step is to copy (using pkg/conversion) into the internal struct. The runtime package's DefaultScheme has conversion functions installed which will unpack the JSON stored in RawExtension, turning it into the correct object type, and storing it in the Object. 
(TODO: In the case where the object is of an unknown type, a runtime.Unknown object will be created and stored.)\"", args=[d.arg(name='data', type=d.T.object)]), + '#withData':: d.fn(help="\"RawExtension is used to hold extensions in external versions.\\n\\nTo use this, make a field which has RawExtension as its type in your external, versioned struct, and Object in your internal struct. You also need to register your various plugin types.\\n\\n// Internal package:\\n\\n\\ttype MyAPIObject struct {\\n\\t\\truntime.TypeMeta `json:\\\",inline\\\"`\\n\\t\\tMyPlugin runtime.Object `json:\\\"myPlugin\\\"`\\n\\t}\\n\\n\\ttype PluginA struct {\\n\\t\\tAOption string `json:\\\"aOption\\\"`\\n\\t}\\n\\n// External package:\\n\\n\\ttype MyAPIObject struct {\\n\\t\\truntime.TypeMeta `json:\\\",inline\\\"`\\n\\t\\tMyPlugin runtime.RawExtension `json:\\\"myPlugin\\\"`\\n\\t}\\n\\n\\ttype PluginA struct {\\n\\t\\tAOption string `json:\\\"aOption\\\"`\\n\\t}\\n\\n// On the wire, the JSON will look something like this:\\n\\n\\t{\\n\\t\\t\\\"kind\\\":\\\"MyAPIObject\\\",\\n\\t\\t\\\"apiVersion\\\":\\\"v1\\\",\\n\\t\\t\\\"myPlugin\\\": {\\n\\t\\t\\t\\\"kind\\\":\\\"PluginA\\\",\\n\\t\\t\\t\\\"aOption\\\":\\\"foo\\\",\\n\\t\\t},\\n\\t}\\n\\nSo what happens? Decode first uses json or yaml to unmarshal the serialized data into your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked. The next step is to copy (using pkg/conversion) into the internal struct. The runtime package's DefaultScheme has conversion functions installed which will unpack the JSON stored in RawExtension, turning it into the correct object type, and storing it in the Object. (TODO: In the case where the object is of an unknown type, a runtime.Unknown object will be created and stored.)\"", args=[d.arg(name='data', type=d.T.object)]), withData(data): { data: data }, - '#withDataMixin':: d.fn(help="\"RawExtension is used to hold extensions in external versions.\\n\\nTo use this, make a field which has RawExtension as its type in your external, versioned struct, and Object in your internal struct. You also need to register your various plugin types.\\n\\n// Internal package: type MyAPIObject struct {\\n\\truntime.TypeMeta `json:\\\",inline\\\"`\\n\\tMyPlugin runtime.Object `json:\\\"myPlugin\\\"`\\n} type PluginA struct {\\n\\tAOption string `json:\\\"aOption\\\"`\\n}\\n\\n// External package: type MyAPIObject struct {\\n\\truntime.TypeMeta `json:\\\",inline\\\"`\\n\\tMyPlugin runtime.RawExtension `json:\\\"myPlugin\\\"`\\n} type PluginA struct {\\n\\tAOption string `json:\\\"aOption\\\"`\\n}\\n\\n// On the wire, the JSON will look something like this: {\\n\\t\\\"kind\\\":\\\"MyAPIObject\\\",\\n\\t\\\"apiVersion\\\":\\\"v1\\\",\\n\\t\\\"myPlugin\\\": {\\n\\t\\t\\\"kind\\\":\\\"PluginA\\\",\\n\\t\\t\\\"aOption\\\":\\\"foo\\\",\\n\\t},\\n}\\n\\nSo what happens? Decode first uses json or yaml to unmarshal the serialized data into your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked. The next step is to copy (using pkg/conversion) into the internal struct. The runtime package's DefaultScheme has conversion functions installed which will unpack the JSON stored in RawExtension, turning it into the correct object type, and storing it in the Object. 
(TODO: In the case where the object is of an unknown type, a runtime.Unknown object will be created and stored.)\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='data', type=d.T.object)]), + '#withDataMixin':: d.fn(help="\"RawExtension is used to hold extensions in external versions.\\n\\nTo use this, make a field which has RawExtension as its type in your external, versioned struct, and Object in your internal struct. You also need to register your various plugin types.\\n\\n// Internal package:\\n\\n\\ttype MyAPIObject struct {\\n\\t\\truntime.TypeMeta `json:\\\",inline\\\"`\\n\\t\\tMyPlugin runtime.Object `json:\\\"myPlugin\\\"`\\n\\t}\\n\\n\\ttype PluginA struct {\\n\\t\\tAOption string `json:\\\"aOption\\\"`\\n\\t}\\n\\n// External package:\\n\\n\\ttype MyAPIObject struct {\\n\\t\\truntime.TypeMeta `json:\\\",inline\\\"`\\n\\t\\tMyPlugin runtime.RawExtension `json:\\\"myPlugin\\\"`\\n\\t}\\n\\n\\ttype PluginA struct {\\n\\t\\tAOption string `json:\\\"aOption\\\"`\\n\\t}\\n\\n// On the wire, the JSON will look something like this:\\n\\n\\t{\\n\\t\\t\\\"kind\\\":\\\"MyAPIObject\\\",\\n\\t\\t\\\"apiVersion\\\":\\\"v1\\\",\\n\\t\\t\\\"myPlugin\\\": {\\n\\t\\t\\t\\\"kind\\\":\\\"PluginA\\\",\\n\\t\\t\\t\\\"aOption\\\":\\\"foo\\\",\\n\\t\\t},\\n\\t}\\n\\nSo what happens? Decode first uses json or yaml to unmarshal the serialized data into your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked. The next step is to copy (using pkg/conversion) into the internal struct. The runtime package's DefaultScheme has conversion functions installed which will unpack the JSON stored in RawExtension, turning it into the correct object type, and storing it in the Object. (TODO: In the case where the object is of an unknown type, a runtime.Unknown object will be created and stored.)\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='data', type=d.T.object)]), withDataMixin(data): { data+: data }, '#withRevision':: d.fn(help='"Revision indicates the revision of the state represented by Data."', args=[d.arg(name='revision', type=d.T.integer)]), withRevision(revision): { revision: revision }, diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apps/v1/daemonSet.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apps/v1/daemonSet.libsonnet similarity index 86% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apps/v1/daemonSet.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apps/v1/daemonSet.libsonnet index 68fe17bcd9a..28fe965b6e6 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apps/v1/daemonSet.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apps/v1/daemonSet.libsonnet @@ -3,12 +3,10 @@ '#':: d.pkg(name='daemonSet', url='', help='"DaemonSet represents the configuration of a daemon set."'), '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. 
More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -19,21 +17,21 @@ withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. 
If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { metadata+: { generateName: generateName } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { metadata+: { labels+: labels } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. 
Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { metadata+: { namespace: namespace } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, @@ -41,9 +39,9 @@ withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. 
Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { metadata+: { uid: uid } }, }, '#new':: d.fn(help='new returns an instance of DaemonSet', args=[d.arg(name='name', type=d.T.string)]), @@ -68,12 +66,10 @@ template: { '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { spec+: { template+: { metadata+: { annotations: annotations } } } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { spec+: { template+: { metadata+: { annotations+: annotations } } } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { spec+: { template+: { metadata+: { clusterName: clusterName } } } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { spec+: { template+: { metadata+: { creationTimestamp: creationTimestamp } } } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. 
Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -84,21 +80,21 @@ withFinalizers(finalizers): { spec+: { template+: { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } } } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { spec+: { template+: { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } } } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { spec+: { template+: { metadata+: { generateName: generateName } } } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. 
Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { spec+: { template+: { metadata+: { generation: generation } } } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { spec+: { template+: { metadata+: { labels: labels } } } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { spec+: { template+: { metadata+: { labels+: labels } } } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { spec+: { template+: { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } } } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { spec+: { template+: { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } } } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. 
More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { spec+: { template+: { metadata+: { name: name } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { spec+: { template+: { metadata+: { namespace: namespace } } } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { spec+: { template+: { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } } } }, @@ -106,9 +102,9 @@ withOwnerReferencesMixin(ownerReferences): { spec+: { template+: { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } } } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { spec+: { template+: { metadata+: { resourceVersion: resourceVersion } } } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. 
Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { spec+: { template+: { metadata+: { selfLink: selfLink } } } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { spec+: { template+: { metadata+: { uid: uid } } } }, }, '#spec':: d.obj(help='"PodSpec is a description of a pod."'), @@ -167,6 +163,11 @@ '#withSearchesMixin':: d.fn(help='"A list of DNS search domains for host-name lookup. This will be appended to the base search paths generated from DNSPolicy. Duplicated search paths will be removed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='searches', type=d.T.array)]), withSearchesMixin(searches): { spec+: { template+: { spec+: { dnsConfig+: { searches+: if std.isArray(v=searches) then searches else [searches] } } } } }, }, + '#os':: d.obj(help='"PodOS defines the OS parameters of a pod."'), + os: { + '#withName':: d.fn(help='"Name is the name of the operating system. The currently supported values are linux and windows. Additional value may be defined in future and can be one of: https://github.com/opencontainers/runtime-spec/blob/master/config.md#platform-specific-configuration Clients should expect to handle additional values and treat unrecognized values in this field as os: null"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { spec+: { template+: { spec+: { os+: { name: name } } } } }, + }, '#securityContext':: d.obj(help='"PodSecurityContext holds pod-level security attributes and common container settings. Some fields are also present in container.securityContext. Field values of container.securityContext take precedence over field values of PodSecurityContext."'), securityContext: { '#seLinuxOptions':: d.obj(help='"SELinuxOptions are the labels to be applied to the container"'), @@ -182,7 +183,7 @@ }, '#seccompProfile':: d.obj(help="\"SeccompProfile defines a pod/container's seccomp profile settings. Only one profile source may be set.\""), seccompProfile: { - '#withLocalhostProfile':: d.fn(help="\"localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must only be set if type is \\\"Localhost\\\".\"", args=[d.arg(name='localhostProfile', type=d.T.string)]), + '#withLocalhostProfile':: d.fn(help="\"localhostProfile indicates a profile defined in a file on the node should be used. 
The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must be set if type is \\\"Localhost\\\". Must NOT be set for any other type.\"", args=[d.arg(name='localhostProfile', type=d.T.string)]), withLocalhostProfile(localhostProfile): { spec+: { template+: { spec+: { securityContext+: { seccompProfile+: { localhostProfile: localhostProfile } } } } } }, '#withType':: d.fn(help='"type indicates which kind of seccomp profile will be applied. Valid options are:\\n\\nLocalhost - a profile defined in a file on the node should be used. RuntimeDefault - the container runtime default profile should be used. Unconfined - no profile should be applied."', args=[d.arg(name='type', type=d.T.string)]), withType(type): { spec+: { template+: { spec+: { securityContext+: { seccompProfile+: { type: type } } } } } }, @@ -193,26 +194,28 @@ withGmsaCredentialSpec(gmsaCredentialSpec): { spec+: { template+: { spec+: { securityContext+: { windowsOptions+: { gmsaCredentialSpec: gmsaCredentialSpec } } } } } }, '#withGmsaCredentialSpecName':: d.fn(help='"GMSACredentialSpecName is the name of the GMSA credential spec to use."', args=[d.arg(name='gmsaCredentialSpecName', type=d.T.string)]), withGmsaCredentialSpecName(gmsaCredentialSpecName): { spec+: { template+: { spec+: { securityContext+: { windowsOptions+: { gmsaCredentialSpecName: gmsaCredentialSpecName } } } } } }, + '#withHostProcess':: d.fn(help="\"HostProcess determines if a container should be run as a 'Host Process' container. All of a Pod's containers must have the same effective HostProcess value (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). In addition, if HostProcess is true then HostNetwork must also be set to true.\"", args=[d.arg(name='hostProcess', type=d.T.boolean)]), + withHostProcess(hostProcess): { spec+: { template+: { spec+: { securityContext+: { windowsOptions+: { hostProcess: hostProcess } } } } } }, '#withRunAsUserName':: d.fn(help='"The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence."', args=[d.arg(name='runAsUserName', type=d.T.string)]), withRunAsUserName(runAsUserName): { spec+: { template+: { spec+: { securityContext+: { windowsOptions+: { runAsUserName: runAsUserName } } } } } }, }, - '#withFsGroup':: d.fn(help="\"A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod:\\n\\n1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw----\\n\\nIf unset, the Kubelet will not modify the ownership and permissions of any volume.\"", args=[d.arg(name='fsGroup', type=d.T.integer)]), + '#withFsGroup':: d.fn(help="\"A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod:\\n\\n1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw----\\n\\nIf unset, the Kubelet will not modify the ownership and permissions of any volume. 
Note that this field cannot be set when spec.os.name is windows.\"", args=[d.arg(name='fsGroup', type=d.T.integer)]), withFsGroup(fsGroup): { spec+: { template+: { spec+: { securityContext+: { fsGroup: fsGroup } } } } }, - '#withFsGroupChangePolicy':: d.fn(help='"fsGroupChangePolicy defines behavior of changing ownership and permission of the volume before being exposed inside Pod. This field will only apply to volume types which support fsGroup based ownership(and permissions). It will have no effect on ephemeral volume types such as: secret, configmaps and emptydir. Valid values are \\"OnRootMismatch\\" and \\"Always\\". If not specified, \\"Always\\" is used."', args=[d.arg(name='fsGroupChangePolicy', type=d.T.string)]), + '#withFsGroupChangePolicy':: d.fn(help='"fsGroupChangePolicy defines behavior of changing ownership and permission of the volume before being exposed inside Pod. This field will only apply to volume types which support fsGroup based ownership(and permissions). It will have no effect on ephemeral volume types such as: secret, configmaps and emptydir. Valid values are \\"OnRootMismatch\\" and \\"Always\\". If not specified, \\"Always\\" is used. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='fsGroupChangePolicy', type=d.T.string)]), withFsGroupChangePolicy(fsGroupChangePolicy): { spec+: { template+: { spec+: { securityContext+: { fsGroupChangePolicy: fsGroupChangePolicy } } } } }, - '#withRunAsGroup':: d.fn(help='"The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container."', args=[d.arg(name='runAsGroup', type=d.T.integer)]), + '#withRunAsGroup':: d.fn(help='"The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='runAsGroup', type=d.T.integer)]), withRunAsGroup(runAsGroup): { spec+: { template+: { spec+: { securityContext+: { runAsGroup: runAsGroup } } } } }, '#withRunAsNonRoot':: d.fn(help='"Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence."', args=[d.arg(name='runAsNonRoot', type=d.T.boolean)]), withRunAsNonRoot(runAsNonRoot): { spec+: { template+: { spec+: { securityContext+: { runAsNonRoot: runAsNonRoot } } } } }, - '#withRunAsUser':: d.fn(help='"The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container."', args=[d.arg(name='runAsUser', type=d.T.integer)]), + '#withRunAsUser':: d.fn(help='"The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. 
If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='runAsUser', type=d.T.integer)]), withRunAsUser(runAsUser): { spec+: { template+: { spec+: { securityContext+: { runAsUser: runAsUser } } } } }, - '#withSupplementalGroups':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID. If unspecified, no groups will be added to any container.\"", args=[d.arg(name='supplementalGroups', type=d.T.array)]), + '#withSupplementalGroups':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID, the fsGroup (if specified), and group memberships defined in the container image for the uid of the container process. If unspecified, no additional groups are added to any container. Note that group memberships defined in the container image for the uid of the container process are still effective, even if they are not included in this list. Note that this field cannot be set when spec.os.name is windows.\"", args=[d.arg(name='supplementalGroups', type=d.T.array)]), withSupplementalGroups(supplementalGroups): { spec+: { template+: { spec+: { securityContext+: { supplementalGroups: if std.isArray(v=supplementalGroups) then supplementalGroups else [supplementalGroups] } } } } }, - '#withSupplementalGroupsMixin':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID. If unspecified, no groups will be added to any container.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='supplementalGroups', type=d.T.array)]), + '#withSupplementalGroupsMixin':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID, the fsGroup (if specified), and group memberships defined in the container image for the uid of the container process. If unspecified, no additional groups are added to any container. Note that group memberships defined in the container image for the uid of the container process are still effective, even if they are not included in this list. Note that this field cannot be set when spec.os.name is windows.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='supplementalGroups', type=d.T.array)]), withSupplementalGroupsMixin(supplementalGroups): { spec+: { template+: { spec+: { securityContext+: { supplementalGroups+: if std.isArray(v=supplementalGroups) then supplementalGroups else [supplementalGroups] } } } } }, - '#withSysctls':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch."', args=[d.arg(name='sysctls', type=d.T.array)]), + '#withSysctls':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='sysctls', type=d.T.array)]), withSysctls(sysctls): { spec+: { template+: { spec+: { securityContext+: { sysctls: if std.isArray(v=sysctls) then sysctls else [sysctls] } } } } }, - '#withSysctlsMixin':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. 
Pods with unsupported sysctls (by the container runtime) might fail to launch."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='sysctls', type=d.T.array)]), + '#withSysctlsMixin':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch. Note that this field cannot be set when spec.os.name is windows."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='sysctls', type=d.T.array)]), withSysctlsMixin(sysctls): { spec+: { template+: { spec+: { securityContext+: { sysctls+: if std.isArray(v=sysctls) then sysctls else [sysctls] } } } } }, }, '#withActiveDeadlineSeconds':: d.fn(help='"Optional duration in seconds the pod may be active on the node relative to StartTime before the system will actively try to mark it failed and kill associated containers. Value must be a positive integer."', args=[d.arg(name='activeDeadlineSeconds', type=d.T.integer)]), @@ -227,9 +230,9 @@ withDnsPolicy(dnsPolicy): { spec+: { template+: { spec+: { dnsPolicy: dnsPolicy } } } }, '#withEnableServiceLinks':: d.fn(help="\"EnableServiceLinks indicates whether information about services should be injected into pod's environment variables, matching the syntax of Docker links. Optional: Defaults to true.\"", args=[d.arg(name='enableServiceLinks', type=d.T.boolean)]), withEnableServiceLinks(enableServiceLinks): { spec+: { template+: { spec+: { enableServiceLinks: enableServiceLinks } } } }, - '#withEphemeralContainers':: d.fn(help="\"List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. This field is alpha-level and is only honored by servers that enable the EphemeralContainers feature.\"", args=[d.arg(name='ephemeralContainers', type=d.T.array)]), + '#withEphemeralContainers':: d.fn(help="\"List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource.\"", args=[d.arg(name='ephemeralContainers', type=d.T.array)]), withEphemeralContainers(ephemeralContainers): { spec+: { template+: { spec+: { ephemeralContainers: if std.isArray(v=ephemeralContainers) then ephemeralContainers else [ephemeralContainers] } } } }, - '#withEphemeralContainersMixin':: d.fn(help="\"List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. This field is alpha-level and is only honored by servers that enable the EphemeralContainers feature.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='ephemeralContainers', type=d.T.array)]), + '#withEphemeralContainersMixin':: d.fn(help="\"List of ephemeral containers run in this pod. 
Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='ephemeralContainers', type=d.T.array)]), withEphemeralContainersMixin(ephemeralContainers): { spec+: { template+: { spec+: { ephemeralContainers+: if std.isArray(v=ephemeralContainers) then ephemeralContainers else [ephemeralContainers] } } } }, '#withHostAliases':: d.fn(help="\"HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts file if specified. This is only valid for non-hostNetwork pods.\"", args=[d.arg(name='hostAliases', type=d.T.array)]), withHostAliases(hostAliases): { spec+: { template+: { spec+: { hostAliases: if std.isArray(v=hostAliases) then hostAliases else [hostAliases] } } } }, @@ -241,11 +244,13 @@ withHostNetwork(hostNetwork): { spec+: { template+: { spec+: { hostNetwork: hostNetwork } } } }, '#withHostPID':: d.fn(help="\"Use the host's pid namespace. Optional: Default to false.\"", args=[d.arg(name='hostPID', type=d.T.boolean)]), withHostPID(hostPID): { spec+: { template+: { spec+: { hostPID: hostPID } } } }, + '#withHostUsers':: d.fn(help="\"Use the host's user namespace. Optional: Default to true. If set to true or not present, the pod will be run in the host user namespace, useful for when the pod needs a feature only available to the host user namespace, such as loading a kernel module with CAP_SYS_MODULE. When set to false, a new userns is created for the pod. Setting false is useful for mitigating container breakout vulnerabilities even allowing users to run their containers as root without actually having root privileges on the host. This field is alpha-level and is only honored by servers that enable the UserNamespacesSupport feature.\"", args=[d.arg(name='hostUsers', type=d.T.boolean)]), + withHostUsers(hostUsers): { spec+: { template+: { spec+: { hostUsers: hostUsers } } } }, '#withHostname':: d.fn(help="\"Specifies the hostname of the Pod If not specified, the pod's hostname will be set to a system-defined value.\"", args=[d.arg(name='hostname', type=d.T.string)]), withHostname(hostname): { spec+: { template+: { spec+: { hostname: hostname } } } }, - '#withImagePullSecrets':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), + '#withImagePullSecrets':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. 
More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), withImagePullSecrets(imagePullSecrets): { spec+: { template+: { spec+: { imagePullSecrets: if std.isArray(v=imagePullSecrets) then imagePullSecrets else [imagePullSecrets] } } } }, - '#withImagePullSecretsMixin':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), + '#withImagePullSecretsMixin':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), withImagePullSecretsMixin(imagePullSecrets): { spec+: { template+: { spec+: { imagePullSecrets+: if std.isArray(v=imagePullSecrets) then imagePullSecrets else [imagePullSecrets] } } } }, '#withInitContainers':: d.fn(help='"List of initialization containers belonging to the pod. Init containers are executed in order prior to containers being started. If any init container fails, the pod is considered to have failed and is handled according to its restartPolicy. The name for an init container or normal container must be unique among all containers. Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes. The resourceRequirements of an init container are taken into account during scheduling by finding the highest request/limit for each resource type, and then using the max of of that value or the sum of the normal containers. Limits are applied to init containers in a similar fashion. Init containers cannot currently be added or removed. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/"', args=[d.arg(name='initContainers', type=d.T.array)]), withInitContainers(initContainers): { spec+: { template+: { spec+: { initContainers: if std.isArray(v=initContainers) then initContainers else [initContainers] } } } }, @@ -257,26 +262,34 @@ withNodeSelector(nodeSelector): { spec+: { template+: { spec+: { nodeSelector: nodeSelector } } } }, '#withNodeSelectorMixin':: d.fn(help="\"NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='nodeSelector', type=d.T.object)]), withNodeSelectorMixin(nodeSelector): { spec+: { template+: { spec+: { nodeSelector+: nodeSelector } } } }, - '#withOverhead':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. 
This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md This field is alpha-level as of Kubernetes v1.16, and is only honored by servers that enable the PodOverhead feature."', args=[d.arg(name='overhead', type=d.T.object)]), + '#withOverhead':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md"', args=[d.arg(name='overhead', type=d.T.object)]), withOverhead(overhead): { spec+: { template+: { spec+: { overhead: overhead } } } }, - '#withOverheadMixin':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md This field is alpha-level as of Kubernetes v1.16, and is only honored by servers that enable the PodOverhead feature."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='overhead', type=d.T.object)]), + '#withOverheadMixin':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. 
More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='overhead', type=d.T.object)]), withOverheadMixin(overhead): { spec+: { template+: { spec+: { overhead+: overhead } } } }, - '#withPreemptionPolicy':: d.fn(help='"PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset. This field is beta-level, gated by the NonPreemptingPriority feature-gate."', args=[d.arg(name='preemptionPolicy', type=d.T.string)]), + '#withPreemptionPolicy':: d.fn(help='"PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset."', args=[d.arg(name='preemptionPolicy', type=d.T.string)]), withPreemptionPolicy(preemptionPolicy): { spec+: { template+: { spec+: { preemptionPolicy: preemptionPolicy } } } }, '#withPriority':: d.fn(help='"The priority value. Various system components use this field to find the priority of the pod. When Priority Admission Controller is enabled, it prevents users from setting this field. The admission controller populates this field from PriorityClassName. The higher the value, the higher the priority."', args=[d.arg(name='priority', type=d.T.integer)]), withPriority(priority): { spec+: { template+: { spec+: { priority: priority } } } }, '#withPriorityClassName':: d.fn(help="\"If specified, indicates the pod's priority. \\\"system-node-critical\\\" and \\\"system-cluster-critical\\\" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default.\"", args=[d.arg(name='priorityClassName', type=d.T.string)]), withPriorityClassName(priorityClassName): { spec+: { template+: { spec+: { priorityClassName: priorityClassName } } } }, - '#withReadinessGates':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/0007-pod-ready%2B%2B.md"', args=[d.arg(name='readinessGates', type=d.T.array)]), + '#withReadinessGates':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates"', args=[d.arg(name='readinessGates', type=d.T.array)]), withReadinessGates(readinessGates): { spec+: { template+: { spec+: { readinessGates: if std.isArray(v=readinessGates) then readinessGates else [readinessGates] } } } }, - '#withReadinessGatesMixin':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. 
A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/0007-pod-ready%2B%2B.md"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='readinessGates', type=d.T.array)]), + '#withReadinessGatesMixin':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='readinessGates', type=d.T.array)]), withReadinessGatesMixin(readinessGates): { spec+: { template+: { spec+: { readinessGates+: if std.isArray(v=readinessGates) then readinessGates else [readinessGates] } } } }, - '#withRestartPolicy':: d.fn(help='"Restart policy for all containers within the pod. One of Always, OnFailure, Never. Default to Always. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy"', args=[d.arg(name='restartPolicy', type=d.T.string)]), + '#withResourceClaims':: d.fn(help='"ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. The resources will be made available to those containers which consume them by name.\\n\\nThis is an alpha field and requires enabling the DynamicResourceAllocation feature gate.\\n\\nThis field is immutable."', args=[d.arg(name='resourceClaims', type=d.T.array)]), + withResourceClaims(resourceClaims): { spec+: { template+: { spec+: { resourceClaims: if std.isArray(v=resourceClaims) then resourceClaims else [resourceClaims] } } } }, + '#withResourceClaimsMixin':: d.fn(help='"ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. The resources will be made available to those containers which consume them by name.\\n\\nThis is an alpha field and requires enabling the DynamicResourceAllocation feature gate.\\n\\nThis field is immutable."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='resourceClaims', type=d.T.array)]), + withResourceClaimsMixin(resourceClaims): { spec+: { template+: { spec+: { resourceClaims+: if std.isArray(v=resourceClaims) then resourceClaims else [resourceClaims] } } } }, + '#withRestartPolicy':: d.fn(help='"Restart policy for all containers within the pod. One of Always, OnFailure, Never. In some contexts, only a subset of those values may be permitted. Default to Always. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy"', args=[d.arg(name='restartPolicy', type=d.T.string)]), withRestartPolicy(restartPolicy): { spec+: { template+: { spec+: { restartPolicy: restartPolicy } } } }, - '#withRuntimeClassName':: d.fn(help='"RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \\"legacy\\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. 
More info: https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md This is a beta feature as of Kubernetes v1.14."', args=[d.arg(name='runtimeClassName', type=d.T.string)]), + '#withRuntimeClassName':: d.fn(help='"RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \\"legacy\\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class"', args=[d.arg(name='runtimeClassName', type=d.T.string)]), withRuntimeClassName(runtimeClassName): { spec+: { template+: { spec+: { runtimeClassName: runtimeClassName } } } }, '#withSchedulerName':: d.fn(help='"If specified, the pod will be dispatched by specified scheduler. If not specified, the pod will be dispatched by default scheduler."', args=[d.arg(name='schedulerName', type=d.T.string)]), withSchedulerName(schedulerName): { spec+: { template+: { spec+: { schedulerName: schedulerName } } } }, + '#withSchedulingGates':: d.fn(help='"SchedulingGates is an opaque list of values that if specified will block scheduling the pod. If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the scheduler will not attempt to schedule the pod.\\n\\nSchedulingGates can only be set at pod creation time, and be removed only afterwards.\\n\\nThis is a beta feature enabled by the PodSchedulingReadiness feature gate."', args=[d.arg(name='schedulingGates', type=d.T.array)]), + withSchedulingGates(schedulingGates): { spec+: { template+: { spec+: { schedulingGates: if std.isArray(v=schedulingGates) then schedulingGates else [schedulingGates] } } } }, + '#withSchedulingGatesMixin':: d.fn(help='"SchedulingGates is an opaque list of values that if specified will block scheduling the pod. If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the scheduler will not attempt to schedule the pod.\\n\\nSchedulingGates can only be set at pod creation time, and be removed only afterwards.\\n\\nThis is a beta feature enabled by the PodSchedulingReadiness feature gate."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='schedulingGates', type=d.T.array)]), + withSchedulingGatesMixin(schedulingGates): { spec+: { template+: { spec+: { schedulingGates+: if std.isArray(v=schedulingGates) then schedulingGates else [schedulingGates] } } } }, '#withServiceAccount':: d.fn(help='"DeprecatedServiceAccount is a depreciated alias for ServiceAccountName. Deprecated: Use serviceAccountName instead."', args=[d.arg(name='serviceAccount', type=d.T.string)]), withServiceAccount(serviceAccount): { spec+: { template+: { spec+: { serviceAccount: serviceAccount } } } }, '#withServiceAccountName':: d.fn(help='"ServiceAccountName is the name of the ServiceAccount to use to run this pod. 
More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/"', args=[d.arg(name='serviceAccountName', type=d.T.string)]),
diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apps/v1/daemonSetCondition.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apps/v1/daemonSetCondition.libsonnet
similarity index 100%
rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apps/v1/daemonSetCondition.libsonnet
rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apps/v1/daemonSetCondition.libsonnet
diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apps/v1/daemonSetSpec.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apps/v1/daemonSetSpec.libsonnet
similarity index 85%
rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apps/v1/daemonSetSpec.libsonnet
rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apps/v1/daemonSetSpec.libsonnet
index 8bb51aeb299..ef7c7925767 100644
--- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apps/v1/daemonSetSpec.libsonnet
+++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apps/v1/daemonSetSpec.libsonnet
@@ -16,12 +16,10 @@
template: {
'#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'),
metadata: {
- '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]),
+ '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]),
withAnnotations(annotations): { template+: { metadata+: { annotations: annotations } } },
- '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { template+: { metadata+: { annotations+: annotations } } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { template+: { metadata+: { clusterName: clusterName } } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { template+: { metadata+: { creationTimestamp: creationTimestamp } } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -32,21 +30,21 @@ withFinalizers(finalizers): { template+: { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { template+: { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. 
The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { template+: { metadata+: { generateName: generateName } } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { template+: { metadata+: { generation: generation } } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { template+: { metadata+: { labels: labels } } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { template+: { metadata+: { labels+: labels } } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. 
This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { template+: { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { template+: { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { template+: { metadata+: { name: name } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { template+: { metadata+: { namespace: namespace } } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. 
If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { template+: { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } } }, @@ -54,9 +52,9 @@ withOwnerReferencesMixin(ownerReferences): { template+: { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { template+: { metadata+: { resourceVersion: resourceVersion } } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { template+: { metadata+: { selfLink: selfLink } } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { template+: { metadata+: { uid: uid } } }, }, '#spec':: d.obj(help='"PodSpec is a description of a pod."'), @@ -115,6 +113,11 @@ '#withSearchesMixin':: d.fn(help='"A list of DNS search domains for host-name lookup. This will be appended to the base search paths generated from DNSPolicy. Duplicated search paths will be removed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='searches', type=d.T.array)]), withSearchesMixin(searches): { template+: { spec+: { dnsConfig+: { searches+: if std.isArray(v=searches) then searches else [searches] } } } }, }, + '#os':: d.obj(help='"PodOS defines the OS parameters of a pod."'), + os: { + '#withName':: d.fn(help='"Name is the name of the operating system. The currently supported values are linux and windows. 
Additional value may be defined in future and can be one of: https://github.com/opencontainers/runtime-spec/blob/master/config.md#platform-specific-configuration Clients should expect to handle additional values and treat unrecognized values in this field as os: null"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { template+: { spec+: { os+: { name: name } } } }, + }, '#securityContext':: d.obj(help='"PodSecurityContext holds pod-level security attributes and common container settings. Some fields are also present in container.securityContext. Field values of container.securityContext take precedence over field values of PodSecurityContext."'), securityContext: { '#seLinuxOptions':: d.obj(help='"SELinuxOptions are the labels to be applied to the container"'), @@ -130,7 +133,7 @@ }, '#seccompProfile':: d.obj(help="\"SeccompProfile defines a pod/container's seccomp profile settings. Only one profile source may be set.\""), seccompProfile: { - '#withLocalhostProfile':: d.fn(help="\"localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must only be set if type is \\\"Localhost\\\".\"", args=[d.arg(name='localhostProfile', type=d.T.string)]), + '#withLocalhostProfile':: d.fn(help="\"localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must be set if type is \\\"Localhost\\\". Must NOT be set for any other type.\"", args=[d.arg(name='localhostProfile', type=d.T.string)]), withLocalhostProfile(localhostProfile): { template+: { spec+: { securityContext+: { seccompProfile+: { localhostProfile: localhostProfile } } } } }, '#withType':: d.fn(help='"type indicates which kind of seccomp profile will be applied. Valid options are:\\n\\nLocalhost - a profile defined in a file on the node should be used. RuntimeDefault - the container runtime default profile should be used. Unconfined - no profile should be applied."', args=[d.arg(name='type', type=d.T.string)]), withType(type): { template+: { spec+: { securityContext+: { seccompProfile+: { type: type } } } } }, @@ -141,26 +144,28 @@ withGmsaCredentialSpec(gmsaCredentialSpec): { template+: { spec+: { securityContext+: { windowsOptions+: { gmsaCredentialSpec: gmsaCredentialSpec } } } } }, '#withGmsaCredentialSpecName':: d.fn(help='"GMSACredentialSpecName is the name of the GMSA credential spec to use."', args=[d.arg(name='gmsaCredentialSpecName', type=d.T.string)]), withGmsaCredentialSpecName(gmsaCredentialSpecName): { template+: { spec+: { securityContext+: { windowsOptions+: { gmsaCredentialSpecName: gmsaCredentialSpecName } } } } }, + '#withHostProcess':: d.fn(help="\"HostProcess determines if a container should be run as a 'Host Process' container. All of a Pod's containers must have the same effective HostProcess value (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). In addition, if HostProcess is true then HostNetwork must also be set to true.\"", args=[d.arg(name='hostProcess', type=d.T.boolean)]), + withHostProcess(hostProcess): { template+: { spec+: { securityContext+: { windowsOptions+: { hostProcess: hostProcess } } } } }, '#withRunAsUserName':: d.fn(help='"The UserName in Windows to run the entrypoint of the container process. 
Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence."', args=[d.arg(name='runAsUserName', type=d.T.string)]), withRunAsUserName(runAsUserName): { template+: { spec+: { securityContext+: { windowsOptions+: { runAsUserName: runAsUserName } } } } }, }, - '#withFsGroup':: d.fn(help="\"A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod:\\n\\n1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw----\\n\\nIf unset, the Kubelet will not modify the ownership and permissions of any volume.\"", args=[d.arg(name='fsGroup', type=d.T.integer)]), + '#withFsGroup':: d.fn(help="\"A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod:\\n\\n1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw----\\n\\nIf unset, the Kubelet will not modify the ownership and permissions of any volume. Note that this field cannot be set when spec.os.name is windows.\"", args=[d.arg(name='fsGroup', type=d.T.integer)]), withFsGroup(fsGroup): { template+: { spec+: { securityContext+: { fsGroup: fsGroup } } } }, - '#withFsGroupChangePolicy':: d.fn(help='"fsGroupChangePolicy defines behavior of changing ownership and permission of the volume before being exposed inside Pod. This field will only apply to volume types which support fsGroup based ownership(and permissions). It will have no effect on ephemeral volume types such as: secret, configmaps and emptydir. Valid values are \\"OnRootMismatch\\" and \\"Always\\". If not specified, \\"Always\\" is used."', args=[d.arg(name='fsGroupChangePolicy', type=d.T.string)]), + '#withFsGroupChangePolicy':: d.fn(help='"fsGroupChangePolicy defines behavior of changing ownership and permission of the volume before being exposed inside Pod. This field will only apply to volume types which support fsGroup based ownership(and permissions). It will have no effect on ephemeral volume types such as: secret, configmaps and emptydir. Valid values are \\"OnRootMismatch\\" and \\"Always\\". If not specified, \\"Always\\" is used. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='fsGroupChangePolicy', type=d.T.string)]), withFsGroupChangePolicy(fsGroupChangePolicy): { template+: { spec+: { securityContext+: { fsGroupChangePolicy: fsGroupChangePolicy } } } }, - '#withRunAsGroup':: d.fn(help='"The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container."', args=[d.arg(name='runAsGroup', type=d.T.integer)]), + '#withRunAsGroup':: d.fn(help='"The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. 
Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='runAsGroup', type=d.T.integer)]), withRunAsGroup(runAsGroup): { template+: { spec+: { securityContext+: { runAsGroup: runAsGroup } } } }, '#withRunAsNonRoot':: d.fn(help='"Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence."', args=[d.arg(name='runAsNonRoot', type=d.T.boolean)]), withRunAsNonRoot(runAsNonRoot): { template+: { spec+: { securityContext+: { runAsNonRoot: runAsNonRoot } } } }, - '#withRunAsUser':: d.fn(help='"The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container."', args=[d.arg(name='runAsUser', type=d.T.integer)]), + '#withRunAsUser':: d.fn(help='"The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='runAsUser', type=d.T.integer)]), withRunAsUser(runAsUser): { template+: { spec+: { securityContext+: { runAsUser: runAsUser } } } }, - '#withSupplementalGroups':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID. If unspecified, no groups will be added to any container.\"", args=[d.arg(name='supplementalGroups', type=d.T.array)]), + '#withSupplementalGroups':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID, the fsGroup (if specified), and group memberships defined in the container image for the uid of the container process. If unspecified, no additional groups are added to any container. Note that group memberships defined in the container image for the uid of the container process are still effective, even if they are not included in this list. Note that this field cannot be set when spec.os.name is windows.\"", args=[d.arg(name='supplementalGroups', type=d.T.array)]), withSupplementalGroups(supplementalGroups): { template+: { spec+: { securityContext+: { supplementalGroups: if std.isArray(v=supplementalGroups) then supplementalGroups else [supplementalGroups] } } } }, - '#withSupplementalGroupsMixin':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID. If unspecified, no groups will be added to any container.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='supplementalGroups', type=d.T.array)]), + '#withSupplementalGroupsMixin':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID, the fsGroup (if specified), and group memberships defined in the container image for the uid of the container process. If unspecified, no additional groups are added to any container. 
Note that group memberships defined in the container image for the uid of the container process are still effective, even if they are not included in this list. Note that this field cannot be set when spec.os.name is windows.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='supplementalGroups', type=d.T.array)]), withSupplementalGroupsMixin(supplementalGroups): { template+: { spec+: { securityContext+: { supplementalGroups+: if std.isArray(v=supplementalGroups) then supplementalGroups else [supplementalGroups] } } } }, - '#withSysctls':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch."', args=[d.arg(name='sysctls', type=d.T.array)]), + '#withSysctls':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='sysctls', type=d.T.array)]), withSysctls(sysctls): { template+: { spec+: { securityContext+: { sysctls: if std.isArray(v=sysctls) then sysctls else [sysctls] } } } }, - '#withSysctlsMixin':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='sysctls', type=d.T.array)]), + '#withSysctlsMixin':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch. Note that this field cannot be set when spec.os.name is windows."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='sysctls', type=d.T.array)]), withSysctlsMixin(sysctls): { template+: { spec+: { securityContext+: { sysctls+: if std.isArray(v=sysctls) then sysctls else [sysctls] } } } }, }, '#withActiveDeadlineSeconds':: d.fn(help='"Optional duration in seconds the pod may be active on the node relative to StartTime before the system will actively try to mark it failed and kill associated containers. Value must be a positive integer."', args=[d.arg(name='activeDeadlineSeconds', type=d.T.integer)]), @@ -175,9 +180,9 @@ withDnsPolicy(dnsPolicy): { template+: { spec+: { dnsPolicy: dnsPolicy } } }, '#withEnableServiceLinks':: d.fn(help="\"EnableServiceLinks indicates whether information about services should be injected into pod's environment variables, matching the syntax of Docker links. Optional: Defaults to true.\"", args=[d.arg(name='enableServiceLinks', type=d.T.boolean)]), withEnableServiceLinks(enableServiceLinks): { template+: { spec+: { enableServiceLinks: enableServiceLinks } } }, - '#withEphemeralContainers':: d.fn(help="\"List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. This field is alpha-level and is only honored by servers that enable the EphemeralContainers feature.\"", args=[d.arg(name='ephemeralContainers', type=d.T.array)]), + '#withEphemeralContainers':: d.fn(help="\"List of ephemeral containers run in this pod. 
Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource.\"", args=[d.arg(name='ephemeralContainers', type=d.T.array)]), withEphemeralContainers(ephemeralContainers): { template+: { spec+: { ephemeralContainers: if std.isArray(v=ephemeralContainers) then ephemeralContainers else [ephemeralContainers] } } }, - '#withEphemeralContainersMixin':: d.fn(help="\"List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. This field is alpha-level and is only honored by servers that enable the EphemeralContainers feature.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='ephemeralContainers', type=d.T.array)]), + '#withEphemeralContainersMixin':: d.fn(help="\"List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='ephemeralContainers', type=d.T.array)]), withEphemeralContainersMixin(ephemeralContainers): { template+: { spec+: { ephemeralContainers+: if std.isArray(v=ephemeralContainers) then ephemeralContainers else [ephemeralContainers] } } }, '#withHostAliases':: d.fn(help="\"HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts file if specified. This is only valid for non-hostNetwork pods.\"", args=[d.arg(name='hostAliases', type=d.T.array)]), withHostAliases(hostAliases): { template+: { spec+: { hostAliases: if std.isArray(v=hostAliases) then hostAliases else [hostAliases] } } }, @@ -189,11 +194,13 @@ withHostNetwork(hostNetwork): { template+: { spec+: { hostNetwork: hostNetwork } } }, '#withHostPID':: d.fn(help="\"Use the host's pid namespace. Optional: Default to false.\"", args=[d.arg(name='hostPID', type=d.T.boolean)]), withHostPID(hostPID): { template+: { spec+: { hostPID: hostPID } } }, + '#withHostUsers':: d.fn(help="\"Use the host's user namespace. Optional: Default to true. If set to true or not present, the pod will be run in the host user namespace, useful for when the pod needs a feature only available to the host user namespace, such as loading a kernel module with CAP_SYS_MODULE. When set to false, a new userns is created for the pod. Setting false is useful for mitigating container breakout vulnerabilities even allowing users to run their containers as root without actually having root privileges on the host. 
This field is alpha-level and is only honored by servers that enable the UserNamespacesSupport feature.\"", args=[d.arg(name='hostUsers', type=d.T.boolean)]), + withHostUsers(hostUsers): { template+: { spec+: { hostUsers: hostUsers } } }, '#withHostname':: d.fn(help="\"Specifies the hostname of the Pod If not specified, the pod's hostname will be set to a system-defined value.\"", args=[d.arg(name='hostname', type=d.T.string)]), withHostname(hostname): { template+: { spec+: { hostname: hostname } } }, - '#withImagePullSecrets':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), + '#withImagePullSecrets':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), withImagePullSecrets(imagePullSecrets): { template+: { spec+: { imagePullSecrets: if std.isArray(v=imagePullSecrets) then imagePullSecrets else [imagePullSecrets] } } }, - '#withImagePullSecretsMixin':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), + '#withImagePullSecretsMixin':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), withImagePullSecretsMixin(imagePullSecrets): { template+: { spec+: { imagePullSecrets+: if std.isArray(v=imagePullSecrets) then imagePullSecrets else [imagePullSecrets] } } }, '#withInitContainers':: d.fn(help='"List of initialization containers belonging to the pod. Init containers are executed in order prior to containers being started. If any init container fails, the pod is considered to have failed and is handled according to its restartPolicy. The name for an init container or normal container must be unique among all containers. Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes. 
The resourceRequirements of an init container are taken into account during scheduling by finding the highest request/limit for each resource type, and then using the max of of that value or the sum of the normal containers. Limits are applied to init containers in a similar fashion. Init containers cannot currently be added or removed. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/"', args=[d.arg(name='initContainers', type=d.T.array)]), withInitContainers(initContainers): { template+: { spec+: { initContainers: if std.isArray(v=initContainers) then initContainers else [initContainers] } } }, @@ -205,26 +212,34 @@ withNodeSelector(nodeSelector): { template+: { spec+: { nodeSelector: nodeSelector } } }, '#withNodeSelectorMixin':: d.fn(help="\"NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='nodeSelector', type=d.T.object)]), withNodeSelectorMixin(nodeSelector): { template+: { spec+: { nodeSelector+: nodeSelector } } }, - '#withOverhead':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md This field is alpha-level as of Kubernetes v1.16, and is only honored by servers that enable the PodOverhead feature."', args=[d.arg(name='overhead', type=d.T.object)]), + '#withOverhead':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md"', args=[d.arg(name='overhead', type=d.T.object)]), withOverhead(overhead): { template+: { spec+: { overhead: overhead } } }, - '#withOverheadMixin':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. 
If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md This field is alpha-level as of Kubernetes v1.16, and is only honored by servers that enable the PodOverhead feature."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='overhead', type=d.T.object)]), + '#withOverheadMixin':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='overhead', type=d.T.object)]), withOverheadMixin(overhead): { template+: { spec+: { overhead+: overhead } } }, - '#withPreemptionPolicy':: d.fn(help='"PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset. This field is beta-level, gated by the NonPreemptingPriority feature-gate."', args=[d.arg(name='preemptionPolicy', type=d.T.string)]), + '#withPreemptionPolicy':: d.fn(help='"PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset."', args=[d.arg(name='preemptionPolicy', type=d.T.string)]), withPreemptionPolicy(preemptionPolicy): { template+: { spec+: { preemptionPolicy: preemptionPolicy } } }, '#withPriority':: d.fn(help='"The priority value. Various system components use this field to find the priority of the pod. When Priority Admission Controller is enabled, it prevents users from setting this field. The admission controller populates this field from PriorityClassName. The higher the value, the higher the priority."', args=[d.arg(name='priority', type=d.T.integer)]), withPriority(priority): { template+: { spec+: { priority: priority } } }, '#withPriorityClassName':: d.fn(help="\"If specified, indicates the pod's priority. \\\"system-node-critical\\\" and \\\"system-cluster-critical\\\" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default.\"", args=[d.arg(name='priorityClassName', type=d.T.string)]), withPriorityClassName(priorityClassName): { template+: { spec+: { priorityClassName: priorityClassName } } }, - '#withReadinessGates':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. 
A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/0007-pod-ready%2B%2B.md"', args=[d.arg(name='readinessGates', type=d.T.array)]), + '#withReadinessGates':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates"', args=[d.arg(name='readinessGates', type=d.T.array)]), withReadinessGates(readinessGates): { template+: { spec+: { readinessGates: if std.isArray(v=readinessGates) then readinessGates else [readinessGates] } } }, - '#withReadinessGatesMixin':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/0007-pod-ready%2B%2B.md"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='readinessGates', type=d.T.array)]), + '#withReadinessGatesMixin':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='readinessGates', type=d.T.array)]), withReadinessGatesMixin(readinessGates): { template+: { spec+: { readinessGates+: if std.isArray(v=readinessGates) then readinessGates else [readinessGates] } } }, - '#withRestartPolicy':: d.fn(help='"Restart policy for all containers within the pod. One of Always, OnFailure, Never. Default to Always. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy"', args=[d.arg(name='restartPolicy', type=d.T.string)]), + '#withResourceClaims':: d.fn(help='"ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. The resources will be made available to those containers which consume them by name.\\n\\nThis is an alpha field and requires enabling the DynamicResourceAllocation feature gate.\\n\\nThis field is immutable."', args=[d.arg(name='resourceClaims', type=d.T.array)]), + withResourceClaims(resourceClaims): { template+: { spec+: { resourceClaims: if std.isArray(v=resourceClaims) then resourceClaims else [resourceClaims] } } }, + '#withResourceClaimsMixin':: d.fn(help='"ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. The resources will be made available to those containers which consume them by name.\\n\\nThis is an alpha field and requires enabling the DynamicResourceAllocation feature gate.\\n\\nThis field is immutable."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='resourceClaims', type=d.T.array)]), + withResourceClaimsMixin(resourceClaims): { template+: { spec+: { resourceClaims+: if std.isArray(v=resourceClaims) then resourceClaims else [resourceClaims] } } }, + '#withRestartPolicy':: d.fn(help='"Restart policy for all containers within the pod. One of Always, OnFailure, Never. 
In some contexts, only a subset of those values may be permitted. Default to Always. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy"', args=[d.arg(name='restartPolicy', type=d.T.string)]), withRestartPolicy(restartPolicy): { template+: { spec+: { restartPolicy: restartPolicy } } }, - '#withRuntimeClassName':: d.fn(help='"RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \\"legacy\\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md This is a beta feature as of Kubernetes v1.14."', args=[d.arg(name='runtimeClassName', type=d.T.string)]), + '#withRuntimeClassName':: d.fn(help='"RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \\"legacy\\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class"', args=[d.arg(name='runtimeClassName', type=d.T.string)]), withRuntimeClassName(runtimeClassName): { template+: { spec+: { runtimeClassName: runtimeClassName } } }, '#withSchedulerName':: d.fn(help='"If specified, the pod will be dispatched by specified scheduler. If not specified, the pod will be dispatched by default scheduler."', args=[d.arg(name='schedulerName', type=d.T.string)]), withSchedulerName(schedulerName): { template+: { spec+: { schedulerName: schedulerName } } }, + '#withSchedulingGates':: d.fn(help='"SchedulingGates is an opaque list of values that if specified will block scheduling the pod. If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the scheduler will not attempt to schedule the pod.\\n\\nSchedulingGates can only be set at pod creation time, and be removed only afterwards.\\n\\nThis is a beta feature enabled by the PodSchedulingReadiness feature gate."', args=[d.arg(name='schedulingGates', type=d.T.array)]), + withSchedulingGates(schedulingGates): { template+: { spec+: { schedulingGates: if std.isArray(v=schedulingGates) then schedulingGates else [schedulingGates] } } }, + '#withSchedulingGatesMixin':: d.fn(help='"SchedulingGates is an opaque list of values that if specified will block scheduling the pod. If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the scheduler will not attempt to schedule the pod.\\n\\nSchedulingGates can only be set at pod creation time, and be removed only afterwards.\\n\\nThis is a beta feature enabled by the PodSchedulingReadiness feature gate."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='schedulingGates', type=d.T.array)]), + withSchedulingGatesMixin(schedulingGates): { template+: { spec+: { schedulingGates+: if std.isArray(v=schedulingGates) then schedulingGates else [schedulingGates] } } }, '#withServiceAccount':: d.fn(help='"DeprecatedServiceAccount is a depreciated alias for ServiceAccountName. 
Deprecated: Use serviceAccountName instead."', args=[d.arg(name='serviceAccount', type=d.T.string)]),
withServiceAccount(serviceAccount): { spec+: { template+: { spec+: { serviceAccount: serviceAccount } } } },
'#withServiceAccountName':: d.fn(help='"ServiceAccountName is the name of the ServiceAccount to use to run this pod. More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/"', args=[d.arg(name='serviceAccountName', type=d.T.string)]),
diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apps/v1/daemonSetStatus.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apps/v1/daemonSetStatus.libsonnet
similarity index 93%
rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apps/v1/daemonSetStatus.libsonnet
rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apps/v1/daemonSetStatus.libsonnet
index 62d339edc9c..a174182b33a 100644
--- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apps/v1/daemonSetStatus.libsonnet
+++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apps/v1/daemonSetStatus.libsonnet
@@ -15,7 +15,7 @@
withNumberAvailable(numberAvailable): { numberAvailable: numberAvailable },
'#withNumberMisscheduled':: d.fn(help='"The number of nodes that are running the daemon pod, but are not supposed to run the daemon pod. More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/"', args=[d.arg(name='numberMisscheduled', type=d.T.integer)]),
withNumberMisscheduled(numberMisscheduled): { numberMisscheduled: numberMisscheduled },
- '#withNumberReady':: d.fn(help='"The number of nodes that should be running the daemon pod and have one or more of the daemon pod running and ready."', args=[d.arg(name='numberReady', type=d.T.integer)]),
+ '#withNumberReady':: d.fn(help='"numberReady is the number of nodes that should be running the daemon pod and have one or more of the daemon pod running with a Ready Condition."', args=[d.arg(name='numberReady', type=d.T.integer)]),
withNumberReady(numberReady): { numberReady: numberReady },
'#withNumberUnavailable':: d.fn(help='"The number of nodes that should be running the daemon pod and have none of the daemon pod running and available (ready for at least spec.minReadySeconds)"', args=[d.arg(name='numberUnavailable', type=d.T.integer)]),
withNumberUnavailable(numberUnavailable): { numberUnavailable: numberUnavailable },
diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apps/v1/daemonSetUpdateStrategy.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apps/v1/daemonSetUpdateStrategy.libsonnet
similarity index 100%
rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apps/v1/daemonSetUpdateStrategy.libsonnet
rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apps/v1/daemonSetUpdateStrategy.libsonnet
diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apps/v1/deployment.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apps/v1/deployment.libsonnet
similarity index 86%
rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apps/v1/deployment.libsonnet
rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apps/v1/deployment.libsonnet
index 0556cdbc01c..fc3ceea4cf3 100644
--- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apps/v1/deployment.libsonnet
+++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apps/v1/deployment.libsonnet
@@ -3,12 +3,10 @@
'#':: d.pkg(name='deployment', url='', help='"Deployment enables declarative updates for Pods and ReplicaSets."'),
'#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'),
metadata: {
- '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]),
+ '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]),
withAnnotations(annotations): { metadata+: { annotations: annotations } },
- '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]),
+ '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]),
withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } },
- '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]),
- withClusterName(clusterName): { metadata+: { clusterName: clusterName } },
'#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]),
withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } },
'#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened.
Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -19,21 +17,21 @@ withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { metadata+: { generateName: generateName } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. 
Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { metadata+: { labels+: labels } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. 
Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { metadata+: { namespace: namespace } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, @@ -41,9 +39,9 @@ withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. 
Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { metadata+: { uid: uid } }, }, '#new':: d.fn(help='new returns an instance of Deployment', args=[d.arg(name='name', type=d.T.string)]), @@ -80,12 +78,10 @@ template: { '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { spec+: { template+: { metadata+: { annotations: annotations } } } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { spec+: { template+: { metadata+: { annotations+: annotations } } } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. 
This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { spec+: { template+: { metadata+: { clusterName: clusterName } } } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { spec+: { template+: { metadata+: { creationTimestamp: creationTimestamp } } } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -96,21 +92,21 @@ withFinalizers(finalizers): { spec+: { template+: { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } } } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { spec+: { template+: { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } } } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { spec+: { template+: { metadata+: { generateName: generateName } } } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { spec+: { template+: { metadata+: { generation: generation } } } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { spec+: { template+: { metadata+: { labels: labels } } } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { spec+: { template+: { metadata+: { labels+: labels } } } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". 
The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { spec+: { template+: { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } } } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { spec+: { template+: { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } } } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { spec+: { template+: { metadata+: { name: name } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { spec+: { template+: { metadata+: { namespace: namespace } } } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. 
There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { spec+: { template+: { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } } } }, @@ -118,9 +114,9 @@ withOwnerReferencesMixin(ownerReferences): { spec+: { template+: { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } } } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { spec+: { template+: { metadata+: { resourceVersion: resourceVersion } } } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { spec+: { template+: { metadata+: { selfLink: selfLink } } } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { spec+: { template+: { metadata+: { uid: uid } } } }, }, '#spec':: d.obj(help='"PodSpec is a description of a pod."'), @@ -179,6 +175,11 @@ '#withSearchesMixin':: d.fn(help='"A list of DNS search domains for host-name lookup. This will be appended to the base search paths generated from DNSPolicy. Duplicated search paths will be removed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='searches', type=d.T.array)]), withSearchesMixin(searches): { spec+: { template+: { spec+: { dnsConfig+: { searches+: if std.isArray(v=searches) then searches else [searches] } } } } }, }, + '#os':: d.obj(help='"PodOS defines the OS parameters of a pod."'), + os: { + '#withName':: d.fn(help='"Name is the name of the operating system. The currently supported values are linux and windows. 
Additional value may be defined in future and can be one of: https://github.com/opencontainers/runtime-spec/blob/master/config.md#platform-specific-configuration Clients should expect to handle additional values and treat unrecognized values in this field as os: null"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { spec+: { template+: { spec+: { os+: { name: name } } } } }, + }, '#securityContext':: d.obj(help='"PodSecurityContext holds pod-level security attributes and common container settings. Some fields are also present in container.securityContext. Field values of container.securityContext take precedence over field values of PodSecurityContext."'), securityContext: { '#seLinuxOptions':: d.obj(help='"SELinuxOptions are the labels to be applied to the container"'), @@ -194,7 +195,7 @@ }, '#seccompProfile':: d.obj(help="\"SeccompProfile defines a pod/container's seccomp profile settings. Only one profile source may be set.\""), seccompProfile: { - '#withLocalhostProfile':: d.fn(help="\"localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must only be set if type is \\\"Localhost\\\".\"", args=[d.arg(name='localhostProfile', type=d.T.string)]), + '#withLocalhostProfile':: d.fn(help="\"localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must be set if type is \\\"Localhost\\\". Must NOT be set for any other type.\"", args=[d.arg(name='localhostProfile', type=d.T.string)]), withLocalhostProfile(localhostProfile): { spec+: { template+: { spec+: { securityContext+: { seccompProfile+: { localhostProfile: localhostProfile } } } } } }, '#withType':: d.fn(help='"type indicates which kind of seccomp profile will be applied. Valid options are:\\n\\nLocalhost - a profile defined in a file on the node should be used. RuntimeDefault - the container runtime default profile should be used. Unconfined - no profile should be applied."', args=[d.arg(name='type', type=d.T.string)]), withType(type): { spec+: { template+: { spec+: { securityContext+: { seccompProfile+: { type: type } } } } } }, @@ -205,26 +206,28 @@ withGmsaCredentialSpec(gmsaCredentialSpec): { spec+: { template+: { spec+: { securityContext+: { windowsOptions+: { gmsaCredentialSpec: gmsaCredentialSpec } } } } } }, '#withGmsaCredentialSpecName':: d.fn(help='"GMSACredentialSpecName is the name of the GMSA credential spec to use."', args=[d.arg(name='gmsaCredentialSpecName', type=d.T.string)]), withGmsaCredentialSpecName(gmsaCredentialSpecName): { spec+: { template+: { spec+: { securityContext+: { windowsOptions+: { gmsaCredentialSpecName: gmsaCredentialSpecName } } } } } }, + '#withHostProcess':: d.fn(help="\"HostProcess determines if a container should be run as a 'Host Process' container. All of a Pod's containers must have the same effective HostProcess value (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). 
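Reviewer note: `spec.template.spec.os` is new with this bump. A hedged sketch of declaring the pod OS through the generated setter shown above (deployment name and import path are illustrative):

```jsonnet
// Sketch: setting the new PodOS field via the 1.29 generated setter; names/paths illustrative.
local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';
local deployment = k.apps.v1.deployment;

deployment.metadata.withName('example')
+ deployment.spec.template.spec.os.withName('linux')
// 'windows' is the other supported value; the new
// securityContext.windowsOptions.withHostProcess(...) is only meaningful in that case.
```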
In addition, if HostProcess is true then HostNetwork must also be set to true.\"", args=[d.arg(name='hostProcess', type=d.T.boolean)]), + withHostProcess(hostProcess): { spec+: { template+: { spec+: { securityContext+: { windowsOptions+: { hostProcess: hostProcess } } } } } }, '#withRunAsUserName':: d.fn(help='"The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence."', args=[d.arg(name='runAsUserName', type=d.T.string)]), withRunAsUserName(runAsUserName): { spec+: { template+: { spec+: { securityContext+: { windowsOptions+: { runAsUserName: runAsUserName } } } } } }, }, - '#withFsGroup':: d.fn(help="\"A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod:\\n\\n1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw----\\n\\nIf unset, the Kubelet will not modify the ownership and permissions of any volume.\"", args=[d.arg(name='fsGroup', type=d.T.integer)]), + '#withFsGroup':: d.fn(help="\"A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod:\\n\\n1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw----\\n\\nIf unset, the Kubelet will not modify the ownership and permissions of any volume. Note that this field cannot be set when spec.os.name is windows.\"", args=[d.arg(name='fsGroup', type=d.T.integer)]), withFsGroup(fsGroup): { spec+: { template+: { spec+: { securityContext+: { fsGroup: fsGroup } } } } }, - '#withFsGroupChangePolicy':: d.fn(help='"fsGroupChangePolicy defines behavior of changing ownership and permission of the volume before being exposed inside Pod. This field will only apply to volume types which support fsGroup based ownership(and permissions). It will have no effect on ephemeral volume types such as: secret, configmaps and emptydir. Valid values are \\"OnRootMismatch\\" and \\"Always\\". If not specified, \\"Always\\" is used."', args=[d.arg(name='fsGroupChangePolicy', type=d.T.string)]), + '#withFsGroupChangePolicy':: d.fn(help='"fsGroupChangePolicy defines behavior of changing ownership and permission of the volume before being exposed inside Pod. This field will only apply to volume types which support fsGroup based ownership(and permissions). It will have no effect on ephemeral volume types such as: secret, configmaps and emptydir. Valid values are \\"OnRootMismatch\\" and \\"Always\\". If not specified, \\"Always\\" is used. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='fsGroupChangePolicy', type=d.T.string)]), withFsGroupChangePolicy(fsGroupChangePolicy): { spec+: { template+: { spec+: { securityContext+: { fsGroupChangePolicy: fsGroupChangePolicy } } } } }, - '#withRunAsGroup':: d.fn(help='"The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext. 
If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container."', args=[d.arg(name='runAsGroup', type=d.T.integer)]), + '#withRunAsGroup':: d.fn(help='"The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='runAsGroup', type=d.T.integer)]), withRunAsGroup(runAsGroup): { spec+: { template+: { spec+: { securityContext+: { runAsGroup: runAsGroup } } } } }, '#withRunAsNonRoot':: d.fn(help='"Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence."', args=[d.arg(name='runAsNonRoot', type=d.T.boolean)]), withRunAsNonRoot(runAsNonRoot): { spec+: { template+: { spec+: { securityContext+: { runAsNonRoot: runAsNonRoot } } } } }, - '#withRunAsUser':: d.fn(help='"The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container."', args=[d.arg(name='runAsUser', type=d.T.integer)]), + '#withRunAsUser':: d.fn(help='"The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='runAsUser', type=d.T.integer)]), withRunAsUser(runAsUser): { spec+: { template+: { spec+: { securityContext+: { runAsUser: runAsUser } } } } }, - '#withSupplementalGroups':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID. If unspecified, no groups will be added to any container.\"", args=[d.arg(name='supplementalGroups', type=d.T.array)]), + '#withSupplementalGroups':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID, the fsGroup (if specified), and group memberships defined in the container image for the uid of the container process. If unspecified, no additional groups are added to any container. Note that group memberships defined in the container image for the uid of the container process are still effective, even if they are not included in this list. Note that this field cannot be set when spec.os.name is windows.\"", args=[d.arg(name='supplementalGroups', type=d.T.array)]), withSupplementalGroups(supplementalGroups): { spec+: { template+: { spec+: { securityContext+: { supplementalGroups: if std.isArray(v=supplementalGroups) then supplementalGroups else [supplementalGroups] } } } } }, - '#withSupplementalGroupsMixin':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID. 
If unspecified, no groups will be added to any container.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='supplementalGroups', type=d.T.array)]), + '#withSupplementalGroupsMixin':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID, the fsGroup (if specified), and group memberships defined in the container image for the uid of the container process. If unspecified, no additional groups are added to any container. Note that group memberships defined in the container image for the uid of the container process are still effective, even if they are not included in this list. Note that this field cannot be set when spec.os.name is windows.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='supplementalGroups', type=d.T.array)]), withSupplementalGroupsMixin(supplementalGroups): { spec+: { template+: { spec+: { securityContext+: { supplementalGroups+: if std.isArray(v=supplementalGroups) then supplementalGroups else [supplementalGroups] } } } } }, - '#withSysctls':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch."', args=[d.arg(name='sysctls', type=d.T.array)]), + '#withSysctls':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='sysctls', type=d.T.array)]), withSysctls(sysctls): { spec+: { template+: { spec+: { securityContext+: { sysctls: if std.isArray(v=sysctls) then sysctls else [sysctls] } } } } }, - '#withSysctlsMixin':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='sysctls', type=d.T.array)]), + '#withSysctlsMixin':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch. Note that this field cannot be set when spec.os.name is windows."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='sysctls', type=d.T.array)]), withSysctlsMixin(sysctls): { spec+: { template+: { spec+: { securityContext+: { sysctls+: if std.isArray(v=sysctls) then sysctls else [sysctls] } } } } }, }, '#withActiveDeadlineSeconds':: d.fn(help='"Optional duration in seconds the pod may be active on the node relative to StartTime before the system will actively try to mark it failed and kill associated containers. Value must be a positive integer."', args=[d.arg(name='activeDeadlineSeconds', type=d.T.integer)]), @@ -239,9 +242,9 @@ withDnsPolicy(dnsPolicy): { spec+: { template+: { spec+: { dnsPolicy: dnsPolicy } } } }, '#withEnableServiceLinks':: d.fn(help="\"EnableServiceLinks indicates whether information about services should be injected into pod's environment variables, matching the syntax of Docker links. Optional: Defaults to true.\"", args=[d.arg(name='enableServiceLinks', type=d.T.boolean)]), withEnableServiceLinks(enableServiceLinks): { spec+: { template+: { spec+: { enableServiceLinks: enableServiceLinks } } } }, - '#withEphemeralContainers':: d.fn(help="\"List of ephemeral containers run in this pod. 
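Reviewer note: several pod-level securityContext help strings above now carry the "cannot be set when spec.os.name is windows" caveat. A small sketch of the Linux-only fields set through the generated functions, consistent with declaring `os.withName('linux')`; the numeric IDs are illustrative:

```jsonnet
// Sketch: Linux-only pod security settings composed from the generated setters; values illustrative.
local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';
local deployment = k.apps.v1.deployment;
local psc = deployment.spec.template.spec.securityContext;

deployment.metadata.withName('example')
+ deployment.spec.template.spec.os.withName('linux')
+ psc.withRunAsUser(10001)
+ psc.withRunAsGroup(10001)
+ psc.withFsGroup(10001)
+ psc.withRunAsNonRoot(true)
```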
Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. This field is alpha-level and is only honored by servers that enable the EphemeralContainers feature.\"", args=[d.arg(name='ephemeralContainers', type=d.T.array)]), + '#withEphemeralContainers':: d.fn(help="\"List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource.\"", args=[d.arg(name='ephemeralContainers', type=d.T.array)]), withEphemeralContainers(ephemeralContainers): { spec+: { template+: { spec+: { ephemeralContainers: if std.isArray(v=ephemeralContainers) then ephemeralContainers else [ephemeralContainers] } } } }, - '#withEphemeralContainersMixin':: d.fn(help="\"List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. This field is alpha-level and is only honored by servers that enable the EphemeralContainers feature.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='ephemeralContainers', type=d.T.array)]), + '#withEphemeralContainersMixin':: d.fn(help="\"List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='ephemeralContainers', type=d.T.array)]), withEphemeralContainersMixin(ephemeralContainers): { spec+: { template+: { spec+: { ephemeralContainers+: if std.isArray(v=ephemeralContainers) then ephemeralContainers else [ephemeralContainers] } } } }, '#withHostAliases':: d.fn(help="\"HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts file if specified. This is only valid for non-hostNetwork pods.\"", args=[d.arg(name='hostAliases', type=d.T.array)]), withHostAliases(hostAliases): { spec+: { template+: { spec+: { hostAliases: if std.isArray(v=hostAliases) then hostAliases else [hostAliases] } } } }, @@ -253,11 +256,13 @@ withHostNetwork(hostNetwork): { spec+: { template+: { spec+: { hostNetwork: hostNetwork } } } }, '#withHostPID':: d.fn(help="\"Use the host's pid namespace. Optional: Default to false.\"", args=[d.arg(name='hostPID', type=d.T.boolean)]), withHostPID(hostPID): { spec+: { template+: { spec+: { hostPID: hostPID } } } }, + '#withHostUsers':: d.fn(help="\"Use the host's user namespace. Optional: Default to true. 
If set to true or not present, the pod will be run in the host user namespace, useful for when the pod needs a feature only available to the host user namespace, such as loading a kernel module with CAP_SYS_MODULE. When set to false, a new userns is created for the pod. Setting false is useful for mitigating container breakout vulnerabilities even allowing users to run their containers as root without actually having root privileges on the host. This field is alpha-level and is only honored by servers that enable the UserNamespacesSupport feature.\"", args=[d.arg(name='hostUsers', type=d.T.boolean)]), + withHostUsers(hostUsers): { spec+: { template+: { spec+: { hostUsers: hostUsers } } } }, '#withHostname':: d.fn(help="\"Specifies the hostname of the Pod If not specified, the pod's hostname will be set to a system-defined value.\"", args=[d.arg(name='hostname', type=d.T.string)]), withHostname(hostname): { spec+: { template+: { spec+: { hostname: hostname } } } }, - '#withImagePullSecrets':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), + '#withImagePullSecrets':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), withImagePullSecrets(imagePullSecrets): { spec+: { template+: { spec+: { imagePullSecrets: if std.isArray(v=imagePullSecrets) then imagePullSecrets else [imagePullSecrets] } } } }, - '#withImagePullSecretsMixin':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), + '#withImagePullSecretsMixin':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), withImagePullSecretsMixin(imagePullSecrets): { spec+: { template+: { spec+: { imagePullSecrets+: if std.isArray(v=imagePullSecrets) then imagePullSecrets else [imagePullSecrets] } } } }, '#withInitContainers':: d.fn(help='"List of initialization containers belonging to the pod. 
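Reviewer note: `withHostUsers` is a new pod-spec setter in this library version. A hedged sketch of opting a pod out of the host user namespace (per the help text above, the cluster must have the UserNamespacesSupport feature enabled for the field to take effect):

```jsonnet
// Sketch: request a dedicated user namespace for the pod; names illustrative.
local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';
local deployment = k.apps.v1.deployment;

deployment.metadata.withName('example')
+ deployment.spec.template.spec.withHostUsers(false)
```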
Init containers are executed in order prior to containers being started. If any init container fails, the pod is considered to have failed and is handled according to its restartPolicy. The name for an init container or normal container must be unique among all containers. Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes. The resourceRequirements of an init container are taken into account during scheduling by finding the highest request/limit for each resource type, and then using the max of of that value or the sum of the normal containers. Limits are applied to init containers in a similar fashion. Init containers cannot currently be added or removed. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/"', args=[d.arg(name='initContainers', type=d.T.array)]), withInitContainers(initContainers): { spec+: { template+: { spec+: { initContainers: if std.isArray(v=initContainers) then initContainers else [initContainers] } } } }, @@ -269,26 +274,34 @@ withNodeSelector(nodeSelector): { spec+: { template+: { spec+: { nodeSelector: nodeSelector } } } }, '#withNodeSelectorMixin':: d.fn(help="\"NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='nodeSelector', type=d.T.object)]), withNodeSelectorMixin(nodeSelector): { spec+: { template+: { spec+: { nodeSelector+: nodeSelector } } } }, - '#withOverhead':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md This field is alpha-level as of Kubernetes v1.16, and is only honored by servers that enable the PodOverhead feature."', args=[d.arg(name='overhead', type=d.T.object)]), + '#withOverhead':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md"', args=[d.arg(name='overhead', type=d.T.object)]), withOverhead(overhead): { spec+: { template+: { spec+: { overhead: overhead } } } }, - '#withOverheadMixin':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. 
This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md This field is alpha-level as of Kubernetes v1.16, and is only honored by servers that enable the PodOverhead feature."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='overhead', type=d.T.object)]), + '#withOverheadMixin':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='overhead', type=d.T.object)]), withOverheadMixin(overhead): { spec+: { template+: { spec+: { overhead+: overhead } } } }, - '#withPreemptionPolicy':: d.fn(help='"PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset. This field is beta-level, gated by the NonPreemptingPriority feature-gate."', args=[d.arg(name='preemptionPolicy', type=d.T.string)]), + '#withPreemptionPolicy':: d.fn(help='"PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset."', args=[d.arg(name='preemptionPolicy', type=d.T.string)]), withPreemptionPolicy(preemptionPolicy): { spec+: { template+: { spec+: { preemptionPolicy: preemptionPolicy } } } }, '#withPriority':: d.fn(help='"The priority value. Various system components use this field to find the priority of the pod. When Priority Admission Controller is enabled, it prevents users from setting this field. The admission controller populates this field from PriorityClassName. The higher the value, the higher the priority."', args=[d.arg(name='priority', type=d.T.integer)]), withPriority(priority): { spec+: { template+: { spec+: { priority: priority } } } }, '#withPriorityClassName':: d.fn(help="\"If specified, indicates the pod's priority. \\\"system-node-critical\\\" and \\\"system-cluster-critical\\\" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. 
If not specified, the pod priority will be default or zero if there is no default.\"", args=[d.arg(name='priorityClassName', type=d.T.string)]), withPriorityClassName(priorityClassName): { spec+: { template+: { spec+: { priorityClassName: priorityClassName } } } }, - '#withReadinessGates':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/0007-pod-ready%2B%2B.md"', args=[d.arg(name='readinessGates', type=d.T.array)]), + '#withReadinessGates':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates"', args=[d.arg(name='readinessGates', type=d.T.array)]), withReadinessGates(readinessGates): { spec+: { template+: { spec+: { readinessGates: if std.isArray(v=readinessGates) then readinessGates else [readinessGates] } } } }, - '#withReadinessGatesMixin':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/0007-pod-ready%2B%2B.md"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='readinessGates', type=d.T.array)]), + '#withReadinessGatesMixin':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='readinessGates', type=d.T.array)]), withReadinessGatesMixin(readinessGates): { spec+: { template+: { spec+: { readinessGates+: if std.isArray(v=readinessGates) then readinessGates else [readinessGates] } } } }, - '#withRestartPolicy':: d.fn(help='"Restart policy for all containers within the pod. One of Always, OnFailure, Never. Default to Always. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy"', args=[d.arg(name='restartPolicy', type=d.T.string)]), + '#withResourceClaims':: d.fn(help='"ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. The resources will be made available to those containers which consume them by name.\\n\\nThis is an alpha field and requires enabling the DynamicResourceAllocation feature gate.\\n\\nThis field is immutable."', args=[d.arg(name='resourceClaims', type=d.T.array)]), + withResourceClaims(resourceClaims): { spec+: { template+: { spec+: { resourceClaims: if std.isArray(v=resourceClaims) then resourceClaims else [resourceClaims] } } } }, + '#withResourceClaimsMixin':: d.fn(help='"ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. 
The resources will be made available to those containers which consume them by name.\\n\\nThis is an alpha field and requires enabling the DynamicResourceAllocation feature gate.\\n\\nThis field is immutable."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='resourceClaims', type=d.T.array)]), + withResourceClaimsMixin(resourceClaims): { spec+: { template+: { spec+: { resourceClaims+: if std.isArray(v=resourceClaims) then resourceClaims else [resourceClaims] } } } }, + '#withRestartPolicy':: d.fn(help='"Restart policy for all containers within the pod. One of Always, OnFailure, Never. In some contexts, only a subset of those values may be permitted. Default to Always. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy"', args=[d.arg(name='restartPolicy', type=d.T.string)]), withRestartPolicy(restartPolicy): { spec+: { template+: { spec+: { restartPolicy: restartPolicy } } } }, - '#withRuntimeClassName':: d.fn(help='"RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \\"legacy\\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md This is a beta feature as of Kubernetes v1.14."', args=[d.arg(name='runtimeClassName', type=d.T.string)]), + '#withRuntimeClassName':: d.fn(help='"RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \\"legacy\\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class"', args=[d.arg(name='runtimeClassName', type=d.T.string)]), withRuntimeClassName(runtimeClassName): { spec+: { template+: { spec+: { runtimeClassName: runtimeClassName } } } }, '#withSchedulerName':: d.fn(help='"If specified, the pod will be dispatched by specified scheduler. If not specified, the pod will be dispatched by default scheduler."', args=[d.arg(name='schedulerName', type=d.T.string)]), withSchedulerName(schedulerName): { spec+: { template+: { spec+: { schedulerName: schedulerName } } } }, + '#withSchedulingGates':: d.fn(help='"SchedulingGates is an opaque list of values that if specified will block scheduling the pod. If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the scheduler will not attempt to schedule the pod.\\n\\nSchedulingGates can only be set at pod creation time, and be removed only afterwards.\\n\\nThis is a beta feature enabled by the PodSchedulingReadiness feature gate."', args=[d.arg(name='schedulingGates', type=d.T.array)]), + withSchedulingGates(schedulingGates): { spec+: { template+: { spec+: { schedulingGates: if std.isArray(v=schedulingGates) then schedulingGates else [schedulingGates] } } } }, + '#withSchedulingGatesMixin':: d.fn(help='"SchedulingGates is an opaque list of values that if specified will block scheduling the pod. 
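Reviewer note: `withResourceClaims`/`withResourceClaimsMixin` and `withSchedulingGates` (its mixin variant continues just below) are new pod-spec setters in 1.29. A hedged sketch of using them; the gate and claim names are illustrative and the claim entry is simplified:

```jsonnet
// Sketch: new 1.29 pod-spec fields; gate and claim names are illustrative.
local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';
local deployment = k.apps.v1.deployment;
local podSpec = deployment.spec.template.spec;

deployment.metadata.withName('example')
// the pod stays in SchedulingGated until a controller removes this gate
+ podSpec.withSchedulingGates([{ name: 'example.com/queue-admission' }])
// claims listed here must also be referenced by name from the containers that consume them
+ podSpec.withResourceClaims([{ name: 'shared-gpu' }])
```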
If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the scheduler will not attempt to schedule the pod.\\n\\nSchedulingGates can only be set at pod creation time, and be removed only afterwards.\\n\\nThis is a beta feature enabled by the PodSchedulingReadiness feature gate."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='schedulingGates', type=d.T.array)]),
+ withSchedulingGatesMixin(schedulingGates): { spec+: { template+: { spec+: { schedulingGates+: if std.isArray(v=schedulingGates) then schedulingGates else [schedulingGates] } } } },
'#withServiceAccount':: d.fn(help='"DeprecatedServiceAccount is a depreciated alias for ServiceAccountName. Deprecated: Use serviceAccountName instead."', args=[d.arg(name='serviceAccount', type=d.T.string)]),
withServiceAccount(serviceAccount): { spec+: { template+: { spec+: { serviceAccount: serviceAccount } } } },
'#withServiceAccountName':: d.fn(help='"ServiceAccountName is the name of the ServiceAccount to use to run this pod. More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/"', args=[d.arg(name='serviceAccountName', type=d.T.string)]),
diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apps/v1/deploymentCondition.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apps/v1/deploymentCondition.libsonnet
similarity index 100%
rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apps/v1/deploymentCondition.libsonnet
rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apps/v1/deploymentCondition.libsonnet
diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apps/v1/deploymentSpec.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apps/v1/deploymentSpec.libsonnet
similarity index 85%
rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apps/v1/deploymentSpec.libsonnet
rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apps/v1/deploymentSpec.libsonnet
index 90cc4395977..05f76111103 100644
--- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apps/v1/deploymentSpec.libsonnet
+++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apps/v1/deploymentSpec.libsonnet
@@ -28,12 +28,10 @@
template: {
'#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'),
metadata: {
- '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]),
+ '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { template+: { metadata+: { annotations: annotations } } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { template+: { metadata+: { annotations+: annotations } } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { template+: { metadata+: { clusterName: clusterName } } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { template+: { metadata+: { creationTimestamp: creationTimestamp } } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -44,21 +42,21 @@ withFinalizers(finalizers): { template+: { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. 
Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { template+: { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { template+: { metadata+: { generateName: generateName } } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { template+: { metadata+: { generation: generation } } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { template+: { metadata+: { labels: labels } } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. 
More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { template+: { metadata+: { labels+: labels } } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { template+: { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { template+: { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { template+: { metadata+: { name: name } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. 
More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { template+: { metadata+: { namespace: namespace } } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { template+: { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } } }, @@ -66,9 +64,9 @@ withOwnerReferencesMixin(ownerReferences): { template+: { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { template+: { metadata+: { resourceVersion: resourceVersion } } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { template+: { metadata+: { selfLink: selfLink } } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { template+: { metadata+: { uid: uid } } }, }, '#spec':: d.obj(help='"PodSpec is a description of a pod."'), @@ -127,6 +125,11 @@ '#withSearchesMixin':: d.fn(help='"A list of DNS search domains for host-name lookup. This will be appended to the base search paths generated from DNSPolicy. Duplicated search paths will be removed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='searches', type=d.T.array)]), withSearchesMixin(searches): { template+: { spec+: { dnsConfig+: { searches+: if std.isArray(v=searches) then searches else [searches] } } } }, }, + '#os':: d.obj(help='"PodOS defines the OS parameters of a pod."'), + os: { + '#withName':: d.fn(help='"Name is the name of the operating system. The currently supported values are linux and windows. Additional value may be defined in future and can be one of: https://github.com/opencontainers/runtime-spec/blob/master/config.md#platform-specific-configuration Clients should expect to handle additional values and treat unrecognized values in this field as os: null"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { template+: { spec+: { os+: { name: name } } } }, + }, '#securityContext':: d.obj(help='"PodSecurityContext holds pod-level security attributes and common container settings. Some fields are also present in container.securityContext. Field values of container.securityContext take precedence over field values of PodSecurityContext."'), securityContext: { '#seLinuxOptions':: d.obj(help='"SELinuxOptions are the labels to be applied to the container"'), @@ -142,7 +145,7 @@ }, '#seccompProfile':: d.obj(help="\"SeccompProfile defines a pod/container's seccomp profile settings. Only one profile source may be set.\""), seccompProfile: { - '#withLocalhostProfile':: d.fn(help="\"localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must only be set if type is \\\"Localhost\\\".\"", args=[d.arg(name='localhostProfile', type=d.T.string)]), + '#withLocalhostProfile':: d.fn(help="\"localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must be set if type is \\\"Localhost\\\". Must NOT be set for any other type.\"", args=[d.arg(name='localhostProfile', type=d.T.string)]), withLocalhostProfile(localhostProfile): { template+: { spec+: { securityContext+: { seccompProfile+: { localhostProfile: localhostProfile } } } } }, '#withType':: d.fn(help='"type indicates which kind of seccomp profile will be applied. Valid options are:\\n\\nLocalhost - a profile defined in a file on the node should be used. RuntimeDefault - the container runtime default profile should be used. 
Unconfined - no profile should be applied."', args=[d.arg(name='type', type=d.T.string)]), withType(type): { template+: { spec+: { securityContext+: { seccompProfile+: { type: type } } } } }, @@ -153,26 +156,28 @@ withGmsaCredentialSpec(gmsaCredentialSpec): { template+: { spec+: { securityContext+: { windowsOptions+: { gmsaCredentialSpec: gmsaCredentialSpec } } } } }, '#withGmsaCredentialSpecName':: d.fn(help='"GMSACredentialSpecName is the name of the GMSA credential spec to use."', args=[d.arg(name='gmsaCredentialSpecName', type=d.T.string)]), withGmsaCredentialSpecName(gmsaCredentialSpecName): { template+: { spec+: { securityContext+: { windowsOptions+: { gmsaCredentialSpecName: gmsaCredentialSpecName } } } } }, + '#withHostProcess':: d.fn(help="\"HostProcess determines if a container should be run as a 'Host Process' container. All of a Pod's containers must have the same effective HostProcess value (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). In addition, if HostProcess is true then HostNetwork must also be set to true.\"", args=[d.arg(name='hostProcess', type=d.T.boolean)]), + withHostProcess(hostProcess): { template+: { spec+: { securityContext+: { windowsOptions+: { hostProcess: hostProcess } } } } }, '#withRunAsUserName':: d.fn(help='"The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence."', args=[d.arg(name='runAsUserName', type=d.T.string)]), withRunAsUserName(runAsUserName): { template+: { spec+: { securityContext+: { windowsOptions+: { runAsUserName: runAsUserName } } } } }, }, - '#withFsGroup':: d.fn(help="\"A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod:\\n\\n1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw----\\n\\nIf unset, the Kubelet will not modify the ownership and permissions of any volume.\"", args=[d.arg(name='fsGroup', type=d.T.integer)]), + '#withFsGroup':: d.fn(help="\"A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod:\\n\\n1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw----\\n\\nIf unset, the Kubelet will not modify the ownership and permissions of any volume. Note that this field cannot be set when spec.os.name is windows.\"", args=[d.arg(name='fsGroup', type=d.T.integer)]), withFsGroup(fsGroup): { template+: { spec+: { securityContext+: { fsGroup: fsGroup } } } }, - '#withFsGroupChangePolicy':: d.fn(help='"fsGroupChangePolicy defines behavior of changing ownership and permission of the volume before being exposed inside Pod. This field will only apply to volume types which support fsGroup based ownership(and permissions). It will have no effect on ephemeral volume types such as: secret, configmaps and emptydir. Valid values are \\"OnRootMismatch\\" and \\"Always\\". 
If not specified, \\"Always\\" is used."', args=[d.arg(name='fsGroupChangePolicy', type=d.T.string)]), + '#withFsGroupChangePolicy':: d.fn(help='"fsGroupChangePolicy defines behavior of changing ownership and permission of the volume before being exposed inside Pod. This field will only apply to volume types which support fsGroup based ownership(and permissions). It will have no effect on ephemeral volume types such as: secret, configmaps and emptydir. Valid values are \\"OnRootMismatch\\" and \\"Always\\". If not specified, \\"Always\\" is used. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='fsGroupChangePolicy', type=d.T.string)]), withFsGroupChangePolicy(fsGroupChangePolicy): { template+: { spec+: { securityContext+: { fsGroupChangePolicy: fsGroupChangePolicy } } } }, - '#withRunAsGroup':: d.fn(help='"The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container."', args=[d.arg(name='runAsGroup', type=d.T.integer)]), + '#withRunAsGroup':: d.fn(help='"The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='runAsGroup', type=d.T.integer)]), withRunAsGroup(runAsGroup): { template+: { spec+: { securityContext+: { runAsGroup: runAsGroup } } } }, '#withRunAsNonRoot':: d.fn(help='"Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence."', args=[d.arg(name='runAsNonRoot', type=d.T.boolean)]), withRunAsNonRoot(runAsNonRoot): { template+: { spec+: { securityContext+: { runAsNonRoot: runAsNonRoot } } } }, - '#withRunAsUser':: d.fn(help='"The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container."', args=[d.arg(name='runAsUser', type=d.T.integer)]), + '#withRunAsUser':: d.fn(help='"The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='runAsUser', type=d.T.integer)]), withRunAsUser(runAsUser): { template+: { spec+: { securityContext+: { runAsUser: runAsUser } } } }, - '#withSupplementalGroups':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID. 
If unspecified, no groups will be added to any container.\"", args=[d.arg(name='supplementalGroups', type=d.T.array)]), + '#withSupplementalGroups':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID, the fsGroup (if specified), and group memberships defined in the container image for the uid of the container process. If unspecified, no additional groups are added to any container. Note that group memberships defined in the container image for the uid of the container process are still effective, even if they are not included in this list. Note that this field cannot be set when spec.os.name is windows.\"", args=[d.arg(name='supplementalGroups', type=d.T.array)]), withSupplementalGroups(supplementalGroups): { template+: { spec+: { securityContext+: { supplementalGroups: if std.isArray(v=supplementalGroups) then supplementalGroups else [supplementalGroups] } } } }, - '#withSupplementalGroupsMixin':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID. If unspecified, no groups will be added to any container.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='supplementalGroups', type=d.T.array)]), + '#withSupplementalGroupsMixin':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID, the fsGroup (if specified), and group memberships defined in the container image for the uid of the container process. If unspecified, no additional groups are added to any container. Note that group memberships defined in the container image for the uid of the container process are still effective, even if they are not included in this list. Note that this field cannot be set when spec.os.name is windows.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='supplementalGroups', type=d.T.array)]), withSupplementalGroupsMixin(supplementalGroups): { template+: { spec+: { securityContext+: { supplementalGroups+: if std.isArray(v=supplementalGroups) then supplementalGroups else [supplementalGroups] } } } }, - '#withSysctls':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch."', args=[d.arg(name='sysctls', type=d.T.array)]), + '#withSysctls':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='sysctls', type=d.T.array)]), withSysctls(sysctls): { template+: { spec+: { securityContext+: { sysctls: if std.isArray(v=sysctls) then sysctls else [sysctls] } } } }, - '#withSysctlsMixin':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='sysctls', type=d.T.array)]), + '#withSysctlsMixin':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch. 
Note that this field cannot be set when spec.os.name is windows."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='sysctls', type=d.T.array)]), withSysctlsMixin(sysctls): { template+: { spec+: { securityContext+: { sysctls+: if std.isArray(v=sysctls) then sysctls else [sysctls] } } } }, }, '#withActiveDeadlineSeconds':: d.fn(help='"Optional duration in seconds the pod may be active on the node relative to StartTime before the system will actively try to mark it failed and kill associated containers. Value must be a positive integer."', args=[d.arg(name='activeDeadlineSeconds', type=d.T.integer)]), @@ -187,9 +192,9 @@ withDnsPolicy(dnsPolicy): { template+: { spec+: { dnsPolicy: dnsPolicy } } }, '#withEnableServiceLinks':: d.fn(help="\"EnableServiceLinks indicates whether information about services should be injected into pod's environment variables, matching the syntax of Docker links. Optional: Defaults to true.\"", args=[d.arg(name='enableServiceLinks', type=d.T.boolean)]), withEnableServiceLinks(enableServiceLinks): { template+: { spec+: { enableServiceLinks: enableServiceLinks } } }, - '#withEphemeralContainers':: d.fn(help="\"List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. This field is alpha-level and is only honored by servers that enable the EphemeralContainers feature.\"", args=[d.arg(name='ephemeralContainers', type=d.T.array)]), + '#withEphemeralContainers':: d.fn(help="\"List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource.\"", args=[d.arg(name='ephemeralContainers', type=d.T.array)]), withEphemeralContainers(ephemeralContainers): { template+: { spec+: { ephemeralContainers: if std.isArray(v=ephemeralContainers) then ephemeralContainers else [ephemeralContainers] } } }, - '#withEphemeralContainersMixin':: d.fn(help="\"List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. This field is alpha-level and is only honored by servers that enable the EphemeralContainers feature.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='ephemeralContainers', type=d.T.array)]), + '#withEphemeralContainersMixin':: d.fn(help="\"List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. 
In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='ephemeralContainers', type=d.T.array)]), withEphemeralContainersMixin(ephemeralContainers): { template+: { spec+: { ephemeralContainers+: if std.isArray(v=ephemeralContainers) then ephemeralContainers else [ephemeralContainers] } } }, '#withHostAliases':: d.fn(help="\"HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts file if specified. This is only valid for non-hostNetwork pods.\"", args=[d.arg(name='hostAliases', type=d.T.array)]), withHostAliases(hostAliases): { template+: { spec+: { hostAliases: if std.isArray(v=hostAliases) then hostAliases else [hostAliases] } } }, @@ -201,11 +206,13 @@ withHostNetwork(hostNetwork): { template+: { spec+: { hostNetwork: hostNetwork } } }, '#withHostPID':: d.fn(help="\"Use the host's pid namespace. Optional: Default to false.\"", args=[d.arg(name='hostPID', type=d.T.boolean)]), withHostPID(hostPID): { template+: { spec+: { hostPID: hostPID } } }, + '#withHostUsers':: d.fn(help="\"Use the host's user namespace. Optional: Default to true. If set to true or not present, the pod will be run in the host user namespace, useful for when the pod needs a feature only available to the host user namespace, such as loading a kernel module with CAP_SYS_MODULE. When set to false, a new userns is created for the pod. Setting false is useful for mitigating container breakout vulnerabilities even allowing users to run their containers as root without actually having root privileges on the host. This field is alpha-level and is only honored by servers that enable the UserNamespacesSupport feature.\"", args=[d.arg(name='hostUsers', type=d.T.boolean)]), + withHostUsers(hostUsers): { template+: { spec+: { hostUsers: hostUsers } } }, '#withHostname':: d.fn(help="\"Specifies the hostname of the Pod If not specified, the pod's hostname will be set to a system-defined value.\"", args=[d.arg(name='hostname', type=d.T.string)]), withHostname(hostname): { template+: { spec+: { hostname: hostname } } }, - '#withImagePullSecrets':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), + '#withImagePullSecrets':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), withImagePullSecrets(imagePullSecrets): { template+: { spec+: { imagePullSecrets: if std.isArray(v=imagePullSecrets) then imagePullSecrets else [imagePullSecrets] } } }, - '#withImagePullSecretsMixin':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. 
If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), + '#withImagePullSecretsMixin':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), withImagePullSecretsMixin(imagePullSecrets): { template+: { spec+: { imagePullSecrets+: if std.isArray(v=imagePullSecrets) then imagePullSecrets else [imagePullSecrets] } } }, '#withInitContainers':: d.fn(help='"List of initialization containers belonging to the pod. Init containers are executed in order prior to containers being started. If any init container fails, the pod is considered to have failed and is handled according to its restartPolicy. The name for an init container or normal container must be unique among all containers. Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes. The resourceRequirements of an init container are taken into account during scheduling by finding the highest request/limit for each resource type, and then using the max of of that value or the sum of the normal containers. Limits are applied to init containers in a similar fashion. Init containers cannot currently be added or removed. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/"', args=[d.arg(name='initContainers', type=d.T.array)]), withInitContainers(initContainers): { template+: { spec+: { initContainers: if std.isArray(v=initContainers) then initContainers else [initContainers] } } }, @@ -217,26 +224,34 @@ withNodeSelector(nodeSelector): { template+: { spec+: { nodeSelector: nodeSelector } } }, '#withNodeSelectorMixin':: d.fn(help="\"NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='nodeSelector', type=d.T.object)]), withNodeSelectorMixin(nodeSelector): { template+: { spec+: { nodeSelector+: nodeSelector } } }, - '#withOverhead':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. 
More info: https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md This field is alpha-level as of Kubernetes v1.16, and is only honored by servers that enable the PodOverhead feature."', args=[d.arg(name='overhead', type=d.T.object)]), + '#withOverhead':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md"', args=[d.arg(name='overhead', type=d.T.object)]), withOverhead(overhead): { template+: { spec+: { overhead: overhead } } }, - '#withOverheadMixin':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md This field is alpha-level as of Kubernetes v1.16, and is only honored by servers that enable the PodOverhead feature."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='overhead', type=d.T.object)]), + '#withOverheadMixin':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='overhead', type=d.T.object)]), withOverheadMixin(overhead): { template+: { spec+: { overhead+: overhead } } }, - '#withPreemptionPolicy':: d.fn(help='"PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset. This field is beta-level, gated by the NonPreemptingPriority feature-gate."', args=[d.arg(name='preemptionPolicy', type=d.T.string)]), + '#withPreemptionPolicy':: d.fn(help='"PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. 
Defaults to PreemptLowerPriority if unset."', args=[d.arg(name='preemptionPolicy', type=d.T.string)]), withPreemptionPolicy(preemptionPolicy): { template+: { spec+: { preemptionPolicy: preemptionPolicy } } }, '#withPriority':: d.fn(help='"The priority value. Various system components use this field to find the priority of the pod. When Priority Admission Controller is enabled, it prevents users from setting this field. The admission controller populates this field from PriorityClassName. The higher the value, the higher the priority."', args=[d.arg(name='priority', type=d.T.integer)]), withPriority(priority): { template+: { spec+: { priority: priority } } }, '#withPriorityClassName':: d.fn(help="\"If specified, indicates the pod's priority. \\\"system-node-critical\\\" and \\\"system-cluster-critical\\\" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default.\"", args=[d.arg(name='priorityClassName', type=d.T.string)]), withPriorityClassName(priorityClassName): { template+: { spec+: { priorityClassName: priorityClassName } } }, - '#withReadinessGates':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/0007-pod-ready%2B%2B.md"', args=[d.arg(name='readinessGates', type=d.T.array)]), + '#withReadinessGates':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates"', args=[d.arg(name='readinessGates', type=d.T.array)]), withReadinessGates(readinessGates): { template+: { spec+: { readinessGates: if std.isArray(v=readinessGates) then readinessGates else [readinessGates] } } }, - '#withReadinessGatesMixin':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/0007-pod-ready%2B%2B.md"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='readinessGates', type=d.T.array)]), + '#withReadinessGatesMixin':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='readinessGates', type=d.T.array)]), withReadinessGatesMixin(readinessGates): { template+: { spec+: { readinessGates+: if std.isArray(v=readinessGates) then readinessGates else [readinessGates] } } }, - '#withRestartPolicy':: d.fn(help='"Restart policy for all containers within the pod. One of Always, OnFailure, Never. Default to Always. 
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy"', args=[d.arg(name='restartPolicy', type=d.T.string)]), + '#withResourceClaims':: d.fn(help='"ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. The resources will be made available to those containers which consume them by name.\\n\\nThis is an alpha field and requires enabling the DynamicResourceAllocation feature gate.\\n\\nThis field is immutable."', args=[d.arg(name='resourceClaims', type=d.T.array)]), + withResourceClaims(resourceClaims): { template+: { spec+: { resourceClaims: if std.isArray(v=resourceClaims) then resourceClaims else [resourceClaims] } } }, + '#withResourceClaimsMixin':: d.fn(help='"ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. The resources will be made available to those containers which consume them by name.\\n\\nThis is an alpha field and requires enabling the DynamicResourceAllocation feature gate.\\n\\nThis field is immutable."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='resourceClaims', type=d.T.array)]), + withResourceClaimsMixin(resourceClaims): { template+: { spec+: { resourceClaims+: if std.isArray(v=resourceClaims) then resourceClaims else [resourceClaims] } } }, + '#withRestartPolicy':: d.fn(help='"Restart policy for all containers within the pod. One of Always, OnFailure, Never. In some contexts, only a subset of those values may be permitted. Default to Always. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy"', args=[d.arg(name='restartPolicy', type=d.T.string)]), withRestartPolicy(restartPolicy): { template+: { spec+: { restartPolicy: restartPolicy } } }, - '#withRuntimeClassName':: d.fn(help='"RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \\"legacy\\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md This is a beta feature as of Kubernetes v1.14."', args=[d.arg(name='runtimeClassName', type=d.T.string)]), + '#withRuntimeClassName':: d.fn(help='"RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \\"legacy\\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class"', args=[d.arg(name='runtimeClassName', type=d.T.string)]), withRuntimeClassName(runtimeClassName): { template+: { spec+: { runtimeClassName: runtimeClassName } } }, '#withSchedulerName':: d.fn(help='"If specified, the pod will be dispatched by specified scheduler. If not specified, the pod will be dispatched by default scheduler."', args=[d.arg(name='schedulerName', type=d.T.string)]), withSchedulerName(schedulerName): { template+: { spec+: { schedulerName: schedulerName } } }, + '#withSchedulingGates':: d.fn(help='"SchedulingGates is an opaque list of values that if specified will block scheduling the pod. 
If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the scheduler will not attempt to schedule the pod.\\n\\nSchedulingGates can only be set at pod creation time, and be removed only afterwards.\\n\\nThis is a beta feature enabled by the PodSchedulingReadiness feature gate."', args=[d.arg(name='schedulingGates', type=d.T.array)]),
+ withSchedulingGates(schedulingGates): { template+: { spec+: { schedulingGates: if std.isArray(v=schedulingGates) then schedulingGates else [schedulingGates] } } },
+ '#withSchedulingGatesMixin':: d.fn(help='"SchedulingGates is an opaque list of values that if specified will block scheduling the pod. If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the scheduler will not attempt to schedule the pod.\\n\\nSchedulingGates can only be set at pod creation time, and be removed only afterwards.\\n\\nThis is a beta feature enabled by the PodSchedulingReadiness feature gate."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='schedulingGates', type=d.T.array)]),
+ withSchedulingGatesMixin(schedulingGates): { template+: { spec+: { schedulingGates+: if std.isArray(v=schedulingGates) then schedulingGates else [schedulingGates] } } },
'#withServiceAccount':: d.fn(help='"DeprecatedServiceAccount is a depreciated alias for ServiceAccountName. Deprecated: Use serviceAccountName instead."', args=[d.arg(name='serviceAccount', type=d.T.string)]),
withServiceAccount(serviceAccount): { template+: { spec+: { serviceAccount: serviceAccount } } },
'#withServiceAccountName':: d.fn(help='"ServiceAccountName is the name of the ServiceAccount to use to run this pod. More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/"', args=[d.arg(name='serviceAccountName', type=d.T.string)]),
diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apps/v1/deploymentStatus.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apps/v1/deploymentStatus.libsonnet
similarity index 93%
rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apps/v1/deploymentStatus.libsonnet
rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apps/v1/deploymentStatus.libsonnet
index 8519e533802..d2d2673434f 100644
--- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apps/v1/deploymentStatus.libsonnet
+++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apps/v1/deploymentStatus.libsonnet
@@ -11,7 +11,7 @@
withConditionsMixin(conditions): { conditions+: if std.isArray(v=conditions) then conditions else [conditions] },
'#withObservedGeneration':: d.fn(help='"The generation observed by the deployment controller."', args=[d.arg(name='observedGeneration', type=d.T.integer)]),
withObservedGeneration(observedGeneration): { observedGeneration: observedGeneration },
- '#withReadyReplicas':: d.fn(help='"Total number of ready pods targeted by this deployment."', args=[d.arg(name='readyReplicas', type=d.T.integer)]),
+ '#withReadyReplicas':: d.fn(help='"readyReplicas is the number of pods targeted by this Deployment with a Ready Condition."', args=[d.arg(name='readyReplicas', type=d.T.integer)]),
withReadyReplicas(readyReplicas): { readyReplicas: readyReplicas },
'#withReplicas':: d.fn(help='"Total number of non-terminated pods targeted by this deployment (their labels match the selector)."', args=[d.arg(name='replicas', type=d.T.integer)]),
withReplicas(replicas): { replicas: replicas },
diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apps/v1/deploymentStrategy.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apps/v1/deploymentStrategy.libsonnet
similarity index 100%
rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apps/v1/deploymentStrategy.libsonnet
rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apps/v1/deploymentStrategy.libsonnet
diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apps/v1/main.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apps/v1/main.libsonnet
similarity index 89%
rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apps/v1/main.libsonnet
rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apps/v1/main.libsonnet
index beecb814b3b..a450610b0fd 100644
--- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apps/v1/main.libsonnet
+++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apps/v1/main.libsonnet
@@ -21,6 +21,8 @@
rollingUpdateStatefulSetStrategy: (import 'rollingUpdateStatefulSetStrategy.libsonnet'),
statefulSet: (import 'statefulSet.libsonnet'),
statefulSetCondition: (import 'statefulSetCondition.libsonnet'),
+ statefulSetOrdinals: (import 'statefulSetOrdinals.libsonnet'),
+ statefulSetPersistentVolumeClaimRetentionPolicy: (import 'statefulSetPersistentVolumeClaimRetentionPolicy.libsonnet'),
statefulSetSpec: (import 'statefulSetSpec.libsonnet'),
statefulSetStatus: (import 'statefulSetStatus.libsonnet'),
statefulSetUpdateStrategy: (import 'statefulSetUpdateStrategy.libsonnet'),
diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apps/v1/replicaSet.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apps/v1/replicaSet.libsonnet
similarity index 85%
rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apps/v1/replicaSet.libsonnet
rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apps/v1/replicaSet.libsonnet
index c7a7e040d5c..2dc95569424 100644
--- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apps/v1/replicaSet.libsonnet
+++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apps/v1/replicaSet.libsonnet
@@ -3,12 +3,10 @@
'#':: d.pkg(name='replicaSet', url='', help='"ReplicaSet ensures that a specified number of pod replicas are running at any given time."'),
'#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'),
metadata: {
- '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. 
More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -19,21 +17,21 @@ withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. 
If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { metadata+: { generateName: generateName } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { metadata+: { labels+: labels } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. 
Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { metadata+: { namespace: namespace } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, @@ -41,9 +39,9 @@ withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. 
Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { metadata+: { uid: uid } }, }, '#new':: d.fn(help='new returns an instance of ReplicaSet', args=[d.arg(name='name', type=d.T.string)]), @@ -68,12 +66,10 @@ template: { '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { spec+: { template+: { metadata+: { annotations: annotations } } } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { spec+: { template+: { metadata+: { annotations+: annotations } } } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { spec+: { template+: { metadata+: { clusterName: clusterName } } } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { spec+: { template+: { metadata+: { creationTimestamp: creationTimestamp } } } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. 
Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -84,21 +80,21 @@ withFinalizers(finalizers): { spec+: { template+: { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } } } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { spec+: { template+: { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } } } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { spec+: { template+: { metadata+: { generateName: generateName } } } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. 
Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { spec+: { template+: { metadata+: { generation: generation } } } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { spec+: { template+: { metadata+: { labels: labels } } } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { spec+: { template+: { metadata+: { labels+: labels } } } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { spec+: { template+: { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } } } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { spec+: { template+: { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } } } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. 
More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { spec+: { template+: { metadata+: { name: name } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { spec+: { template+: { metadata+: { namespace: namespace } } } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { spec+: { template+: { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } } } }, @@ -106,9 +102,9 @@ withOwnerReferencesMixin(ownerReferences): { spec+: { template+: { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } } } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { spec+: { template+: { metadata+: { resourceVersion: resourceVersion } } } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. 
Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { spec+: { template+: { metadata+: { selfLink: selfLink } } } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { spec+: { template+: { metadata+: { uid: uid } } } }, }, '#spec':: d.obj(help='"PodSpec is a description of a pod."'), @@ -167,6 +163,11 @@ '#withSearchesMixin':: d.fn(help='"A list of DNS search domains for host-name lookup. This will be appended to the base search paths generated from DNSPolicy. Duplicated search paths will be removed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='searches', type=d.T.array)]), withSearchesMixin(searches): { spec+: { template+: { spec+: { dnsConfig+: { searches+: if std.isArray(v=searches) then searches else [searches] } } } } }, }, + '#os':: d.obj(help='"PodOS defines the OS parameters of a pod."'), + os: { + '#withName':: d.fn(help='"Name is the name of the operating system. The currently supported values are linux and windows. Additional value may be defined in future and can be one of: https://github.com/opencontainers/runtime-spec/blob/master/config.md#platform-specific-configuration Clients should expect to handle additional values and treat unrecognized values in this field as os: null"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { spec+: { template+: { spec+: { os+: { name: name } } } } }, + }, '#securityContext':: d.obj(help='"PodSecurityContext holds pod-level security attributes and common container settings. Some fields are also present in container.securityContext. Field values of container.securityContext take precedence over field values of PodSecurityContext."'), securityContext: { '#seLinuxOptions':: d.obj(help='"SELinuxOptions are the labels to be applied to the container"'), @@ -182,7 +183,7 @@ }, '#seccompProfile':: d.obj(help="\"SeccompProfile defines a pod/container's seccomp profile settings. Only one profile source may be set.\""), seccompProfile: { - '#withLocalhostProfile':: d.fn(help="\"localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must only be set if type is \\\"Localhost\\\".\"", args=[d.arg(name='localhostProfile', type=d.T.string)]), + '#withLocalhostProfile':: d.fn(help="\"localhostProfile indicates a profile defined in a file on the node should be used. 
The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must be set if type is \\\"Localhost\\\". Must NOT be set for any other type.\"", args=[d.arg(name='localhostProfile', type=d.T.string)]), withLocalhostProfile(localhostProfile): { spec+: { template+: { spec+: { securityContext+: { seccompProfile+: { localhostProfile: localhostProfile } } } } } }, '#withType':: d.fn(help='"type indicates which kind of seccomp profile will be applied. Valid options are:\\n\\nLocalhost - a profile defined in a file on the node should be used. RuntimeDefault - the container runtime default profile should be used. Unconfined - no profile should be applied."', args=[d.arg(name='type', type=d.T.string)]), withType(type): { spec+: { template+: { spec+: { securityContext+: { seccompProfile+: { type: type } } } } } }, @@ -193,26 +194,28 @@ withGmsaCredentialSpec(gmsaCredentialSpec): { spec+: { template+: { spec+: { securityContext+: { windowsOptions+: { gmsaCredentialSpec: gmsaCredentialSpec } } } } } }, '#withGmsaCredentialSpecName':: d.fn(help='"GMSACredentialSpecName is the name of the GMSA credential spec to use."', args=[d.arg(name='gmsaCredentialSpecName', type=d.T.string)]), withGmsaCredentialSpecName(gmsaCredentialSpecName): { spec+: { template+: { spec+: { securityContext+: { windowsOptions+: { gmsaCredentialSpecName: gmsaCredentialSpecName } } } } } }, + '#withHostProcess':: d.fn(help="\"HostProcess determines if a container should be run as a 'Host Process' container. All of a Pod's containers must have the same effective HostProcess value (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). In addition, if HostProcess is true then HostNetwork must also be set to true.\"", args=[d.arg(name='hostProcess', type=d.T.boolean)]), + withHostProcess(hostProcess): { spec+: { template+: { spec+: { securityContext+: { windowsOptions+: { hostProcess: hostProcess } } } } } }, '#withRunAsUserName':: d.fn(help='"The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence."', args=[d.arg(name='runAsUserName', type=d.T.string)]), withRunAsUserName(runAsUserName): { spec+: { template+: { spec+: { securityContext+: { windowsOptions+: { runAsUserName: runAsUserName } } } } } }, }, - '#withFsGroup':: d.fn(help="\"A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod:\\n\\n1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw----\\n\\nIf unset, the Kubelet will not modify the ownership and permissions of any volume.\"", args=[d.arg(name='fsGroup', type=d.T.integer)]), + '#withFsGroup':: d.fn(help="\"A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod:\\n\\n1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw----\\n\\nIf unset, the Kubelet will not modify the ownership and permissions of any volume. 
Note that this field cannot be set when spec.os.name is windows.\"", args=[d.arg(name='fsGroup', type=d.T.integer)]), withFsGroup(fsGroup): { spec+: { template+: { spec+: { securityContext+: { fsGroup: fsGroup } } } } }, - '#withFsGroupChangePolicy':: d.fn(help='"fsGroupChangePolicy defines behavior of changing ownership and permission of the volume before being exposed inside Pod. This field will only apply to volume types which support fsGroup based ownership(and permissions). It will have no effect on ephemeral volume types such as: secret, configmaps and emptydir. Valid values are \\"OnRootMismatch\\" and \\"Always\\". If not specified, \\"Always\\" is used."', args=[d.arg(name='fsGroupChangePolicy', type=d.T.string)]), + '#withFsGroupChangePolicy':: d.fn(help='"fsGroupChangePolicy defines behavior of changing ownership and permission of the volume before being exposed inside Pod. This field will only apply to volume types which support fsGroup based ownership(and permissions). It will have no effect on ephemeral volume types such as: secret, configmaps and emptydir. Valid values are \\"OnRootMismatch\\" and \\"Always\\". If not specified, \\"Always\\" is used. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='fsGroupChangePolicy', type=d.T.string)]), withFsGroupChangePolicy(fsGroupChangePolicy): { spec+: { template+: { spec+: { securityContext+: { fsGroupChangePolicy: fsGroupChangePolicy } } } } }, - '#withRunAsGroup':: d.fn(help='"The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container."', args=[d.arg(name='runAsGroup', type=d.T.integer)]), + '#withRunAsGroup':: d.fn(help='"The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='runAsGroup', type=d.T.integer)]), withRunAsGroup(runAsGroup): { spec+: { template+: { spec+: { securityContext+: { runAsGroup: runAsGroup } } } } }, '#withRunAsNonRoot':: d.fn(help='"Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence."', args=[d.arg(name='runAsNonRoot', type=d.T.boolean)]), withRunAsNonRoot(runAsNonRoot): { spec+: { template+: { spec+: { securityContext+: { runAsNonRoot: runAsNonRoot } } } } }, - '#withRunAsUser':: d.fn(help='"The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container."', args=[d.arg(name='runAsUser', type=d.T.integer)]), + '#withRunAsUser':: d.fn(help='"The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. 
If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='runAsUser', type=d.T.integer)]), withRunAsUser(runAsUser): { spec+: { template+: { spec+: { securityContext+: { runAsUser: runAsUser } } } } }, - '#withSupplementalGroups':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID. If unspecified, no groups will be added to any container.\"", args=[d.arg(name='supplementalGroups', type=d.T.array)]), + '#withSupplementalGroups':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID, the fsGroup (if specified), and group memberships defined in the container image for the uid of the container process. If unspecified, no additional groups are added to any container. Note that group memberships defined in the container image for the uid of the container process are still effective, even if they are not included in this list. Note that this field cannot be set when spec.os.name is windows.\"", args=[d.arg(name='supplementalGroups', type=d.T.array)]), withSupplementalGroups(supplementalGroups): { spec+: { template+: { spec+: { securityContext+: { supplementalGroups: if std.isArray(v=supplementalGroups) then supplementalGroups else [supplementalGroups] } } } } }, - '#withSupplementalGroupsMixin':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID. If unspecified, no groups will be added to any container.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='supplementalGroups', type=d.T.array)]), + '#withSupplementalGroupsMixin':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID, the fsGroup (if specified), and group memberships defined in the container image for the uid of the container process. If unspecified, no additional groups are added to any container. Note that group memberships defined in the container image for the uid of the container process are still effective, even if they are not included in this list. Note that this field cannot be set when spec.os.name is windows.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='supplementalGroups', type=d.T.array)]), withSupplementalGroupsMixin(supplementalGroups): { spec+: { template+: { spec+: { securityContext+: { supplementalGroups+: if std.isArray(v=supplementalGroups) then supplementalGroups else [supplementalGroups] } } } } }, - '#withSysctls':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch."', args=[d.arg(name='sysctls', type=d.T.array)]), + '#withSysctls':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='sysctls', type=d.T.array)]), withSysctls(sysctls): { spec+: { template+: { spec+: { securityContext+: { sysctls: if std.isArray(v=sysctls) then sysctls else [sysctls] } } } } }, - '#withSysctlsMixin':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. 
Pods with unsupported sysctls (by the container runtime) might fail to launch."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='sysctls', type=d.T.array)]), + '#withSysctlsMixin':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch. Note that this field cannot be set when spec.os.name is windows."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='sysctls', type=d.T.array)]), withSysctlsMixin(sysctls): { spec+: { template+: { spec+: { securityContext+: { sysctls+: if std.isArray(v=sysctls) then sysctls else [sysctls] } } } } }, }, '#withActiveDeadlineSeconds':: d.fn(help='"Optional duration in seconds the pod may be active on the node relative to StartTime before the system will actively try to mark it failed and kill associated containers. Value must be a positive integer."', args=[d.arg(name='activeDeadlineSeconds', type=d.T.integer)]), @@ -227,9 +230,9 @@ withDnsPolicy(dnsPolicy): { spec+: { template+: { spec+: { dnsPolicy: dnsPolicy } } } }, '#withEnableServiceLinks':: d.fn(help="\"EnableServiceLinks indicates whether information about services should be injected into pod's environment variables, matching the syntax of Docker links. Optional: Defaults to true.\"", args=[d.arg(name='enableServiceLinks', type=d.T.boolean)]), withEnableServiceLinks(enableServiceLinks): { spec+: { template+: { spec+: { enableServiceLinks: enableServiceLinks } } } }, - '#withEphemeralContainers':: d.fn(help="\"List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. This field is alpha-level and is only honored by servers that enable the EphemeralContainers feature.\"", args=[d.arg(name='ephemeralContainers', type=d.T.array)]), + '#withEphemeralContainers':: d.fn(help="\"List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource.\"", args=[d.arg(name='ephemeralContainers', type=d.T.array)]), withEphemeralContainers(ephemeralContainers): { spec+: { template+: { spec+: { ephemeralContainers: if std.isArray(v=ephemeralContainers) then ephemeralContainers else [ephemeralContainers] } } } }, - '#withEphemeralContainersMixin':: d.fn(help="\"List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. This field is alpha-level and is only honored by servers that enable the EphemeralContainers feature.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='ephemeralContainers', type=d.T.array)]), + '#withEphemeralContainersMixin':: d.fn(help="\"List of ephemeral containers run in this pod. 
Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='ephemeralContainers', type=d.T.array)]), withEphemeralContainersMixin(ephemeralContainers): { spec+: { template+: { spec+: { ephemeralContainers+: if std.isArray(v=ephemeralContainers) then ephemeralContainers else [ephemeralContainers] } } } }, '#withHostAliases':: d.fn(help="\"HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts file if specified. This is only valid for non-hostNetwork pods.\"", args=[d.arg(name='hostAliases', type=d.T.array)]), withHostAliases(hostAliases): { spec+: { template+: { spec+: { hostAliases: if std.isArray(v=hostAliases) then hostAliases else [hostAliases] } } } }, @@ -241,11 +244,13 @@ withHostNetwork(hostNetwork): { spec+: { template+: { spec+: { hostNetwork: hostNetwork } } } }, '#withHostPID':: d.fn(help="\"Use the host's pid namespace. Optional: Default to false.\"", args=[d.arg(name='hostPID', type=d.T.boolean)]), withHostPID(hostPID): { spec+: { template+: { spec+: { hostPID: hostPID } } } }, + '#withHostUsers':: d.fn(help="\"Use the host's user namespace. Optional: Default to true. If set to true or not present, the pod will be run in the host user namespace, useful for when the pod needs a feature only available to the host user namespace, such as loading a kernel module with CAP_SYS_MODULE. When set to false, a new userns is created for the pod. Setting false is useful for mitigating container breakout vulnerabilities even allowing users to run their containers as root without actually having root privileges on the host. This field is alpha-level and is only honored by servers that enable the UserNamespacesSupport feature.\"", args=[d.arg(name='hostUsers', type=d.T.boolean)]), + withHostUsers(hostUsers): { spec+: { template+: { spec+: { hostUsers: hostUsers } } } }, '#withHostname':: d.fn(help="\"Specifies the hostname of the Pod If not specified, the pod's hostname will be set to a system-defined value.\"", args=[d.arg(name='hostname', type=d.T.string)]), withHostname(hostname): { spec+: { template+: { spec+: { hostname: hostname } } } }, - '#withImagePullSecrets':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), + '#withImagePullSecrets':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. 
More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), withImagePullSecrets(imagePullSecrets): { spec+: { template+: { spec+: { imagePullSecrets: if std.isArray(v=imagePullSecrets) then imagePullSecrets else [imagePullSecrets] } } } }, - '#withImagePullSecretsMixin':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), + '#withImagePullSecretsMixin':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), withImagePullSecretsMixin(imagePullSecrets): { spec+: { template+: { spec+: { imagePullSecrets+: if std.isArray(v=imagePullSecrets) then imagePullSecrets else [imagePullSecrets] } } } }, '#withInitContainers':: d.fn(help='"List of initialization containers belonging to the pod. Init containers are executed in order prior to containers being started. If any init container fails, the pod is considered to have failed and is handled according to its restartPolicy. The name for an init container or normal container must be unique among all containers. Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes. The resourceRequirements of an init container are taken into account during scheduling by finding the highest request/limit for each resource type, and then using the max of of that value or the sum of the normal containers. Limits are applied to init containers in a similar fashion. Init containers cannot currently be added or removed. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/"', args=[d.arg(name='initContainers', type=d.T.array)]), withInitContainers(initContainers): { spec+: { template+: { spec+: { initContainers: if std.isArray(v=initContainers) then initContainers else [initContainers] } } } }, @@ -257,26 +262,34 @@ withNodeSelector(nodeSelector): { spec+: { template+: { spec+: { nodeSelector: nodeSelector } } } }, '#withNodeSelectorMixin':: d.fn(help="\"NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='nodeSelector', type=d.T.object)]), withNodeSelectorMixin(nodeSelector): { spec+: { template+: { spec+: { nodeSelector+: nodeSelector } } } }, - '#withOverhead':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. 
This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md This field is alpha-level as of Kubernetes v1.16, and is only honored by servers that enable the PodOverhead feature."', args=[d.arg(name='overhead', type=d.T.object)]), + '#withOverhead':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md"', args=[d.arg(name='overhead', type=d.T.object)]), withOverhead(overhead): { spec+: { template+: { spec+: { overhead: overhead } } } }, - '#withOverheadMixin':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md This field is alpha-level as of Kubernetes v1.16, and is only honored by servers that enable the PodOverhead feature."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='overhead', type=d.T.object)]), + '#withOverheadMixin':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. 
More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='overhead', type=d.T.object)]), withOverheadMixin(overhead): { spec+: { template+: { spec+: { overhead+: overhead } } } }, - '#withPreemptionPolicy':: d.fn(help='"PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset. This field is beta-level, gated by the NonPreemptingPriority feature-gate."', args=[d.arg(name='preemptionPolicy', type=d.T.string)]), + '#withPreemptionPolicy':: d.fn(help='"PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset."', args=[d.arg(name='preemptionPolicy', type=d.T.string)]), withPreemptionPolicy(preemptionPolicy): { spec+: { template+: { spec+: { preemptionPolicy: preemptionPolicy } } } }, '#withPriority':: d.fn(help='"The priority value. Various system components use this field to find the priority of the pod. When Priority Admission Controller is enabled, it prevents users from setting this field. The admission controller populates this field from PriorityClassName. The higher the value, the higher the priority."', args=[d.arg(name='priority', type=d.T.integer)]), withPriority(priority): { spec+: { template+: { spec+: { priority: priority } } } }, '#withPriorityClassName':: d.fn(help="\"If specified, indicates the pod's priority. \\\"system-node-critical\\\" and \\\"system-cluster-critical\\\" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default.\"", args=[d.arg(name='priorityClassName', type=d.T.string)]), withPriorityClassName(priorityClassName): { spec+: { template+: { spec+: { priorityClassName: priorityClassName } } } }, - '#withReadinessGates':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/0007-pod-ready%2B%2B.md"', args=[d.arg(name='readinessGates', type=d.T.array)]), + '#withReadinessGates':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates"', args=[d.arg(name='readinessGates', type=d.T.array)]), withReadinessGates(readinessGates): { spec+: { template+: { spec+: { readinessGates: if std.isArray(v=readinessGates) then readinessGates else [readinessGates] } } } }, - '#withReadinessGatesMixin':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. 
A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/0007-pod-ready%2B%2B.md"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='readinessGates', type=d.T.array)]), + '#withReadinessGatesMixin':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='readinessGates', type=d.T.array)]), withReadinessGatesMixin(readinessGates): { spec+: { template+: { spec+: { readinessGates+: if std.isArray(v=readinessGates) then readinessGates else [readinessGates] } } } }, - '#withRestartPolicy':: d.fn(help='"Restart policy for all containers within the pod. One of Always, OnFailure, Never. Default to Always. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy"', args=[d.arg(name='restartPolicy', type=d.T.string)]), + '#withResourceClaims':: d.fn(help='"ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. The resources will be made available to those containers which consume them by name.\\n\\nThis is an alpha field and requires enabling the DynamicResourceAllocation feature gate.\\n\\nThis field is immutable."', args=[d.arg(name='resourceClaims', type=d.T.array)]), + withResourceClaims(resourceClaims): { spec+: { template+: { spec+: { resourceClaims: if std.isArray(v=resourceClaims) then resourceClaims else [resourceClaims] } } } }, + '#withResourceClaimsMixin':: d.fn(help='"ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. The resources will be made available to those containers which consume them by name.\\n\\nThis is an alpha field and requires enabling the DynamicResourceAllocation feature gate.\\n\\nThis field is immutable."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='resourceClaims', type=d.T.array)]), + withResourceClaimsMixin(resourceClaims): { spec+: { template+: { spec+: { resourceClaims+: if std.isArray(v=resourceClaims) then resourceClaims else [resourceClaims] } } } }, + '#withRestartPolicy':: d.fn(help='"Restart policy for all containers within the pod. One of Always, OnFailure, Never. In some contexts, only a subset of those values may be permitted. Default to Always. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy"', args=[d.arg(name='restartPolicy', type=d.T.string)]), withRestartPolicy(restartPolicy): { spec+: { template+: { spec+: { restartPolicy: restartPolicy } } } }, - '#withRuntimeClassName':: d.fn(help='"RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \\"legacy\\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. 
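The withSchedulingGates and withResourceClaims helpers introduced above follow the same builder pattern as the existing pod-spec functions: each returns a mixin rooted at spec.template.spec. A minimal usage sketch, not part of the vendored patch; it assumes the vendored main.libsonnet entry point and the apps.v1.deployment object, which exposes the same generated helpers, and the gate name and claim name are illustrative only:

local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';
local deploy = k.apps.v1.deployment;

// Gate scheduling until an external controller removes the gate, and
// reference a hypothetical ResourceClaim by name.
deploy.spec.template.spec.withSchedulingGates([{ name: 'example.com/provisioning' }])
+ deploy.spec.template.spec.withResourceClaims([{ name: 'example-claim' }])

As the generated notes above state, the plain setters replace the list while the Mixin variants append, so repeated calls to withSchedulingGatesMixin accumulate gates.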
More info: https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md This is a beta feature as of Kubernetes v1.14."', args=[d.arg(name='runtimeClassName', type=d.T.string)]), + '#withRuntimeClassName':: d.fn(help='"RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \\"legacy\\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class"', args=[d.arg(name='runtimeClassName', type=d.T.string)]), withRuntimeClassName(runtimeClassName): { spec+: { template+: { spec+: { runtimeClassName: runtimeClassName } } } }, '#withSchedulerName':: d.fn(help='"If specified, the pod will be dispatched by specified scheduler. If not specified, the pod will be dispatched by default scheduler."', args=[d.arg(name='schedulerName', type=d.T.string)]), withSchedulerName(schedulerName): { spec+: { template+: { spec+: { schedulerName: schedulerName } } } }, + '#withSchedulingGates':: d.fn(help='"SchedulingGates is an opaque list of values that if specified will block scheduling the pod. If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the scheduler will not attempt to schedule the pod.\\n\\nSchedulingGates can only be set at pod creation time, and be removed only afterwards.\\n\\nThis is a beta feature enabled by the PodSchedulingReadiness feature gate."', args=[d.arg(name='schedulingGates', type=d.T.array)]), + withSchedulingGates(schedulingGates): { spec+: { template+: { spec+: { schedulingGates: if std.isArray(v=schedulingGates) then schedulingGates else [schedulingGates] } } } }, + '#withSchedulingGatesMixin':: d.fn(help='"SchedulingGates is an opaque list of values that if specified will block scheduling the pod. If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the scheduler will not attempt to schedule the pod.\\n\\nSchedulingGates can only be set at pod creation time, and be removed only afterwards.\\n\\nThis is a beta feature enabled by the PodSchedulingReadiness feature gate."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='schedulingGates', type=d.T.array)]), + withSchedulingGatesMixin(schedulingGates): { spec+: { template+: { spec+: { schedulingGates+: if std.isArray(v=schedulingGates) then schedulingGates else [schedulingGates] } } } }, '#withServiceAccount':: d.fn(help='"DeprecatedServiceAccount is a depreciated alias for ServiceAccountName. Deprecated: Use serviceAccountName instead."', args=[d.arg(name='serviceAccount', type=d.T.string)]), withServiceAccount(serviceAccount): { spec+: { template+: { spec+: { serviceAccount: serviceAccount } } } }, '#withServiceAccountName':: d.fn(help='"ServiceAccountName is the name of the ServiceAccount to use to run this pod. 
More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/"', args=[d.arg(name='serviceAccountName', type=d.T.string)]), diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apps/v1/replicaSetCondition.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apps/v1/replicaSetCondition.libsonnet similarity index 100% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apps/v1/replicaSetCondition.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apps/v1/replicaSetCondition.libsonnet diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apps/v1/replicaSetSpec.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apps/v1/replicaSetSpec.libsonnet similarity index 85% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apps/v1/replicaSetSpec.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apps/v1/replicaSetSpec.libsonnet index db4677bb6b2..b172fef6266 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apps/v1/replicaSetSpec.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apps/v1/replicaSetSpec.libsonnet @@ -16,12 +16,10 @@ template: { '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { template+: { metadata+: { annotations: annotations } } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { template+: { metadata+: { annotations+: annotations } } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { template+: { metadata+: { clusterName: clusterName } } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { template+: { metadata+: { creationTimestamp: creationTimestamp } } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -32,21 +30,21 @@ withFinalizers(finalizers): { template+: { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { template+: { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. 
The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { template+: { metadata+: { generateName: generateName } } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { template+: { metadata+: { generation: generation } } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { template+: { metadata+: { labels: labels } } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { template+: { metadata+: { labels+: labels } } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. 
This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { template+: { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { template+: { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { template+: { metadata+: { name: name } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { template+: { metadata+: { namespace: namespace } } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. 
If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { template+: { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } } }, @@ -54,9 +52,9 @@ withOwnerReferencesMixin(ownerReferences): { template+: { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { template+: { metadata+: { resourceVersion: resourceVersion } } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { template+: { metadata+: { selfLink: selfLink } } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { template+: { metadata+: { uid: uid } } }, }, '#spec':: d.obj(help='"PodSpec is a description of a pod."'), @@ -115,6 +113,11 @@ '#withSearchesMixin':: d.fn(help='"A list of DNS search domains for host-name lookup. This will be appended to the base search paths generated from DNSPolicy. Duplicated search paths will be removed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='searches', type=d.T.array)]), withSearchesMixin(searches): { template+: { spec+: { dnsConfig+: { searches+: if std.isArray(v=searches) then searches else [searches] } } } }, }, + '#os':: d.obj(help='"PodOS defines the OS parameters of a pod."'), + os: { + '#withName':: d.fn(help='"Name is the name of the operating system. The currently supported values are linux and windows. 
Additional value may be defined in future and can be one of: https://github.com/opencontainers/runtime-spec/blob/master/config.md#platform-specific-configuration Clients should expect to handle additional values and treat unrecognized values in this field as os: null"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { template+: { spec+: { os+: { name: name } } } }, + }, '#securityContext':: d.obj(help='"PodSecurityContext holds pod-level security attributes and common container settings. Some fields are also present in container.securityContext. Field values of container.securityContext take precedence over field values of PodSecurityContext."'), securityContext: { '#seLinuxOptions':: d.obj(help='"SELinuxOptions are the labels to be applied to the container"'), @@ -130,7 +133,7 @@ }, '#seccompProfile':: d.obj(help="\"SeccompProfile defines a pod/container's seccomp profile settings. Only one profile source may be set.\""), seccompProfile: { - '#withLocalhostProfile':: d.fn(help="\"localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must only be set if type is \\\"Localhost\\\".\"", args=[d.arg(name='localhostProfile', type=d.T.string)]), + '#withLocalhostProfile':: d.fn(help="\"localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must be set if type is \\\"Localhost\\\". Must NOT be set for any other type.\"", args=[d.arg(name='localhostProfile', type=d.T.string)]), withLocalhostProfile(localhostProfile): { template+: { spec+: { securityContext+: { seccompProfile+: { localhostProfile: localhostProfile } } } } }, '#withType':: d.fn(help='"type indicates which kind of seccomp profile will be applied. Valid options are:\\n\\nLocalhost - a profile defined in a file on the node should be used. RuntimeDefault - the container runtime default profile should be used. Unconfined - no profile should be applied."', args=[d.arg(name='type', type=d.T.string)]), withType(type): { template+: { spec+: { securityContext+: { seccompProfile+: { type: type } } } } }, @@ -141,26 +144,28 @@ withGmsaCredentialSpec(gmsaCredentialSpec): { template+: { spec+: { securityContext+: { windowsOptions+: { gmsaCredentialSpec: gmsaCredentialSpec } } } } }, '#withGmsaCredentialSpecName':: d.fn(help='"GMSACredentialSpecName is the name of the GMSA credential spec to use."', args=[d.arg(name='gmsaCredentialSpecName', type=d.T.string)]), withGmsaCredentialSpecName(gmsaCredentialSpecName): { template+: { spec+: { securityContext+: { windowsOptions+: { gmsaCredentialSpecName: gmsaCredentialSpecName } } } } }, + '#withHostProcess':: d.fn(help="\"HostProcess determines if a container should be run as a 'Host Process' container. All of a Pod's containers must have the same effective HostProcess value (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). In addition, if HostProcess is true then HostNetwork must also be set to true.\"", args=[d.arg(name='hostProcess', type=d.T.boolean)]), + withHostProcess(hostProcess): { template+: { spec+: { securityContext+: { windowsOptions+: { hostProcess: hostProcess } } } } }, '#withRunAsUserName':: d.fn(help='"The UserName in Windows to run the entrypoint of the container process. 
Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence."', args=[d.arg(name='runAsUserName', type=d.T.string)]), withRunAsUserName(runAsUserName): { template+: { spec+: { securityContext+: { windowsOptions+: { runAsUserName: runAsUserName } } } } }, }, - '#withFsGroup':: d.fn(help="\"A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod:\\n\\n1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw----\\n\\nIf unset, the Kubelet will not modify the ownership and permissions of any volume.\"", args=[d.arg(name='fsGroup', type=d.T.integer)]), + '#withFsGroup':: d.fn(help="\"A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod:\\n\\n1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw----\\n\\nIf unset, the Kubelet will not modify the ownership and permissions of any volume. Note that this field cannot be set when spec.os.name is windows.\"", args=[d.arg(name='fsGroup', type=d.T.integer)]), withFsGroup(fsGroup): { template+: { spec+: { securityContext+: { fsGroup: fsGroup } } } }, - '#withFsGroupChangePolicy':: d.fn(help='"fsGroupChangePolicy defines behavior of changing ownership and permission of the volume before being exposed inside Pod. This field will only apply to volume types which support fsGroup based ownership(and permissions). It will have no effect on ephemeral volume types such as: secret, configmaps and emptydir. Valid values are \\"OnRootMismatch\\" and \\"Always\\". If not specified, \\"Always\\" is used."', args=[d.arg(name='fsGroupChangePolicy', type=d.T.string)]), + '#withFsGroupChangePolicy':: d.fn(help='"fsGroupChangePolicy defines behavior of changing ownership and permission of the volume before being exposed inside Pod. This field will only apply to volume types which support fsGroup based ownership(and permissions). It will have no effect on ephemeral volume types such as: secret, configmaps and emptydir. Valid values are \\"OnRootMismatch\\" and \\"Always\\". If not specified, \\"Always\\" is used. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='fsGroupChangePolicy', type=d.T.string)]), withFsGroupChangePolicy(fsGroupChangePolicy): { template+: { spec+: { securityContext+: { fsGroupChangePolicy: fsGroupChangePolicy } } } }, - '#withRunAsGroup':: d.fn(help='"The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container."', args=[d.arg(name='runAsGroup', type=d.T.integer)]), + '#withRunAsGroup':: d.fn(help='"The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. 
Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='runAsGroup', type=d.T.integer)]), withRunAsGroup(runAsGroup): { template+: { spec+: { securityContext+: { runAsGroup: runAsGroup } } } }, '#withRunAsNonRoot':: d.fn(help='"Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence."', args=[d.arg(name='runAsNonRoot', type=d.T.boolean)]), withRunAsNonRoot(runAsNonRoot): { template+: { spec+: { securityContext+: { runAsNonRoot: runAsNonRoot } } } }, - '#withRunAsUser':: d.fn(help='"The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container."', args=[d.arg(name='runAsUser', type=d.T.integer)]), + '#withRunAsUser':: d.fn(help='"The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='runAsUser', type=d.T.integer)]), withRunAsUser(runAsUser): { template+: { spec+: { securityContext+: { runAsUser: runAsUser } } } }, - '#withSupplementalGroups':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID. If unspecified, no groups will be added to any container.\"", args=[d.arg(name='supplementalGroups', type=d.T.array)]), + '#withSupplementalGroups':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID, the fsGroup (if specified), and group memberships defined in the container image for the uid of the container process. If unspecified, no additional groups are added to any container. Note that group memberships defined in the container image for the uid of the container process are still effective, even if they are not included in this list. Note that this field cannot be set when spec.os.name is windows.\"", args=[d.arg(name='supplementalGroups', type=d.T.array)]), withSupplementalGroups(supplementalGroups): { template+: { spec+: { securityContext+: { supplementalGroups: if std.isArray(v=supplementalGroups) then supplementalGroups else [supplementalGroups] } } } }, - '#withSupplementalGroupsMixin':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID. If unspecified, no groups will be added to any container.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='supplementalGroups', type=d.T.array)]), + '#withSupplementalGroupsMixin':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID, the fsGroup (if specified), and group memberships defined in the container image for the uid of the container process. If unspecified, no additional groups are added to any container. 
Note that group memberships defined in the container image for the uid of the container process are still effective, even if they are not included in this list. Note that this field cannot be set when spec.os.name is windows.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='supplementalGroups', type=d.T.array)]), withSupplementalGroupsMixin(supplementalGroups): { template+: { spec+: { securityContext+: { supplementalGroups+: if std.isArray(v=supplementalGroups) then supplementalGroups else [supplementalGroups] } } } }, - '#withSysctls':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch."', args=[d.arg(name='sysctls', type=d.T.array)]), + '#withSysctls':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='sysctls', type=d.T.array)]), withSysctls(sysctls): { template+: { spec+: { securityContext+: { sysctls: if std.isArray(v=sysctls) then sysctls else [sysctls] } } } }, - '#withSysctlsMixin':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='sysctls', type=d.T.array)]), + '#withSysctlsMixin':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch. Note that this field cannot be set when spec.os.name is windows."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='sysctls', type=d.T.array)]), withSysctlsMixin(sysctls): { template+: { spec+: { securityContext+: { sysctls+: if std.isArray(v=sysctls) then sysctls else [sysctls] } } } }, }, '#withActiveDeadlineSeconds':: d.fn(help='"Optional duration in seconds the pod may be active on the node relative to StartTime before the system will actively try to mark it failed and kill associated containers. Value must be a positive integer."', args=[d.arg(name='activeDeadlineSeconds', type=d.T.integer)]), @@ -175,9 +180,9 @@ withDnsPolicy(dnsPolicy): { template+: { spec+: { dnsPolicy: dnsPolicy } } }, '#withEnableServiceLinks':: d.fn(help="\"EnableServiceLinks indicates whether information about services should be injected into pod's environment variables, matching the syntax of Docker links. Optional: Defaults to true.\"", args=[d.arg(name='enableServiceLinks', type=d.T.boolean)]), withEnableServiceLinks(enableServiceLinks): { template+: { spec+: { enableServiceLinks: enableServiceLinks } } }, - '#withEphemeralContainers':: d.fn(help="\"List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. This field is alpha-level and is only honored by servers that enable the EphemeralContainers feature.\"", args=[d.arg(name='ephemeralContainers', type=d.T.array)]), + '#withEphemeralContainers':: d.fn(help="\"List of ephemeral containers run in this pod. 
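The pod-level securityContext helpers touched in this hunk compose the same way. A sketch, assuming the vendored main.libsonnet entry point and using only setters that appear in this file; the numeric IDs are illustrative:

local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';
local podSpec = k.apps.v1.replicaSetSpec.template.spec;

// Each helper returns a { template+: { spec+: { securityContext+: ... } } }
// fragment, so the pieces can simply be added together.
podSpec.securityContext.withRunAsNonRoot(true)
+ podSpec.securityContext.withRunAsUser(10001)   // cannot be set when spec.os.name is windows
+ podSpec.securityContext.withFsGroup(10001)
+ podSpec.securityContext.seccompProfile.withType('RuntimeDefault')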
Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource.\"", args=[d.arg(name='ephemeralContainers', type=d.T.array)]), withEphemeralContainers(ephemeralContainers): { template+: { spec+: { ephemeralContainers: if std.isArray(v=ephemeralContainers) then ephemeralContainers else [ephemeralContainers] } } }, - '#withEphemeralContainersMixin':: d.fn(help="\"List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. This field is alpha-level and is only honored by servers that enable the EphemeralContainers feature.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='ephemeralContainers', type=d.T.array)]), + '#withEphemeralContainersMixin':: d.fn(help="\"List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='ephemeralContainers', type=d.T.array)]), withEphemeralContainersMixin(ephemeralContainers): { template+: { spec+: { ephemeralContainers+: if std.isArray(v=ephemeralContainers) then ephemeralContainers else [ephemeralContainers] } } }, '#withHostAliases':: d.fn(help="\"HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts file if specified. This is only valid for non-hostNetwork pods.\"", args=[d.arg(name='hostAliases', type=d.T.array)]), withHostAliases(hostAliases): { template+: { spec+: { hostAliases: if std.isArray(v=hostAliases) then hostAliases else [hostAliases] } } }, @@ -189,11 +194,13 @@ withHostNetwork(hostNetwork): { template+: { spec+: { hostNetwork: hostNetwork } } }, '#withHostPID':: d.fn(help="\"Use the host's pid namespace. Optional: Default to false.\"", args=[d.arg(name='hostPID', type=d.T.boolean)]), withHostPID(hostPID): { template+: { spec+: { hostPID: hostPID } } }, + '#withHostUsers':: d.fn(help="\"Use the host's user namespace. Optional: Default to true. If set to true or not present, the pod will be run in the host user namespace, useful for when the pod needs a feature only available to the host user namespace, such as loading a kernel module with CAP_SYS_MODULE. When set to false, a new userns is created for the pod. Setting false is useful for mitigating container breakout vulnerabilities even allowing users to run their containers as root without actually having root privileges on the host. 
This field is alpha-level and is only honored by servers that enable the UserNamespacesSupport feature.\"", args=[d.arg(name='hostUsers', type=d.T.boolean)]), + withHostUsers(hostUsers): { template+: { spec+: { hostUsers: hostUsers } } }, '#withHostname':: d.fn(help="\"Specifies the hostname of the Pod If not specified, the pod's hostname will be set to a system-defined value.\"", args=[d.arg(name='hostname', type=d.T.string)]), withHostname(hostname): { template+: { spec+: { hostname: hostname } } }, - '#withImagePullSecrets':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), + '#withImagePullSecrets':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), withImagePullSecrets(imagePullSecrets): { template+: { spec+: { imagePullSecrets: if std.isArray(v=imagePullSecrets) then imagePullSecrets else [imagePullSecrets] } } }, - '#withImagePullSecretsMixin':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), + '#withImagePullSecretsMixin':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), withImagePullSecretsMixin(imagePullSecrets): { template+: { spec+: { imagePullSecrets+: if std.isArray(v=imagePullSecrets) then imagePullSecrets else [imagePullSecrets] } } }, '#withInitContainers':: d.fn(help='"List of initialization containers belonging to the pod. Init containers are executed in order prior to containers being started. If any init container fails, the pod is considered to have failed and is handled according to its restartPolicy. The name for an init container or normal container must be unique among all containers. Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes. 
The resourceRequirements of an init container are taken into account during scheduling by finding the highest request/limit for each resource type, and then using the max of of that value or the sum of the normal containers. Limits are applied to init containers in a similar fashion. Init containers cannot currently be added or removed. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/"', args=[d.arg(name='initContainers', type=d.T.array)]), withInitContainers(initContainers): { template+: { spec+: { initContainers: if std.isArray(v=initContainers) then initContainers else [initContainers] } } }, @@ -205,26 +212,34 @@ withNodeSelector(nodeSelector): { template+: { spec+: { nodeSelector: nodeSelector } } }, '#withNodeSelectorMixin':: d.fn(help="\"NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='nodeSelector', type=d.T.object)]), withNodeSelectorMixin(nodeSelector): { template+: { spec+: { nodeSelector+: nodeSelector } } }, - '#withOverhead':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md This field is alpha-level as of Kubernetes v1.16, and is only honored by servers that enable the PodOverhead feature."', args=[d.arg(name='overhead', type=d.T.object)]), + '#withOverhead':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md"', args=[d.arg(name='overhead', type=d.T.object)]), withOverhead(overhead): { template+: { spec+: { overhead: overhead } } }, - '#withOverheadMixin':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. 
If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md This field is alpha-level as of Kubernetes v1.16, and is only honored by servers that enable the PodOverhead feature."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='overhead', type=d.T.object)]), + '#withOverheadMixin':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='overhead', type=d.T.object)]), withOverheadMixin(overhead): { template+: { spec+: { overhead+: overhead } } }, - '#withPreemptionPolicy':: d.fn(help='"PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset. This field is beta-level, gated by the NonPreemptingPriority feature-gate."', args=[d.arg(name='preemptionPolicy', type=d.T.string)]), + '#withPreemptionPolicy':: d.fn(help='"PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset."', args=[d.arg(name='preemptionPolicy', type=d.T.string)]), withPreemptionPolicy(preemptionPolicy): { template+: { spec+: { preemptionPolicy: preemptionPolicy } } }, '#withPriority':: d.fn(help='"The priority value. Various system components use this field to find the priority of the pod. When Priority Admission Controller is enabled, it prevents users from setting this field. The admission controller populates this field from PriorityClassName. The higher the value, the higher the priority."', args=[d.arg(name='priority', type=d.T.integer)]), withPriority(priority): { template+: { spec+: { priority: priority } } }, '#withPriorityClassName':: d.fn(help="\"If specified, indicates the pod's priority. \\\"system-node-critical\\\" and \\\"system-cluster-critical\\\" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default.\"", args=[d.arg(name='priorityClassName', type=d.T.string)]), withPriorityClassName(priorityClassName): { template+: { spec+: { priorityClassName: priorityClassName } } }, - '#withReadinessGates':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. 
A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/0007-pod-ready%2B%2B.md"', args=[d.arg(name='readinessGates', type=d.T.array)]), + '#withReadinessGates':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates"', args=[d.arg(name='readinessGates', type=d.T.array)]), withReadinessGates(readinessGates): { template+: { spec+: { readinessGates: if std.isArray(v=readinessGates) then readinessGates else [readinessGates] } } }, - '#withReadinessGatesMixin':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/0007-pod-ready%2B%2B.md"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='readinessGates', type=d.T.array)]), + '#withReadinessGatesMixin':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='readinessGates', type=d.T.array)]), withReadinessGatesMixin(readinessGates): { template+: { spec+: { readinessGates+: if std.isArray(v=readinessGates) then readinessGates else [readinessGates] } } }, - '#withRestartPolicy':: d.fn(help='"Restart policy for all containers within the pod. One of Always, OnFailure, Never. Default to Always. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy"', args=[d.arg(name='restartPolicy', type=d.T.string)]), + '#withResourceClaims':: d.fn(help='"ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. The resources will be made available to those containers which consume them by name.\\n\\nThis is an alpha field and requires enabling the DynamicResourceAllocation feature gate.\\n\\nThis field is immutable."', args=[d.arg(name='resourceClaims', type=d.T.array)]), + withResourceClaims(resourceClaims): { template+: { spec+: { resourceClaims: if std.isArray(v=resourceClaims) then resourceClaims else [resourceClaims] } } }, + '#withResourceClaimsMixin':: d.fn(help='"ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. The resources will be made available to those containers which consume them by name.\\n\\nThis is an alpha field and requires enabling the DynamicResourceAllocation feature gate.\\n\\nThis field is immutable."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='resourceClaims', type=d.T.array)]), + withResourceClaimsMixin(resourceClaims): { template+: { spec+: { resourceClaims+: if std.isArray(v=resourceClaims) then resourceClaims else [resourceClaims] } } }, + '#withRestartPolicy':: d.fn(help='"Restart policy for all containers within the pod. One of Always, OnFailure, Never. 
In some contexts, only a subset of those values may be permitted. Default to Always. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy"', args=[d.arg(name='restartPolicy', type=d.T.string)]), withRestartPolicy(restartPolicy): { template+: { spec+: { restartPolicy: restartPolicy } } }, - '#withRuntimeClassName':: d.fn(help='"RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \\"legacy\\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md This is a beta feature as of Kubernetes v1.14."', args=[d.arg(name='runtimeClassName', type=d.T.string)]), + '#withRuntimeClassName':: d.fn(help='"RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \\"legacy\\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class"', args=[d.arg(name='runtimeClassName', type=d.T.string)]), withRuntimeClassName(runtimeClassName): { template+: { spec+: { runtimeClassName: runtimeClassName } } }, '#withSchedulerName':: d.fn(help='"If specified, the pod will be dispatched by specified scheduler. If not specified, the pod will be dispatched by default scheduler."', args=[d.arg(name='schedulerName', type=d.T.string)]), withSchedulerName(schedulerName): { template+: { spec+: { schedulerName: schedulerName } } }, + '#withSchedulingGates':: d.fn(help='"SchedulingGates is an opaque list of values that if specified will block scheduling the pod. If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the scheduler will not attempt to schedule the pod.\\n\\nSchedulingGates can only be set at pod creation time, and be removed only afterwards.\\n\\nThis is a beta feature enabled by the PodSchedulingReadiness feature gate."', args=[d.arg(name='schedulingGates', type=d.T.array)]), + withSchedulingGates(schedulingGates): { template+: { spec+: { schedulingGates: if std.isArray(v=schedulingGates) then schedulingGates else [schedulingGates] } } }, + '#withSchedulingGatesMixin':: d.fn(help='"SchedulingGates is an opaque list of values that if specified will block scheduling the pod. If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the scheduler will not attempt to schedule the pod.\\n\\nSchedulingGates can only be set at pod creation time, and be removed only afterwards.\\n\\nThis is a beta feature enabled by the PodSchedulingReadiness feature gate."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='schedulingGates', type=d.T.array)]), + withSchedulingGatesMixin(schedulingGates): { template+: { spec+: { schedulingGates+: if std.isArray(v=schedulingGates) then schedulingGates else [schedulingGates] } } }, '#withServiceAccount':: d.fn(help='"DeprecatedServiceAccount is a depreciated alias for ServiceAccountName. 
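Taken together, the fields newly exposed in this file (the os object, withHostUsers, withSchedulingGates, withResourceClaims) are ordinary ReplicaSetSpec mixins. A hedged sketch combining them, again with illustrative values and assuming the vendored main.libsonnet entry point:

local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';
local rsSpec = k.apps.v1.replicaSetSpec;

rsSpec.template.metadata.withLabels({ app: 'example' })
+ rsSpec.template.spec.os.withName('linux')    // PodOS, new in this vendored version
+ rsSpec.template.spec.withHostUsers(false)    // opt out of the host user namespace (alpha)
+ rsSpec.template.spec.withSchedulingGates([{ name: 'example.com/quota-check' }])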
Deprecated: Use serviceAccountName instead."', args=[d.arg(name='serviceAccount', type=d.T.string)]), withServiceAccount(serviceAccount): { template+: { spec+: { serviceAccount: serviceAccount } } }, '#withServiceAccountName':: d.fn(help='"ServiceAccountName is the name of the ServiceAccount to use to run this pod. More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/"', args=[d.arg(name='serviceAccountName', type=d.T.string)]), diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apps/v1/replicaSetStatus.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apps/v1/replicaSetStatus.libsonnet similarity index 83% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apps/v1/replicaSetStatus.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apps/v1/replicaSetStatus.libsonnet index b0702d1e06a..9d762ad0cb0 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apps/v1/replicaSetStatus.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apps/v1/replicaSetStatus.libsonnet @@ -11,9 +11,9 @@ withFullyLabeledReplicas(fullyLabeledReplicas): { fullyLabeledReplicas: fullyLabeledReplicas }, '#withObservedGeneration':: d.fn(help='"ObservedGeneration reflects the generation of the most recently observed ReplicaSet."', args=[d.arg(name='observedGeneration', type=d.T.integer)]), withObservedGeneration(observedGeneration): { observedGeneration: observedGeneration }, - '#withReadyReplicas':: d.fn(help='"The number of ready replicas for this replica set."', args=[d.arg(name='readyReplicas', type=d.T.integer)]), + '#withReadyReplicas':: d.fn(help='"readyReplicas is the number of pods targeted by this ReplicaSet with a Ready Condition."', args=[d.arg(name='readyReplicas', type=d.T.integer)]), withReadyReplicas(readyReplicas): { readyReplicas: readyReplicas }, - '#withReplicas':: d.fn(help='"Replicas is the most recently oberved number of replicas. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller"', args=[d.arg(name='replicas', type=d.T.integer)]), + '#withReplicas':: d.fn(help='"Replicas is the most recently observed number of replicas. 
More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller"', args=[d.arg(name='replicas', type=d.T.integer)]), withReplicas(replicas): { replicas: replicas }, '#mixin': 'ignore', mixin: self, diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apps/v1/rollingUpdateDaemonSet.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apps/v1/rollingUpdateDaemonSet.libsonnet similarity index 100% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apps/v1/rollingUpdateDaemonSet.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apps/v1/rollingUpdateDaemonSet.libsonnet diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apps/v1/rollingUpdateDeployment.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apps/v1/rollingUpdateDeployment.libsonnet similarity index 100% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apps/v1/rollingUpdateDeployment.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apps/v1/rollingUpdateDeployment.libsonnet diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apps/v1/rollingUpdateStatefulSetStrategy.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apps/v1/rollingUpdateStatefulSetStrategy.libsonnet new file mode 100644 index 00000000000..ecc7a1867cd --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apps/v1/rollingUpdateStatefulSetStrategy.libsonnet @@ -0,0 +1,10 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='rollingUpdateStatefulSetStrategy', url='', help='"RollingUpdateStatefulSetStrategy is used to communicate parameter for RollingUpdateStatefulSetStrategyType."'), + '#withMaxUnavailable':: d.fn(help='"IntOrString is a type that can hold an int32 or a string. When used in JSON or YAML marshalling and unmarshalling, it produces or consumes the inner type. This allows you to have, for example, a JSON field that can accept a name or number."', args=[d.arg(name='maxUnavailable', type=d.T.string)]), + withMaxUnavailable(maxUnavailable): { maxUnavailable: maxUnavailable }, + '#withPartition':: d.fn(help='"Partition indicates the ordinal at which the StatefulSet should be partitioned for updates. During a rolling update, all pods from ordinal Replicas-1 to Partition are updated. All pods from ordinal Partition-1 to 0 remain untouched. This is helpful in being able to do a canary based deployment. 
The default value is 0."', args=[d.arg(name='partition', type=d.T.integer)]), + withPartition(partition): { partition: partition }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apps/v1/statefulSet.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apps/v1/statefulSet.libsonnet similarity index 83% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apps/v1/statefulSet.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apps/v1/statefulSet.libsonnet index acf08a3d275..e1f5369fd24 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apps/v1/statefulSet.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apps/v1/statefulSet.libsonnet @@ -1,14 +1,12 @@ { local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='statefulSet', url='', help='"StatefulSet represents a set of pods with consistent identities. Identities are defined as:\\n - Network: A single stable DNS and hostname.\\n - Storage: As many VolumeClaims as requested.\\nThe StatefulSet guarantees that a given network identity will always map to the same storage identity."'), + '#':: d.pkg(name='statefulSet', url='', help='"StatefulSet represents a set of pods with consistent identities. Identities are defined as:\\n - Network: A single stable DNS and hostname.\\n - Storage: As many VolumeClaims as requested.\\n\\nThe StatefulSet guarantees that a given network identity will always map to the same storage identity."'), '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -19,21 +17,21 @@ withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. 
The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { metadata+: { generateName: generateName } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { metadata+: { labels+: labels } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. 
This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { metadata+: { namespace: namespace } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. 
There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, @@ -41,9 +39,9 @@ withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { metadata+: { uid: uid } }, }, '#new':: d.fn(help='new returns an instance of StatefulSet', args=[d.arg(name='name', type=d.T.string)]), @@ -53,6 +51,18 @@ } + self.metadata.withName(name=name), '#spec':: d.obj(help='"A StatefulSetSpec is the specification of a StatefulSet."'), spec: { + '#ordinals':: d.obj(help='"StatefulSetOrdinals describes the policy used for replica ordinal assignment in this StatefulSet."'), + ordinals: { + '#withStart':: d.fn(help="\"start is the number representing the first replica's index. It may be used to number replicas from an alternate index (eg: 1-indexed) over the default 0-indexed names, or to orchestrate progressive movement of replicas from one StatefulSet to another. If set, replica indices will be in the range:\\n [.spec.ordinals.start, .spec.ordinals.start + .spec.replicas).\\nIf unset, defaults to 0. 
Replica indices will be in the range:\\n [0, .spec.replicas).\"", args=[d.arg(name='start', type=d.T.integer)]), + withStart(start): { spec+: { ordinals+: { start: start } } }, + }, + '#persistentVolumeClaimRetentionPolicy':: d.obj(help='"StatefulSetPersistentVolumeClaimRetentionPolicy describes the policy used for PVCs created from the StatefulSet VolumeClaimTemplates."'), + persistentVolumeClaimRetentionPolicy: { + '#withWhenDeleted':: d.fn(help='"WhenDeleted specifies what happens to PVCs created from StatefulSet VolumeClaimTemplates when the StatefulSet is deleted. The default policy of `Retain` causes PVCs to not be affected by StatefulSet deletion. The `Delete` policy causes those PVCs to be deleted."', args=[d.arg(name='whenDeleted', type=d.T.string)]), + withWhenDeleted(whenDeleted): { spec+: { persistentVolumeClaimRetentionPolicy+: { whenDeleted: whenDeleted } } }, + '#withWhenScaled':: d.fn(help='"WhenScaled specifies what happens to PVCs created from StatefulSet VolumeClaimTemplates when the StatefulSet is scaled down. The default policy of `Retain` causes PVCs to not be affected by a scaledown. The `Delete` policy causes the associated PVCs for any excess pods above the replica count to be deleted."', args=[d.arg(name='whenScaled', type=d.T.string)]), + withWhenScaled(whenScaled): { spec+: { persistentVolumeClaimRetentionPolicy+: { whenScaled: whenScaled } } }, + }, '#selector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), selector: { '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), @@ -68,12 +78,10 @@ template: { '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { spec+: { template+: { metadata+: { annotations: annotations } } } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. 
They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { spec+: { template+: { metadata+: { annotations+: annotations } } } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { spec+: { template+: { metadata+: { clusterName: clusterName } } } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { spec+: { template+: { metadata+: { creationTimestamp: creationTimestamp } } } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -84,21 +92,21 @@ withFinalizers(finalizers): { spec+: { template+: { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } } } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { spec+: { template+: { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } } } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. 
The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { spec+: { template+: { metadata+: { generateName: generateName } } } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { spec+: { template+: { metadata+: { generation: generation } } } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { spec+: { template+: { metadata+: { labels: labels } } } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { spec+: { template+: { metadata+: { labels+: labels } } } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { spec+: { template+: { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } } } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { spec+: { template+: { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } } } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { spec+: { template+: { metadata+: { name: name } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { spec+: { template+: { metadata+: { namespace: namespace } } } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { spec+: { template+: { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } } } }, @@ -106,9 +114,9 @@ withOwnerReferencesMixin(ownerReferences): { spec+: { template+: { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } } } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { spec+: { template+: { metadata+: { resourceVersion: resourceVersion } } } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { spec+: { template+: { metadata+: { selfLink: selfLink } } } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { spec+: { template+: { metadata+: { uid: uid } } } }, }, '#spec':: d.obj(help='"PodSpec is a description of a pod."'), @@ -167,6 +175,11 @@ '#withSearchesMixin':: d.fn(help='"A list of DNS search domains for host-name lookup. This will be appended to the base search paths generated from DNSPolicy. 
Duplicated search paths will be removed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='searches', type=d.T.array)]), withSearchesMixin(searches): { spec+: { template+: { spec+: { dnsConfig+: { searches+: if std.isArray(v=searches) then searches else [searches] } } } } }, }, + '#os':: d.obj(help='"PodOS defines the OS parameters of a pod."'), + os: { + '#withName':: d.fn(help='"Name is the name of the operating system. The currently supported values are linux and windows. Additional value may be defined in future and can be one of: https://github.com/opencontainers/runtime-spec/blob/master/config.md#platform-specific-configuration Clients should expect to handle additional values and treat unrecognized values in this field as os: null"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { spec+: { template+: { spec+: { os+: { name: name } } } } }, + }, '#securityContext':: d.obj(help='"PodSecurityContext holds pod-level security attributes and common container settings. Some fields are also present in container.securityContext. Field values of container.securityContext take precedence over field values of PodSecurityContext."'), securityContext: { '#seLinuxOptions':: d.obj(help='"SELinuxOptions are the labels to be applied to the container"'), @@ -182,7 +195,7 @@ }, '#seccompProfile':: d.obj(help="\"SeccompProfile defines a pod/container's seccomp profile settings. Only one profile source may be set.\""), seccompProfile: { - '#withLocalhostProfile':: d.fn(help="\"localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must only be set if type is \\\"Localhost\\\".\"", args=[d.arg(name='localhostProfile', type=d.T.string)]), + '#withLocalhostProfile':: d.fn(help="\"localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must be set if type is \\\"Localhost\\\". Must NOT be set for any other type.\"", args=[d.arg(name='localhostProfile', type=d.T.string)]), withLocalhostProfile(localhostProfile): { spec+: { template+: { spec+: { securityContext+: { seccompProfile+: { localhostProfile: localhostProfile } } } } } }, '#withType':: d.fn(help='"type indicates which kind of seccomp profile will be applied. Valid options are:\\n\\nLocalhost - a profile defined in a file on the node should be used. RuntimeDefault - the container runtime default profile should be used. 
Unconfined - no profile should be applied."', args=[d.arg(name='type', type=d.T.string)]), withType(type): { spec+: { template+: { spec+: { securityContext+: { seccompProfile+: { type: type } } } } } }, @@ -193,26 +206,28 @@ withGmsaCredentialSpec(gmsaCredentialSpec): { spec+: { template+: { spec+: { securityContext+: { windowsOptions+: { gmsaCredentialSpec: gmsaCredentialSpec } } } } } }, '#withGmsaCredentialSpecName':: d.fn(help='"GMSACredentialSpecName is the name of the GMSA credential spec to use."', args=[d.arg(name='gmsaCredentialSpecName', type=d.T.string)]), withGmsaCredentialSpecName(gmsaCredentialSpecName): { spec+: { template+: { spec+: { securityContext+: { windowsOptions+: { gmsaCredentialSpecName: gmsaCredentialSpecName } } } } } }, + '#withHostProcess':: d.fn(help="\"HostProcess determines if a container should be run as a 'Host Process' container. All of a Pod's containers must have the same effective HostProcess value (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). In addition, if HostProcess is true then HostNetwork must also be set to true.\"", args=[d.arg(name='hostProcess', type=d.T.boolean)]), + withHostProcess(hostProcess): { spec+: { template+: { spec+: { securityContext+: { windowsOptions+: { hostProcess: hostProcess } } } } } }, '#withRunAsUserName':: d.fn(help='"The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence."', args=[d.arg(name='runAsUserName', type=d.T.string)]), withRunAsUserName(runAsUserName): { spec+: { template+: { spec+: { securityContext+: { windowsOptions+: { runAsUserName: runAsUserName } } } } } }, }, - '#withFsGroup':: d.fn(help="\"A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod:\\n\\n1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw----\\n\\nIf unset, the Kubelet will not modify the ownership and permissions of any volume.\"", args=[d.arg(name='fsGroup', type=d.T.integer)]), + '#withFsGroup':: d.fn(help="\"A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod:\\n\\n1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw----\\n\\nIf unset, the Kubelet will not modify the ownership and permissions of any volume. Note that this field cannot be set when spec.os.name is windows.\"", args=[d.arg(name='fsGroup', type=d.T.integer)]), withFsGroup(fsGroup): { spec+: { template+: { spec+: { securityContext+: { fsGroup: fsGroup } } } } }, - '#withFsGroupChangePolicy':: d.fn(help='"fsGroupChangePolicy defines behavior of changing ownership and permission of the volume before being exposed inside Pod. This field will only apply to volume types which support fsGroup based ownership(and permissions). It will have no effect on ephemeral volume types such as: secret, configmaps and emptydir. Valid values are \\"OnRootMismatch\\" and \\"Always\\". 
If not specified, \\"Always\\" is used."', args=[d.arg(name='fsGroupChangePolicy', type=d.T.string)]), + '#withFsGroupChangePolicy':: d.fn(help='"fsGroupChangePolicy defines behavior of changing ownership and permission of the volume before being exposed inside Pod. This field will only apply to volume types which support fsGroup based ownership(and permissions). It will have no effect on ephemeral volume types such as: secret, configmaps and emptydir. Valid values are \\"OnRootMismatch\\" and \\"Always\\". If not specified, \\"Always\\" is used. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='fsGroupChangePolicy', type=d.T.string)]), withFsGroupChangePolicy(fsGroupChangePolicy): { spec+: { template+: { spec+: { securityContext+: { fsGroupChangePolicy: fsGroupChangePolicy } } } } }, - '#withRunAsGroup':: d.fn(help='"The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container."', args=[d.arg(name='runAsGroup', type=d.T.integer)]), + '#withRunAsGroup':: d.fn(help='"The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='runAsGroup', type=d.T.integer)]), withRunAsGroup(runAsGroup): { spec+: { template+: { spec+: { securityContext+: { runAsGroup: runAsGroup } } } } }, '#withRunAsNonRoot':: d.fn(help='"Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence."', args=[d.arg(name='runAsNonRoot', type=d.T.boolean)]), withRunAsNonRoot(runAsNonRoot): { spec+: { template+: { spec+: { securityContext+: { runAsNonRoot: runAsNonRoot } } } } }, - '#withRunAsUser':: d.fn(help='"The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container."', args=[d.arg(name='runAsUser', type=d.T.integer)]), + '#withRunAsUser':: d.fn(help='"The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='runAsUser', type=d.T.integer)]), withRunAsUser(runAsUser): { spec+: { template+: { spec+: { securityContext+: { runAsUser: runAsUser } } } } }, - '#withSupplementalGroups':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID. 
If unspecified, no groups will be added to any container.\"", args=[d.arg(name='supplementalGroups', type=d.T.array)]), + '#withSupplementalGroups':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID, the fsGroup (if specified), and group memberships defined in the container image for the uid of the container process. If unspecified, no additional groups are added to any container. Note that group memberships defined in the container image for the uid of the container process are still effective, even if they are not included in this list. Note that this field cannot be set when spec.os.name is windows.\"", args=[d.arg(name='supplementalGroups', type=d.T.array)]), withSupplementalGroups(supplementalGroups): { spec+: { template+: { spec+: { securityContext+: { supplementalGroups: if std.isArray(v=supplementalGroups) then supplementalGroups else [supplementalGroups] } } } } }, - '#withSupplementalGroupsMixin':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID. If unspecified, no groups will be added to any container.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='supplementalGroups', type=d.T.array)]), + '#withSupplementalGroupsMixin':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID, the fsGroup (if specified), and group memberships defined in the container image for the uid of the container process. If unspecified, no additional groups are added to any container. Note that group memberships defined in the container image for the uid of the container process are still effective, even if they are not included in this list. Note that this field cannot be set when spec.os.name is windows.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='supplementalGroups', type=d.T.array)]), withSupplementalGroupsMixin(supplementalGroups): { spec+: { template+: { spec+: { securityContext+: { supplementalGroups+: if std.isArray(v=supplementalGroups) then supplementalGroups else [supplementalGroups] } } } } }, - '#withSysctls':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch."', args=[d.arg(name='sysctls', type=d.T.array)]), + '#withSysctls':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='sysctls', type=d.T.array)]), withSysctls(sysctls): { spec+: { template+: { spec+: { securityContext+: { sysctls: if std.isArray(v=sysctls) then sysctls else [sysctls] } } } } }, - '#withSysctlsMixin':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='sysctls', type=d.T.array)]), + '#withSysctlsMixin':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch. 
Note that this field cannot be set when spec.os.name is windows."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='sysctls', type=d.T.array)]), withSysctlsMixin(sysctls): { spec+: { template+: { spec+: { securityContext+: { sysctls+: if std.isArray(v=sysctls) then sysctls else [sysctls] } } } } }, }, '#withActiveDeadlineSeconds':: d.fn(help='"Optional duration in seconds the pod may be active on the node relative to StartTime before the system will actively try to mark it failed and kill associated containers. Value must be a positive integer."', args=[d.arg(name='activeDeadlineSeconds', type=d.T.integer)]), @@ -227,9 +242,9 @@ withDnsPolicy(dnsPolicy): { spec+: { template+: { spec+: { dnsPolicy: dnsPolicy } } } }, '#withEnableServiceLinks':: d.fn(help="\"EnableServiceLinks indicates whether information about services should be injected into pod's environment variables, matching the syntax of Docker links. Optional: Defaults to true.\"", args=[d.arg(name='enableServiceLinks', type=d.T.boolean)]), withEnableServiceLinks(enableServiceLinks): { spec+: { template+: { spec+: { enableServiceLinks: enableServiceLinks } } } }, - '#withEphemeralContainers':: d.fn(help="\"List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. This field is alpha-level and is only honored by servers that enable the EphemeralContainers feature.\"", args=[d.arg(name='ephemeralContainers', type=d.T.array)]), + '#withEphemeralContainers':: d.fn(help="\"List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource.\"", args=[d.arg(name='ephemeralContainers', type=d.T.array)]), withEphemeralContainers(ephemeralContainers): { spec+: { template+: { spec+: { ephemeralContainers: if std.isArray(v=ephemeralContainers) then ephemeralContainers else [ephemeralContainers] } } } }, - '#withEphemeralContainersMixin':: d.fn(help="\"List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. This field is alpha-level and is only honored by servers that enable the EphemeralContainers feature.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='ephemeralContainers', type=d.T.array)]), + '#withEphemeralContainersMixin':: d.fn(help="\"List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. 
In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='ephemeralContainers', type=d.T.array)]), withEphemeralContainersMixin(ephemeralContainers): { spec+: { template+: { spec+: { ephemeralContainers+: if std.isArray(v=ephemeralContainers) then ephemeralContainers else [ephemeralContainers] } } } }, '#withHostAliases':: d.fn(help="\"HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts file if specified. This is only valid for non-hostNetwork pods.\"", args=[d.arg(name='hostAliases', type=d.T.array)]), withHostAliases(hostAliases): { spec+: { template+: { spec+: { hostAliases: if std.isArray(v=hostAliases) then hostAliases else [hostAliases] } } } }, @@ -241,11 +256,13 @@ withHostNetwork(hostNetwork): { spec+: { template+: { spec+: { hostNetwork: hostNetwork } } } }, '#withHostPID':: d.fn(help="\"Use the host's pid namespace. Optional: Default to false.\"", args=[d.arg(name='hostPID', type=d.T.boolean)]), withHostPID(hostPID): { spec+: { template+: { spec+: { hostPID: hostPID } } } }, + '#withHostUsers':: d.fn(help="\"Use the host's user namespace. Optional: Default to true. If set to true or not present, the pod will be run in the host user namespace, useful for when the pod needs a feature only available to the host user namespace, such as loading a kernel module with CAP_SYS_MODULE. When set to false, a new userns is created for the pod. Setting false is useful for mitigating container breakout vulnerabilities even allowing users to run their containers as root without actually having root privileges on the host. This field is alpha-level and is only honored by servers that enable the UserNamespacesSupport feature.\"", args=[d.arg(name='hostUsers', type=d.T.boolean)]), + withHostUsers(hostUsers): { spec+: { template+: { spec+: { hostUsers: hostUsers } } } }, '#withHostname':: d.fn(help="\"Specifies the hostname of the Pod If not specified, the pod's hostname will be set to a system-defined value.\"", args=[d.arg(name='hostname', type=d.T.string)]), withHostname(hostname): { spec+: { template+: { spec+: { hostname: hostname } } } }, - '#withImagePullSecrets':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), + '#withImagePullSecrets':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. 
More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), withImagePullSecrets(imagePullSecrets): { spec+: { template+: { spec+: { imagePullSecrets: if std.isArray(v=imagePullSecrets) then imagePullSecrets else [imagePullSecrets] } } } }, - '#withImagePullSecretsMixin':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), + '#withImagePullSecretsMixin':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), withImagePullSecretsMixin(imagePullSecrets): { spec+: { template+: { spec+: { imagePullSecrets+: if std.isArray(v=imagePullSecrets) then imagePullSecrets else [imagePullSecrets] } } } }, '#withInitContainers':: d.fn(help='"List of initialization containers belonging to the pod. Init containers are executed in order prior to containers being started. If any init container fails, the pod is considered to have failed and is handled according to its restartPolicy. The name for an init container or normal container must be unique among all containers. Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes. The resourceRequirements of an init container are taken into account during scheduling by finding the highest request/limit for each resource type, and then using the max of of that value or the sum of the normal containers. Limits are applied to init containers in a similar fashion. Init containers cannot currently be added or removed. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/"', args=[d.arg(name='initContainers', type=d.T.array)]), withInitContainers(initContainers): { spec+: { template+: { spec+: { initContainers: if std.isArray(v=initContainers) then initContainers else [initContainers] } } } }, @@ -257,26 +274,34 @@ withNodeSelector(nodeSelector): { spec+: { template+: { spec+: { nodeSelector: nodeSelector } } } }, '#withNodeSelectorMixin':: d.fn(help="\"NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='nodeSelector', type=d.T.object)]), withNodeSelectorMixin(nodeSelector): { spec+: { template+: { spec+: { nodeSelector+: nodeSelector } } } }, - '#withOverhead':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. 
This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md This field is alpha-level as of Kubernetes v1.16, and is only honored by servers that enable the PodOverhead feature."', args=[d.arg(name='overhead', type=d.T.object)]), + '#withOverhead':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md"', args=[d.arg(name='overhead', type=d.T.object)]), withOverhead(overhead): { spec+: { template+: { spec+: { overhead: overhead } } } }, - '#withOverheadMixin':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md This field is alpha-level as of Kubernetes v1.16, and is only honored by servers that enable the PodOverhead feature."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='overhead', type=d.T.object)]), + '#withOverheadMixin':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. 
More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='overhead', type=d.T.object)]), withOverheadMixin(overhead): { spec+: { template+: { spec+: { overhead+: overhead } } } }, - '#withPreemptionPolicy':: d.fn(help='"PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset. This field is beta-level, gated by the NonPreemptingPriority feature-gate."', args=[d.arg(name='preemptionPolicy', type=d.T.string)]), + '#withPreemptionPolicy':: d.fn(help='"PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset."', args=[d.arg(name='preemptionPolicy', type=d.T.string)]), withPreemptionPolicy(preemptionPolicy): { spec+: { template+: { spec+: { preemptionPolicy: preemptionPolicy } } } }, '#withPriority':: d.fn(help='"The priority value. Various system components use this field to find the priority of the pod. When Priority Admission Controller is enabled, it prevents users from setting this field. The admission controller populates this field from PriorityClassName. The higher the value, the higher the priority."', args=[d.arg(name='priority', type=d.T.integer)]), withPriority(priority): { spec+: { template+: { spec+: { priority: priority } } } }, '#withPriorityClassName':: d.fn(help="\"If specified, indicates the pod's priority. \\\"system-node-critical\\\" and \\\"system-cluster-critical\\\" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default.\"", args=[d.arg(name='priorityClassName', type=d.T.string)]), withPriorityClassName(priorityClassName): { spec+: { template+: { spec+: { priorityClassName: priorityClassName } } } }, - '#withReadinessGates':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/0007-pod-ready%2B%2B.md"', args=[d.arg(name='readinessGates', type=d.T.array)]), + '#withReadinessGates':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates"', args=[d.arg(name='readinessGates', type=d.T.array)]), withReadinessGates(readinessGates): { spec+: { template+: { spec+: { readinessGates: if std.isArray(v=readinessGates) then readinessGates else [readinessGates] } } } }, - '#withReadinessGatesMixin':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. 
A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/0007-pod-ready%2B%2B.md"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='readinessGates', type=d.T.array)]), + '#withReadinessGatesMixin':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='readinessGates', type=d.T.array)]), withReadinessGatesMixin(readinessGates): { spec+: { template+: { spec+: { readinessGates+: if std.isArray(v=readinessGates) then readinessGates else [readinessGates] } } } }, - '#withRestartPolicy':: d.fn(help='"Restart policy for all containers within the pod. One of Always, OnFailure, Never. Default to Always. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy"', args=[d.arg(name='restartPolicy', type=d.T.string)]), + '#withResourceClaims':: d.fn(help='"ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. The resources will be made available to those containers which consume them by name.\\n\\nThis is an alpha field and requires enabling the DynamicResourceAllocation feature gate.\\n\\nThis field is immutable."', args=[d.arg(name='resourceClaims', type=d.T.array)]), + withResourceClaims(resourceClaims): { spec+: { template+: { spec+: { resourceClaims: if std.isArray(v=resourceClaims) then resourceClaims else [resourceClaims] } } } }, + '#withResourceClaimsMixin':: d.fn(help='"ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. The resources will be made available to those containers which consume them by name.\\n\\nThis is an alpha field and requires enabling the DynamicResourceAllocation feature gate.\\n\\nThis field is immutable."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='resourceClaims', type=d.T.array)]), + withResourceClaimsMixin(resourceClaims): { spec+: { template+: { spec+: { resourceClaims+: if std.isArray(v=resourceClaims) then resourceClaims else [resourceClaims] } } } }, + '#withRestartPolicy':: d.fn(help='"Restart policy for all containers within the pod. One of Always, OnFailure, Never. In some contexts, only a subset of those values may be permitted. Default to Always. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy"', args=[d.arg(name='restartPolicy', type=d.T.string)]), withRestartPolicy(restartPolicy): { spec+: { template+: { spec+: { restartPolicy: restartPolicy } } } }, - '#withRuntimeClassName':: d.fn(help='"RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \\"legacy\\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. 
More info: https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md This is a beta feature as of Kubernetes v1.14."', args=[d.arg(name='runtimeClassName', type=d.T.string)]), + '#withRuntimeClassName':: d.fn(help='"RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \\"legacy\\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class"', args=[d.arg(name='runtimeClassName', type=d.T.string)]), withRuntimeClassName(runtimeClassName): { spec+: { template+: { spec+: { runtimeClassName: runtimeClassName } } } }, '#withSchedulerName':: d.fn(help='"If specified, the pod will be dispatched by specified scheduler. If not specified, the pod will be dispatched by default scheduler."', args=[d.arg(name='schedulerName', type=d.T.string)]), withSchedulerName(schedulerName): { spec+: { template+: { spec+: { schedulerName: schedulerName } } } }, + '#withSchedulingGates':: d.fn(help='"SchedulingGates is an opaque list of values that if specified will block scheduling the pod. If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the scheduler will not attempt to schedule the pod.\\n\\nSchedulingGates can only be set at pod creation time, and be removed only afterwards.\\n\\nThis is a beta feature enabled by the PodSchedulingReadiness feature gate."', args=[d.arg(name='schedulingGates', type=d.T.array)]), + withSchedulingGates(schedulingGates): { spec+: { template+: { spec+: { schedulingGates: if std.isArray(v=schedulingGates) then schedulingGates else [schedulingGates] } } } }, + '#withSchedulingGatesMixin':: d.fn(help='"SchedulingGates is an opaque list of values that if specified will block scheduling the pod. If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the scheduler will not attempt to schedule the pod.\\n\\nSchedulingGates can only be set at pod creation time, and be removed only afterwards.\\n\\nThis is a beta feature enabled by the PodSchedulingReadiness feature gate."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='schedulingGates', type=d.T.array)]), + withSchedulingGatesMixin(schedulingGates): { spec+: { template+: { spec+: { schedulingGates+: if std.isArray(v=schedulingGates) then schedulingGates else [schedulingGates] } } } }, '#withServiceAccount':: d.fn(help='"DeprecatedServiceAccount is a depreciated alias for ServiceAccountName. Deprecated: Use serviceAccountName instead."', args=[d.arg(name='serviceAccount', type=d.T.string)]), withServiceAccount(serviceAccount): { spec+: { template+: { spec+: { serviceAccount: serviceAccount } } } }, '#withServiceAccountName':: d.fn(help='"ServiceAccountName is the name of the ServiceAccount to use to run this pod. More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/"', args=[d.arg(name='serviceAccountName', type=d.T.string)]), @@ -307,12 +332,16 @@ updateStrategy: { '#rollingUpdate':: d.obj(help='"RollingUpdateStatefulSetStrategy is used to communicate parameter for RollingUpdateStatefulSetStrategyType."'), rollingUpdate: { - '#withPartition':: d.fn(help='"Partition indicates the ordinal at which the StatefulSet should be partitioned. 
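// Usage sketch (illustrative, not part of the vendored diff): wiring the 1.29
// pod-template additions shown above -- hostUsers, schedulingGates -- together
// with the new rollingUpdate.maxUnavailable knob on a StatefulSet. The import
// path, the 'example' name and the gate name are assumptions, and new('example')
// is assumed to be the library's usual constructor.
local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';
local sts = k.apps.v1.statefulSet;

sts.new('example')
+ sts.spec.withReplicas(3)
+ sts.spec.template.spec.withHostUsers(false)  // run the pod in its own user namespace
+ sts.spec.template.spec.withSchedulingGates([{ name: 'example.com/gate' }])
+ sts.spec.updateStrategy.withType('RollingUpdate')
+ sts.spec.updateStrategy.rollingUpdate.withMaxUnavailable(1)
+ sts.spec.updateStrategy.rollingUpdate.withPartition(2)  // canary: only ordinals >= 2 are updated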
Default value is 0."', args=[d.arg(name='partition', type=d.T.integer)]), + '#withMaxUnavailable':: d.fn(help='"IntOrString is a type that can hold an int32 or a string. When used in JSON or YAML marshalling and unmarshalling, it produces or consumes the inner type. This allows you to have, for example, a JSON field that can accept a name or number."', args=[d.arg(name='maxUnavailable', type=d.T.string)]), + withMaxUnavailable(maxUnavailable): { spec+: { updateStrategy+: { rollingUpdate+: { maxUnavailable: maxUnavailable } } } }, + '#withPartition':: d.fn(help='"Partition indicates the ordinal at which the StatefulSet should be partitioned for updates. During a rolling update, all pods from ordinal Replicas-1 to Partition are updated. All pods from ordinal Partition-1 to 0 remain untouched. This is helpful in being able to do a canary based deployment. The default value is 0."', args=[d.arg(name='partition', type=d.T.integer)]), withPartition(partition): { spec+: { updateStrategy+: { rollingUpdate+: { partition: partition } } } }, }, '#withType':: d.fn(help='"Type indicates the type of the StatefulSetUpdateStrategy. Default is RollingUpdate."', args=[d.arg(name='type', type=d.T.string)]), withType(type): { spec+: { updateStrategy+: { type: type } } }, }, + '#withMinReadySeconds':: d.fn(help='"Minimum number of seconds for which a newly created pod should be ready without any of its container crashing for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)"', args=[d.arg(name='minReadySeconds', type=d.T.integer)]), + withMinReadySeconds(minReadySeconds): { spec+: { minReadySeconds: minReadySeconds } }, '#withPodManagementPolicy':: d.fn(help='"podManagementPolicy controls how pods are created during initial scale up, when replacing pods on nodes, or when scaling down. The default policy is `OrderedReady`, where pods are created in increasing order (pod-0, then pod-1, etc) and the controller will wait until each pod is ready before continuing. When scaling down, the pods are removed in the opposite order. The alternative policy is `Parallel` which will create pods in parallel to match the desired scale without waiting, and on scale down will delete all pods at once."', args=[d.arg(name='podManagementPolicy', type=d.T.string)]), withPodManagementPolicy(podManagementPolicy): { spec+: { podManagementPolicy: podManagementPolicy } }, '#withReplicas':: d.fn(help='"replicas is the desired number of replicas of the given Template. These are replicas in the sense that they are instantiations of the same Template, but individual replicas also have a consistent identity. 
If unspecified, defaults to 1."', args=[d.arg(name='replicas', type=d.T.integer)]), diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apps/v1/statefulSetCondition.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apps/v1/statefulSetCondition.libsonnet similarity index 100% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apps/v1/statefulSetCondition.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apps/v1/statefulSetCondition.libsonnet diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apps/v1/statefulSetOrdinals.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apps/v1/statefulSetOrdinals.libsonnet new file mode 100644 index 00000000000..d1fe8ab45ec --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apps/v1/statefulSetOrdinals.libsonnet @@ -0,0 +1,8 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='statefulSetOrdinals', url='', help='"StatefulSetOrdinals describes the policy used for replica ordinal assignment in this StatefulSet."'), + '#withStart':: d.fn(help="\"start is the number representing the first replica's index. It may be used to number replicas from an alternate index (eg: 1-indexed) over the default 0-indexed names, or to orchestrate progressive movement of replicas from one StatefulSet to another. If set, replica indices will be in the range:\\n [.spec.ordinals.start, .spec.ordinals.start + .spec.replicas).\\nIf unset, defaults to 0. Replica indices will be in the range:\\n [0, .spec.replicas).\"", args=[d.arg(name='start', type=d.T.integer)]), + withStart(start): { start: start }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apps/v1/statefulSetPersistentVolumeClaimRetentionPolicy.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apps/v1/statefulSetPersistentVolumeClaimRetentionPolicy.libsonnet new file mode 100644 index 00000000000..024ca485364 --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apps/v1/statefulSetPersistentVolumeClaimRetentionPolicy.libsonnet @@ -0,0 +1,10 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='statefulSetPersistentVolumeClaimRetentionPolicy', url='', help='"StatefulSetPersistentVolumeClaimRetentionPolicy describes the policy used for PVCs created from the StatefulSet VolumeClaimTemplates."'), + '#withWhenDeleted':: d.fn(help='"WhenDeleted specifies what happens to PVCs created from StatefulSet VolumeClaimTemplates when the StatefulSet is deleted. The default policy of `Retain` causes PVCs to not be affected by StatefulSet deletion. The `Delete` policy causes those PVCs to be deleted."', args=[d.arg(name='whenDeleted', type=d.T.string)]), + withWhenDeleted(whenDeleted): { whenDeleted: whenDeleted }, + '#withWhenScaled':: d.fn(help='"WhenScaled specifies what happens to PVCs created from StatefulSet VolumeClaimTemplates when the StatefulSet is scaled down. The default policy of `Retain` causes PVCs to not be affected by a scaledown. 
The `Delete` policy causes the associated PVCs for any excess pods above the replica count to be deleted."', args=[d.arg(name='whenScaled', type=d.T.string)]), + withWhenScaled(whenScaled): { whenScaled: whenScaled }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apps/v1/statefulSetSpec.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apps/v1/statefulSetSpec.libsonnet similarity index 82% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apps/v1/statefulSetSpec.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apps/v1/statefulSetSpec.libsonnet index 074efb59057..95caa6c4202 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apps/v1/statefulSetSpec.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apps/v1/statefulSetSpec.libsonnet @@ -1,6 +1,18 @@ { local d = (import 'doc-util/main.libsonnet'), '#':: d.pkg(name='statefulSetSpec', url='', help='"A StatefulSetSpec is the specification of a StatefulSet."'), + '#ordinals':: d.obj(help='"StatefulSetOrdinals describes the policy used for replica ordinal assignment in this StatefulSet."'), + ordinals: { + '#withStart':: d.fn(help="\"start is the number representing the first replica's index. It may be used to number replicas from an alternate index (eg: 1-indexed) over the default 0-indexed names, or to orchestrate progressive movement of replicas from one StatefulSet to another. If set, replica indices will be in the range:\\n [.spec.ordinals.start, .spec.ordinals.start + .spec.replicas).\\nIf unset, defaults to 0. Replica indices will be in the range:\\n [0, .spec.replicas).\"", args=[d.arg(name='start', type=d.T.integer)]), + withStart(start): { ordinals+: { start: start } }, + }, + '#persistentVolumeClaimRetentionPolicy':: d.obj(help='"StatefulSetPersistentVolumeClaimRetentionPolicy describes the policy used for PVCs created from the StatefulSet VolumeClaimTemplates."'), + persistentVolumeClaimRetentionPolicy: { + '#withWhenDeleted':: d.fn(help='"WhenDeleted specifies what happens to PVCs created from StatefulSet VolumeClaimTemplates when the StatefulSet is deleted. The default policy of `Retain` causes PVCs to not be affected by StatefulSet deletion. The `Delete` policy causes those PVCs to be deleted."', args=[d.arg(name='whenDeleted', type=d.T.string)]), + withWhenDeleted(whenDeleted): { persistentVolumeClaimRetentionPolicy+: { whenDeleted: whenDeleted } }, + '#withWhenScaled':: d.fn(help='"WhenScaled specifies what happens to PVCs created from StatefulSet VolumeClaimTemplates when the StatefulSet is scaled down. The default policy of `Retain` causes PVCs to not be affected by a scaledown. The `Delete` policy causes the associated PVCs for any excess pods above the replica count to be deleted."', args=[d.arg(name='whenScaled', type=d.T.string)]), + withWhenScaled(whenScaled): { persistentVolumeClaimRetentionPolicy+: { whenScaled: whenScaled } }, + }, '#selector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), selector: { '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. 
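// Minimal sketch of the new statefulSetSpec fields added above: ordinals and
// persistentVolumeClaimRetentionPolicy. The result is a spec fragment that can
// be merged under a StatefulSet's `spec`; the import path and the chosen values
// are assumptions for illustration only.
local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';
local spec = k.apps.v1.statefulSetSpec;

spec.ordinals.withStart(1)                                             // number replicas from 1 instead of 0
+ spec.persistentVolumeClaimRetentionPolicy.withWhenScaled('Delete')   // drop PVCs of excess replicas on scale-down
+ spec.persistentVolumeClaimRetentionPolicy.withWhenDeleted('Retain')  // keep PVCs when the StatefulSet is deleted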
The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), @@ -16,12 +28,10 @@ template: { '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { template+: { metadata+: { annotations: annotations } } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { template+: { metadata+: { annotations+: annotations } } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { template+: { metadata+: { clusterName: clusterName } } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { template+: { metadata+: { creationTimestamp: creationTimestamp } } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -32,21 +42,21 @@ withFinalizers(finalizers): { template+: { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. 
Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { template+: { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { template+: { metadata+: { generateName: generateName } } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { template+: { metadata+: { generation: generation } } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. 
More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { template+: { metadata+: { labels: labels } } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { template+: { metadata+: { labels+: labels } } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { template+: { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { template+: { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { template+: { metadata+: { name: name } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { template+: { metadata+: { namespace: namespace } } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { template+: { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } } }, @@ -54,9 +64,9 @@ withOwnerReferencesMixin(ownerReferences): { template+: { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { template+: { metadata+: { resourceVersion: resourceVersion } } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { template+: { metadata+: { selfLink: selfLink } } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. 
It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { template+: { metadata+: { uid: uid } } }, }, '#spec':: d.obj(help='"PodSpec is a description of a pod."'), @@ -115,6 +125,11 @@ '#withSearchesMixin':: d.fn(help='"A list of DNS search domains for host-name lookup. This will be appended to the base search paths generated from DNSPolicy. Duplicated search paths will be removed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='searches', type=d.T.array)]), withSearchesMixin(searches): { template+: { spec+: { dnsConfig+: { searches+: if std.isArray(v=searches) then searches else [searches] } } } }, }, + '#os':: d.obj(help='"PodOS defines the OS parameters of a pod."'), + os: { + '#withName':: d.fn(help='"Name is the name of the operating system. The currently supported values are linux and windows. Additional value may be defined in future and can be one of: https://github.com/opencontainers/runtime-spec/blob/master/config.md#platform-specific-configuration Clients should expect to handle additional values and treat unrecognized values in this field as os: null"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { template+: { spec+: { os+: { name: name } } } }, + }, '#securityContext':: d.obj(help='"PodSecurityContext holds pod-level security attributes and common container settings. Some fields are also present in container.securityContext. Field values of container.securityContext take precedence over field values of PodSecurityContext."'), securityContext: { '#seLinuxOptions':: d.obj(help='"SELinuxOptions are the labels to be applied to the container"'), @@ -130,7 +145,7 @@ }, '#seccompProfile':: d.obj(help="\"SeccompProfile defines a pod/container's seccomp profile settings. Only one profile source may be set.\""), seccompProfile: { - '#withLocalhostProfile':: d.fn(help="\"localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must only be set if type is \\\"Localhost\\\".\"", args=[d.arg(name='localhostProfile', type=d.T.string)]), + '#withLocalhostProfile':: d.fn(help="\"localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must be set if type is \\\"Localhost\\\". Must NOT be set for any other type.\"", args=[d.arg(name='localhostProfile', type=d.T.string)]), withLocalhostProfile(localhostProfile): { template+: { spec+: { securityContext+: { seccompProfile+: { localhostProfile: localhostProfile } } } } }, '#withType':: d.fn(help='"type indicates which kind of seccomp profile will be applied. 
Valid options are:\\n\\nLocalhost - a profile defined in a file on the node should be used. RuntimeDefault - the container runtime default profile should be used. Unconfined - no profile should be applied."', args=[d.arg(name='type', type=d.T.string)]), withType(type): { template+: { spec+: { securityContext+: { seccompProfile+: { type: type } } } } }, @@ -141,26 +156,28 @@ withGmsaCredentialSpec(gmsaCredentialSpec): { template+: { spec+: { securityContext+: { windowsOptions+: { gmsaCredentialSpec: gmsaCredentialSpec } } } } }, '#withGmsaCredentialSpecName':: d.fn(help='"GMSACredentialSpecName is the name of the GMSA credential spec to use."', args=[d.arg(name='gmsaCredentialSpecName', type=d.T.string)]), withGmsaCredentialSpecName(gmsaCredentialSpecName): { template+: { spec+: { securityContext+: { windowsOptions+: { gmsaCredentialSpecName: gmsaCredentialSpecName } } } } }, + '#withHostProcess':: d.fn(help="\"HostProcess determines if a container should be run as a 'Host Process' container. All of a Pod's containers must have the same effective HostProcess value (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). In addition, if HostProcess is true then HostNetwork must also be set to true.\"", args=[d.arg(name='hostProcess', type=d.T.boolean)]), + withHostProcess(hostProcess): { template+: { spec+: { securityContext+: { windowsOptions+: { hostProcess: hostProcess } } } } }, '#withRunAsUserName':: d.fn(help='"The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence."', args=[d.arg(name='runAsUserName', type=d.T.string)]), withRunAsUserName(runAsUserName): { template+: { spec+: { securityContext+: { windowsOptions+: { runAsUserName: runAsUserName } } } } }, }, - '#withFsGroup':: d.fn(help="\"A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod:\\n\\n1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw----\\n\\nIf unset, the Kubelet will not modify the ownership and permissions of any volume.\"", args=[d.arg(name='fsGroup', type=d.T.integer)]), + '#withFsGroup':: d.fn(help="\"A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod:\\n\\n1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw----\\n\\nIf unset, the Kubelet will not modify the ownership and permissions of any volume. Note that this field cannot be set when spec.os.name is windows.\"", args=[d.arg(name='fsGroup', type=d.T.integer)]), withFsGroup(fsGroup): { template+: { spec+: { securityContext+: { fsGroup: fsGroup } } } }, - '#withFsGroupChangePolicy':: d.fn(help='"fsGroupChangePolicy defines behavior of changing ownership and permission of the volume before being exposed inside Pod. This field will only apply to volume types which support fsGroup based ownership(and permissions). It will have no effect on ephemeral volume types such as: secret, configmaps and emptydir. 
Valid values are \\"OnRootMismatch\\" and \\"Always\\". If not specified, \\"Always\\" is used."', args=[d.arg(name='fsGroupChangePolicy', type=d.T.string)]), + '#withFsGroupChangePolicy':: d.fn(help='"fsGroupChangePolicy defines behavior of changing ownership and permission of the volume before being exposed inside Pod. This field will only apply to volume types which support fsGroup based ownership(and permissions). It will have no effect on ephemeral volume types such as: secret, configmaps and emptydir. Valid values are \\"OnRootMismatch\\" and \\"Always\\". If not specified, \\"Always\\" is used. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='fsGroupChangePolicy', type=d.T.string)]), withFsGroupChangePolicy(fsGroupChangePolicy): { template+: { spec+: { securityContext+: { fsGroupChangePolicy: fsGroupChangePolicy } } } }, - '#withRunAsGroup':: d.fn(help='"The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container."', args=[d.arg(name='runAsGroup', type=d.T.integer)]), + '#withRunAsGroup':: d.fn(help='"The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='runAsGroup', type=d.T.integer)]), withRunAsGroup(runAsGroup): { template+: { spec+: { securityContext+: { runAsGroup: runAsGroup } } } }, '#withRunAsNonRoot':: d.fn(help='"Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence."', args=[d.arg(name='runAsNonRoot', type=d.T.boolean)]), withRunAsNonRoot(runAsNonRoot): { template+: { spec+: { securityContext+: { runAsNonRoot: runAsNonRoot } } } }, - '#withRunAsUser':: d.fn(help='"The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container."', args=[d.arg(name='runAsUser', type=d.T.integer)]), + '#withRunAsUser':: d.fn(help='"The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='runAsUser', type=d.T.integer)]), withRunAsUser(runAsUser): { template+: { spec+: { securityContext+: { runAsUser: runAsUser } } } }, - '#withSupplementalGroups':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID. 
If unspecified, no groups will be added to any container.\"", args=[d.arg(name='supplementalGroups', type=d.T.array)]), + '#withSupplementalGroups':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID, the fsGroup (if specified), and group memberships defined in the container image for the uid of the container process. If unspecified, no additional groups are added to any container. Note that group memberships defined in the container image for the uid of the container process are still effective, even if they are not included in this list. Note that this field cannot be set when spec.os.name is windows.\"", args=[d.arg(name='supplementalGroups', type=d.T.array)]), withSupplementalGroups(supplementalGroups): { template+: { spec+: { securityContext+: { supplementalGroups: if std.isArray(v=supplementalGroups) then supplementalGroups else [supplementalGroups] } } } }, - '#withSupplementalGroupsMixin':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID. If unspecified, no groups will be added to any container.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='supplementalGroups', type=d.T.array)]), + '#withSupplementalGroupsMixin':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID, the fsGroup (if specified), and group memberships defined in the container image for the uid of the container process. If unspecified, no additional groups are added to any container. Note that group memberships defined in the container image for the uid of the container process are still effective, even if they are not included in this list. Note that this field cannot be set when spec.os.name is windows.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='supplementalGroups', type=d.T.array)]), withSupplementalGroupsMixin(supplementalGroups): { template+: { spec+: { securityContext+: { supplementalGroups+: if std.isArray(v=supplementalGroups) then supplementalGroups else [supplementalGroups] } } } }, - '#withSysctls':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch."', args=[d.arg(name='sysctls', type=d.T.array)]), + '#withSysctls':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='sysctls', type=d.T.array)]), withSysctls(sysctls): { template+: { spec+: { securityContext+: { sysctls: if std.isArray(v=sysctls) then sysctls else [sysctls] } } } }, - '#withSysctlsMixin':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='sysctls', type=d.T.array)]), + '#withSysctlsMixin':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch. 
Note that this field cannot be set when spec.os.name is windows."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='sysctls', type=d.T.array)]), withSysctlsMixin(sysctls): { template+: { spec+: { securityContext+: { sysctls+: if std.isArray(v=sysctls) then sysctls else [sysctls] } } } }, }, '#withActiveDeadlineSeconds':: d.fn(help='"Optional duration in seconds the pod may be active on the node relative to StartTime before the system will actively try to mark it failed and kill associated containers. Value must be a positive integer."', args=[d.arg(name='activeDeadlineSeconds', type=d.T.integer)]), @@ -175,9 +192,9 @@ withDnsPolicy(dnsPolicy): { template+: { spec+: { dnsPolicy: dnsPolicy } } }, '#withEnableServiceLinks':: d.fn(help="\"EnableServiceLinks indicates whether information about services should be injected into pod's environment variables, matching the syntax of Docker links. Optional: Defaults to true.\"", args=[d.arg(name='enableServiceLinks', type=d.T.boolean)]), withEnableServiceLinks(enableServiceLinks): { template+: { spec+: { enableServiceLinks: enableServiceLinks } } }, - '#withEphemeralContainers':: d.fn(help="\"List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. This field is alpha-level and is only honored by servers that enable the EphemeralContainers feature.\"", args=[d.arg(name='ephemeralContainers', type=d.T.array)]), + '#withEphemeralContainers':: d.fn(help="\"List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource.\"", args=[d.arg(name='ephemeralContainers', type=d.T.array)]), withEphemeralContainers(ephemeralContainers): { template+: { spec+: { ephemeralContainers: if std.isArray(v=ephemeralContainers) then ephemeralContainers else [ephemeralContainers] } } }, - '#withEphemeralContainersMixin':: d.fn(help="\"List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. This field is alpha-level and is only honored by servers that enable the EphemeralContainers feature.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='ephemeralContainers', type=d.T.array)]), + '#withEphemeralContainersMixin':: d.fn(help="\"List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. 
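// Sketch tying the new pod-level `os` selector to the security-context fields
// whose help text above notes they cannot be set when spec.os.name is windows.
// Composed from the statefulSetSpec functions shown in this hunk; the import
// path and concrete values are illustrative assumptions.
local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';
local spec = k.apps.v1.statefulSetSpec;

spec.template.spec.os.withName('linux')  // declare a Linux pod, so the fields below are permitted
+ spec.template.spec.securityContext.withFsGroup(2000)
+ spec.template.spec.securityContext.withFsGroupChangePolicy('OnRootMismatch')
+ spec.template.spec.securityContext.withSysctls([{ name: 'net.core.somaxconn', value: '1024' }])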
In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='ephemeralContainers', type=d.T.array)]), withEphemeralContainersMixin(ephemeralContainers): { template+: { spec+: { ephemeralContainers+: if std.isArray(v=ephemeralContainers) then ephemeralContainers else [ephemeralContainers] } } }, '#withHostAliases':: d.fn(help="\"HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts file if specified. This is only valid for non-hostNetwork pods.\"", args=[d.arg(name='hostAliases', type=d.T.array)]), withHostAliases(hostAliases): { template+: { spec+: { hostAliases: if std.isArray(v=hostAliases) then hostAliases else [hostAliases] } } }, @@ -189,11 +206,13 @@ withHostNetwork(hostNetwork): { template+: { spec+: { hostNetwork: hostNetwork } } }, '#withHostPID':: d.fn(help="\"Use the host's pid namespace. Optional: Default to false.\"", args=[d.arg(name='hostPID', type=d.T.boolean)]), withHostPID(hostPID): { template+: { spec+: { hostPID: hostPID } } }, + '#withHostUsers':: d.fn(help="\"Use the host's user namespace. Optional: Default to true. If set to true or not present, the pod will be run in the host user namespace, useful for when the pod needs a feature only available to the host user namespace, such as loading a kernel module with CAP_SYS_MODULE. When set to false, a new userns is created for the pod. Setting false is useful for mitigating container breakout vulnerabilities even allowing users to run their containers as root without actually having root privileges on the host. This field is alpha-level and is only honored by servers that enable the UserNamespacesSupport feature.\"", args=[d.arg(name='hostUsers', type=d.T.boolean)]), + withHostUsers(hostUsers): { template+: { spec+: { hostUsers: hostUsers } } }, '#withHostname':: d.fn(help="\"Specifies the hostname of the Pod If not specified, the pod's hostname will be set to a system-defined value.\"", args=[d.arg(name='hostname', type=d.T.string)]), withHostname(hostname): { template+: { spec+: { hostname: hostname } } }, - '#withImagePullSecrets':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), + '#withImagePullSecrets':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), withImagePullSecrets(imagePullSecrets): { template+: { spec+: { imagePullSecrets: if std.isArray(v=imagePullSecrets) then imagePullSecrets else [imagePullSecrets] } } }, - '#withImagePullSecretsMixin':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. 
If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), + '#withImagePullSecretsMixin':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), withImagePullSecretsMixin(imagePullSecrets): { template+: { spec+: { imagePullSecrets+: if std.isArray(v=imagePullSecrets) then imagePullSecrets else [imagePullSecrets] } } }, '#withInitContainers':: d.fn(help='"List of initialization containers belonging to the pod. Init containers are executed in order prior to containers being started. If any init container fails, the pod is considered to have failed and is handled according to its restartPolicy. The name for an init container or normal container must be unique among all containers. Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes. The resourceRequirements of an init container are taken into account during scheduling by finding the highest request/limit for each resource type, and then using the max of of that value or the sum of the normal containers. Limits are applied to init containers in a similar fashion. Init containers cannot currently be added or removed. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/"', args=[d.arg(name='initContainers', type=d.T.array)]), withInitContainers(initContainers): { template+: { spec+: { initContainers: if std.isArray(v=initContainers) then initContainers else [initContainers] } } }, @@ -205,26 +224,34 @@ withNodeSelector(nodeSelector): { template+: { spec+: { nodeSelector: nodeSelector } } }, '#withNodeSelectorMixin':: d.fn(help="\"NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='nodeSelector', type=d.T.object)]), withNodeSelectorMixin(nodeSelector): { template+: { spec+: { nodeSelector+: nodeSelector } } }, - '#withOverhead':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. 
More info: https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md This field is alpha-level as of Kubernetes v1.16, and is only honored by servers that enable the PodOverhead feature."', args=[d.arg(name='overhead', type=d.T.object)]), + '#withOverhead':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md"', args=[d.arg(name='overhead', type=d.T.object)]), withOverhead(overhead): { template+: { spec+: { overhead: overhead } } }, - '#withOverheadMixin':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md This field is alpha-level as of Kubernetes v1.16, and is only honored by servers that enable the PodOverhead feature."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='overhead', type=d.T.object)]), + '#withOverheadMixin':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='overhead', type=d.T.object)]), withOverheadMixin(overhead): { template+: { spec+: { overhead+: overhead } } }, - '#withPreemptionPolicy':: d.fn(help='"PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset. This field is beta-level, gated by the NonPreemptingPriority feature-gate."', args=[d.arg(name='preemptionPolicy', type=d.T.string)]), + '#withPreemptionPolicy':: d.fn(help='"PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. 
Defaults to PreemptLowerPriority if unset."', args=[d.arg(name='preemptionPolicy', type=d.T.string)]), withPreemptionPolicy(preemptionPolicy): { template+: { spec+: { preemptionPolicy: preemptionPolicy } } }, '#withPriority':: d.fn(help='"The priority value. Various system components use this field to find the priority of the pod. When Priority Admission Controller is enabled, it prevents users from setting this field. The admission controller populates this field from PriorityClassName. The higher the value, the higher the priority."', args=[d.arg(name='priority', type=d.T.integer)]), withPriority(priority): { template+: { spec+: { priority: priority } } }, '#withPriorityClassName':: d.fn(help="\"If specified, indicates the pod's priority. \\\"system-node-critical\\\" and \\\"system-cluster-critical\\\" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default.\"", args=[d.arg(name='priorityClassName', type=d.T.string)]), withPriorityClassName(priorityClassName): { template+: { spec+: { priorityClassName: priorityClassName } } }, - '#withReadinessGates':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/0007-pod-ready%2B%2B.md"', args=[d.arg(name='readinessGates', type=d.T.array)]), + '#withReadinessGates':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates"', args=[d.arg(name='readinessGates', type=d.T.array)]), withReadinessGates(readinessGates): { template+: { spec+: { readinessGates: if std.isArray(v=readinessGates) then readinessGates else [readinessGates] } } }, - '#withReadinessGatesMixin':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/0007-pod-ready%2B%2B.md"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='readinessGates', type=d.T.array)]), + '#withReadinessGatesMixin':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='readinessGates', type=d.T.array)]), withReadinessGatesMixin(readinessGates): { template+: { spec+: { readinessGates+: if std.isArray(v=readinessGates) then readinessGates else [readinessGates] } } }, - '#withRestartPolicy':: d.fn(help='"Restart policy for all containers within the pod. One of Always, OnFailure, Never. Default to Always. 
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy"', args=[d.arg(name='restartPolicy', type=d.T.string)]), + '#withResourceClaims':: d.fn(help='"ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. The resources will be made available to those containers which consume them by name.\\n\\nThis is an alpha field and requires enabling the DynamicResourceAllocation feature gate.\\n\\nThis field is immutable."', args=[d.arg(name='resourceClaims', type=d.T.array)]), + withResourceClaims(resourceClaims): { template+: { spec+: { resourceClaims: if std.isArray(v=resourceClaims) then resourceClaims else [resourceClaims] } } }, + '#withResourceClaimsMixin':: d.fn(help='"ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. The resources will be made available to those containers which consume them by name.\\n\\nThis is an alpha field and requires enabling the DynamicResourceAllocation feature gate.\\n\\nThis field is immutable."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='resourceClaims', type=d.T.array)]), + withResourceClaimsMixin(resourceClaims): { template+: { spec+: { resourceClaims+: if std.isArray(v=resourceClaims) then resourceClaims else [resourceClaims] } } }, + '#withRestartPolicy':: d.fn(help='"Restart policy for all containers within the pod. One of Always, OnFailure, Never. In some contexts, only a subset of those values may be permitted. Default to Always. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy"', args=[d.arg(name='restartPolicy', type=d.T.string)]), withRestartPolicy(restartPolicy): { template+: { spec+: { restartPolicy: restartPolicy } } }, - '#withRuntimeClassName':: d.fn(help='"RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \\"legacy\\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md This is a beta feature as of Kubernetes v1.14."', args=[d.arg(name='runtimeClassName', type=d.T.string)]), + '#withRuntimeClassName':: d.fn(help='"RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \\"legacy\\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class"', args=[d.arg(name='runtimeClassName', type=d.T.string)]), withRuntimeClassName(runtimeClassName): { template+: { spec+: { runtimeClassName: runtimeClassName } } }, '#withSchedulerName':: d.fn(help='"If specified, the pod will be dispatched by specified scheduler. If not specified, the pod will be dispatched by default scheduler."', args=[d.arg(name='schedulerName', type=d.T.string)]), withSchedulerName(schedulerName): { template+: { spec+: { schedulerName: schedulerName } } }, + '#withSchedulingGates':: d.fn(help='"SchedulingGates is an opaque list of values that if specified will block scheduling the pod. 
If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the scheduler will not attempt to schedule the pod.\\n\\nSchedulingGates can only be set at pod creation time, and be removed only afterwards.\\n\\nThis is a beta feature enabled by the PodSchedulingReadiness feature gate."', args=[d.arg(name='schedulingGates', type=d.T.array)]), + withSchedulingGates(schedulingGates): { template+: { spec+: { schedulingGates: if std.isArray(v=schedulingGates) then schedulingGates else [schedulingGates] } } }, + '#withSchedulingGatesMixin':: d.fn(help='"SchedulingGates is an opaque list of values that if specified will block scheduling the pod. If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the scheduler will not attempt to schedule the pod.\\n\\nSchedulingGates can only be set at pod creation time, and be removed only afterwards.\\n\\nThis is a beta feature enabled by the PodSchedulingReadiness feature gate."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='schedulingGates', type=d.T.array)]), + withSchedulingGatesMixin(schedulingGates): { template+: { spec+: { schedulingGates+: if std.isArray(v=schedulingGates) then schedulingGates else [schedulingGates] } } }, '#withServiceAccount':: d.fn(help='"DeprecatedServiceAccount is a depreciated alias for ServiceAccountName. Deprecated: Use serviceAccountName instead."', args=[d.arg(name='serviceAccount', type=d.T.string)]), withServiceAccount(serviceAccount): { template+: { spec+: { serviceAccount: serviceAccount } } }, '#withServiceAccountName':: d.fn(help='"ServiceAccountName is the name of the ServiceAccount to use to run this pod. More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/"', args=[d.arg(name='serviceAccountName', type=d.T.string)]), @@ -255,12 +282,16 @@ updateStrategy: { '#rollingUpdate':: d.obj(help='"RollingUpdateStatefulSetStrategy is used to communicate parameter for RollingUpdateStatefulSetStrategyType."'), rollingUpdate: { - '#withPartition':: d.fn(help='"Partition indicates the ordinal at which the StatefulSet should be partitioned. Default value is 0."', args=[d.arg(name='partition', type=d.T.integer)]), + '#withMaxUnavailable':: d.fn(help='"IntOrString is a type that can hold an int32 or a string. When used in JSON or YAML marshalling and unmarshalling, it produces or consumes the inner type. This allows you to have, for example, a JSON field that can accept a name or number."', args=[d.arg(name='maxUnavailable', type=d.T.string)]), + withMaxUnavailable(maxUnavailable): { updateStrategy+: { rollingUpdate+: { maxUnavailable: maxUnavailable } } }, + '#withPartition':: d.fn(help='"Partition indicates the ordinal at which the StatefulSet should be partitioned for updates. During a rolling update, all pods from ordinal Replicas-1 to Partition are updated. All pods from ordinal Partition-1 to 0 remain untouched. This is helpful in being able to do a canary based deployment. The default value is 0."', args=[d.arg(name='partition', type=d.T.integer)]), withPartition(partition): { updateStrategy+: { rollingUpdate+: { partition: partition } } }, }, '#withType':: d.fn(help='"Type indicates the type of the StatefulSetUpdateStrategy. 
Default is RollingUpdate."', args=[d.arg(name='type', type=d.T.string)]), withType(type): { updateStrategy+: { type: type } }, }, + '#withMinReadySeconds':: d.fn(help='"Minimum number of seconds for which a newly created pod should be ready without any of its container crashing for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)"', args=[d.arg(name='minReadySeconds', type=d.T.integer)]), + withMinReadySeconds(minReadySeconds): { minReadySeconds: minReadySeconds }, '#withPodManagementPolicy':: d.fn(help='"podManagementPolicy controls how pods are created during initial scale up, when replacing pods on nodes, or when scaling down. The default policy is `OrderedReady`, where pods are created in increasing order (pod-0, then pod-1, etc) and the controller will wait until each pod is ready before continuing. When scaling down, the pods are removed in the opposite order. The alternative policy is `Parallel` which will create pods in parallel to match the desired scale without waiting, and on scale down will delete all pods at once."', args=[d.arg(name='podManagementPolicy', type=d.T.string)]), withPodManagementPolicy(podManagementPolicy): { podManagementPolicy: podManagementPolicy }, '#withReplicas':: d.fn(help='"replicas is the desired number of replicas of the given Template. These are replicas in the sense that they are instantiations of the same Template, but individual replicas also have a consistent identity. If unspecified, defaults to 1."', args=[d.arg(name='replicas', type=d.T.integer)]), diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apps/v1/statefulSetStatus.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apps/v1/statefulSetStatus.libsonnet similarity index 88% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apps/v1/statefulSetStatus.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apps/v1/statefulSetStatus.libsonnet index 50134d57c7e..3271d603af8 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apps/v1/statefulSetStatus.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apps/v1/statefulSetStatus.libsonnet @@ -1,6 +1,8 @@ { local d = (import 'doc-util/main.libsonnet'), '#':: d.pkg(name='statefulSetStatus', url='', help='"StatefulSetStatus represents the current state of a StatefulSet."'), + '#withAvailableReplicas':: d.fn(help='"Total number of available pods (ready for at least minReadySeconds) targeted by this statefulset."', args=[d.arg(name='availableReplicas', type=d.T.integer)]), + withAvailableReplicas(availableReplicas): { availableReplicas: availableReplicas }, '#withCollisionCount':: d.fn(help='"collisionCount is the count of hash collisions for the StatefulSet. 
The StatefulSet controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ControllerRevision."', args=[d.arg(name='collisionCount', type=d.T.integer)]), withCollisionCount(collisionCount): { collisionCount: collisionCount }, '#withConditions':: d.fn(help="\"Represents the latest available observations of a statefulset's current state.\"", args=[d.arg(name='conditions', type=d.T.array)]), @@ -13,7 +15,7 @@ withCurrentRevision(currentRevision): { currentRevision: currentRevision }, '#withObservedGeneration':: d.fn(help="\"observedGeneration is the most recent generation observed for this StatefulSet. It corresponds to the StatefulSet's generation, which is updated on mutation by the API Server.\"", args=[d.arg(name='observedGeneration', type=d.T.integer)]), withObservedGeneration(observedGeneration): { observedGeneration: observedGeneration }, - '#withReadyReplicas':: d.fn(help='"readyReplicas is the number of Pods created by the StatefulSet controller that have a Ready Condition."', args=[d.arg(name='readyReplicas', type=d.T.integer)]), + '#withReadyReplicas':: d.fn(help='"readyReplicas is the number of pods created for this StatefulSet with a Ready Condition."', args=[d.arg(name='readyReplicas', type=d.T.integer)]), withReadyReplicas(readyReplicas): { readyReplicas: readyReplicas }, '#withReplicas':: d.fn(help='"replicas is the number of Pods created by the StatefulSet controller."', args=[d.arg(name='replicas', type=d.T.integer)]), withReplicas(replicas): { replicas: replicas }, diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apps/v1/statefulSetUpdateStrategy.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apps/v1/statefulSetUpdateStrategy.libsonnet similarity index 52% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apps/v1/statefulSetUpdateStrategy.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apps/v1/statefulSetUpdateStrategy.libsonnet index 765e934fc3f..de3704706f9 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/apps/v1/statefulSetUpdateStrategy.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/apps/v1/statefulSetUpdateStrategy.libsonnet @@ -3,7 +3,9 @@ '#':: d.pkg(name='statefulSetUpdateStrategy', url='', help='"StatefulSetUpdateStrategy indicates the strategy that the StatefulSet controller will use to perform updates. It includes any additional parameters necessary to perform the update for the indicated strategy."'), '#rollingUpdate':: d.obj(help='"RollingUpdateStatefulSetStrategy is used to communicate parameter for RollingUpdateStatefulSetStrategyType."'), rollingUpdate: { - '#withPartition':: d.fn(help='"Partition indicates the ordinal at which the StatefulSet should be partitioned. Default value is 0."', args=[d.arg(name='partition', type=d.T.integer)]), + '#withMaxUnavailable':: d.fn(help='"IntOrString is a type that can hold an int32 or a string. When used in JSON or YAML marshalling and unmarshalling, it produces or consumes the inner type. 
This allows you to have, for example, a JSON field that can accept a name or number."', args=[d.arg(name='maxUnavailable', type=d.T.string)]), + withMaxUnavailable(maxUnavailable): { rollingUpdate+: { maxUnavailable: maxUnavailable } }, + '#withPartition':: d.fn(help='"Partition indicates the ordinal at which the StatefulSet should be partitioned for updates. During a rolling update, all pods from ordinal Replicas-1 to Partition are updated. All pods from ordinal Partition-1 to 0 remain untouched. This is helpful in being able to do a canary based deployment. The default value is 0."', args=[d.arg(name='partition', type=d.T.integer)]), withPartition(partition): { rollingUpdate+: { partition: partition } }, }, '#withType':: d.fn(help='"Type indicates the type of the StatefulSetUpdateStrategy. Default is RollingUpdate."', args=[d.arg(name='type', type=d.T.string)]), diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authentication/main.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authentication/main.libsonnet similarity index 79% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authentication/main.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authentication/main.libsonnet index 79f614c9bef..91f176810b8 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authentication/main.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authentication/main.libsonnet @@ -2,5 +2,6 @@ local d = (import 'doc-util/main.libsonnet'), '#':: d.pkg(name='authentication', url='', help=''), v1: (import 'v1/main.libsonnet'), + v1alpha1: (import 'v1alpha1/main.libsonnet'), v1beta1: (import 'v1beta1/main.libsonnet'), } diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authentication/v1/boundObjectReference.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authentication/v1/boundObjectReference.libsonnet similarity index 100% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authentication/v1/boundObjectReference.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authentication/v1/boundObjectReference.libsonnet diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authentication/v1/main.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authentication/v1/main.libsonnet similarity index 80% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authentication/v1/main.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authentication/v1/main.libsonnet index 05cedc80c76..7953f90a2a7 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authentication/v1/main.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authentication/v1/main.libsonnet @@ -2,6 +2,8 @@ local d = (import 'doc-util/main.libsonnet'), '#':: d.pkg(name='v1', url='', help=''), boundObjectReference: (import 'boundObjectReference.libsonnet'), + selfSubjectReview: (import 'selfSubjectReview.libsonnet'), + selfSubjectReviewStatus: (import 'selfSubjectReviewStatus.libsonnet'), 
tokenRequest: (import 'tokenRequest.libsonnet'), tokenRequestSpec: (import 'tokenRequestSpec.libsonnet'), tokenRequestStatus: (import 'tokenRequestStatus.libsonnet'), diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authentication/v1/selfSubjectReview.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authentication/v1/selfSubjectReview.libsonnet new file mode 100644 index 00000000000..f213497b811 --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authentication/v1/selfSubjectReview.libsonnet @@ -0,0 +1,54 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='selfSubjectReview', url='', help='"SelfSubjectReview contains the user information that the kube-apiserver has about the user making this request. When using impersonation, users will receive the user info of the user being impersonated. If impersonation or request header authentication is used, any extra keys will have their case ignored and returned as lowercase."'), + '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), + metadata: { + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + withAnnotations(annotations): { metadata+: { annotations: annotations } }, + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, + '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), + withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, + '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), + withDeletionGracePeriodSeconds(deletionGracePeriodSeconds): { metadata+: { deletionGracePeriodSeconds: deletionGracePeriodSeconds } }, + '#withDeletionTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. 
Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='deletionTimestamp', type=d.T.string)]), + withDeletionTimestamp(deletionTimestamp): { metadata+: { deletionTimestamp: deletionTimestamp } }, + '#withFinalizers':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."', args=[d.arg(name='finalizers', type=d.T.array)]), + withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, + '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), + withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + withGenerateName(generateName): { metadata+: { generateName: generateName } }, + '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. 
Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), + withGeneration(generation): { metadata+: { generation: generation } }, + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), + withLabels(labels): { metadata+: { labels: labels } }, + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + withLabelsMixin(labels): { metadata+: { labels+: labels } }, + '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), + withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, + '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), + withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { metadata+: { name: name } }, + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + withNamespace(namespace): { metadata+: { namespace: namespace } }, + '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. 
If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), + withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, + '#withOwnerReferencesMixin':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ownerReferences', type=d.T.array)]), + withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, + '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), + withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), + withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), + withUid(uid): { metadata+: { uid: uid } }, + }, + '#new':: d.fn(help='new returns an instance of SelfSubjectReview', args=[d.arg(name='name', type=d.T.string)]), + new(name): { + apiVersion: 'authentication.k8s.io/v1', + kind: 'SelfSubjectReview', + } + self.metadata.withName(name=name), + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authentication/v1/selfSubjectReviewStatus.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authentication/v1/selfSubjectReviewStatus.libsonnet new file mode 100644 index 00000000000..a50f9fad1b4 --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authentication/v1/selfSubjectReviewStatus.libsonnet @@ -0,0 +1,21 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='selfSubjectReviewStatus', url='', help='"SelfSubjectReviewStatus is filled by the kube-apiserver and sent back to a user."'), + '#userInfo':: d.obj(help='"UserInfo holds the information about the user needed to implement the user.Info interface."'), + userInfo: { + '#withExtra':: d.fn(help='"Any additional information provided by the authenticator."', args=[d.arg(name='extra', type=d.T.object)]), + withExtra(extra): { userInfo+: { extra: extra } }, + '#withExtraMixin':: d.fn(help='"Any additional information provided by the authenticator."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='extra', type=d.T.object)]), + withExtraMixin(extra): { userInfo+: { extra+: extra } }, + '#withGroups':: d.fn(help='"The names of groups this user is a part of."', args=[d.arg(name='groups', type=d.T.array)]), + withGroups(groups): { userInfo+: { groups: if std.isArray(v=groups) then groups else [groups] } }, + '#withGroupsMixin':: d.fn(help='"The names of groups this user is a part of."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='groups', type=d.T.array)]), + withGroupsMixin(groups): { userInfo+: { groups+: if std.isArray(v=groups) then groups else [groups] } }, + '#withUid':: d.fn(help='"A unique value that identifies this user across time. 
If this user is deleted and another user by the same name is added, they will have different UIDs."', args=[d.arg(name='uid', type=d.T.string)]), + withUid(uid): { userInfo+: { uid: uid } }, + '#withUsername':: d.fn(help='"The name that uniquely identifies this user among all active users."', args=[d.arg(name='username', type=d.T.string)]), + withUsername(username): { userInfo+: { username: username } }, + }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authentication/v1/tokenRequest.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authentication/v1/tokenRequest.libsonnet similarity index 80% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authentication/v1/tokenRequest.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authentication/v1/tokenRequest.libsonnet index 97c0634dd51..bb17efa3821 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authentication/v1/tokenRequest.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authentication/v1/tokenRequest.libsonnet @@ -3,12 +3,10 @@ '#':: d.pkg(name='tokenRequest', url='', help='"TokenRequest requests a token for a given service account."'), '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. 
This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -19,21 +17,21 @@ withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. 
If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { metadata+: { generateName: generateName } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { metadata+: { labels+: labels } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". 
The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { metadata+: { namespace: namespace } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, @@ -41,9 +39,9 @@ withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. 
They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { metadata+: { uid: uid } }, }, '#new':: d.fn(help='new returns an instance of TokenRequest', args=[d.arg(name='name', type=d.T.string)]), @@ -64,9 +62,9 @@ '#withUid':: d.fn(help='"UID of the referent."', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { spec+: { boundObjectRef+: { uid: uid } } }, }, - '#withAudiences':: d.fn(help='"Audiences are the intendend audiences of the token. A recipient of a token must identitfy themself with an identifier in the list of audiences of the token, and otherwise should reject the token. A token issued for multiple audiences may be used to authenticate against any of the audiences listed but implies a high degree of trust between the target audiences."', args=[d.arg(name='audiences', type=d.T.array)]), + '#withAudiences':: d.fn(help='"Audiences are the intendend audiences of the token. A recipient of a token must identify themself with an identifier in the list of audiences of the token, and otherwise should reject the token. A token issued for multiple audiences may be used to authenticate against any of the audiences listed but implies a high degree of trust between the target audiences."', args=[d.arg(name='audiences', type=d.T.array)]), withAudiences(audiences): { spec+: { audiences: if std.isArray(v=audiences) then audiences else [audiences] } }, - '#withAudiencesMixin':: d.fn(help='"Audiences are the intendend audiences of the token. A recipient of a token must identitfy themself with an identifier in the list of audiences of the token, and otherwise should reject the token. 
A token issued for multiple audiences may be used to authenticate against any of the audiences listed but implies a high degree of trust between the target audiences."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='audiences', type=d.T.array)]), + '#withAudiencesMixin':: d.fn(help='"Audiences are the intendend audiences of the token. A recipient of a token must identify themself with an identifier in the list of audiences of the token, and otherwise should reject the token. A token issued for multiple audiences may be used to authenticate against any of the audiences listed but implies a high degree of trust between the target audiences."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='audiences', type=d.T.array)]), withAudiencesMixin(audiences): { spec+: { audiences+: if std.isArray(v=audiences) then audiences else [audiences] } }, '#withExpirationSeconds':: d.fn(help="\"ExpirationSeconds is the requested duration of validity of the request. The token issuer may return a token with a different validity duration so a client needs to check the 'expiration' field in a response.\"", args=[d.arg(name='expirationSeconds', type=d.T.integer)]), withExpirationSeconds(expirationSeconds): { spec+: { expirationSeconds: expirationSeconds } }, diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authentication/v1/tokenRequestSpec.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authentication/v1/tokenRequestSpec.libsonnet similarity index 68% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authentication/v1/tokenRequestSpec.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authentication/v1/tokenRequestSpec.libsonnet index 534a6fb1be6..5666b2da721 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authentication/v1/tokenRequestSpec.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authentication/v1/tokenRequestSpec.libsonnet @@ -12,9 +12,9 @@ '#withUid':: d.fn(help='"UID of the referent."', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { boundObjectRef+: { uid: uid } }, }, - '#withAudiences':: d.fn(help='"Audiences are the intendend audiences of the token. A recipient of a token must identitfy themself with an identifier in the list of audiences of the token, and otherwise should reject the token. A token issued for multiple audiences may be used to authenticate against any of the audiences listed but implies a high degree of trust between the target audiences."', args=[d.arg(name='audiences', type=d.T.array)]), + '#withAudiences':: d.fn(help='"Audiences are the intendend audiences of the token. A recipient of a token must identify themself with an identifier in the list of audiences of the token, and otherwise should reject the token. A token issued for multiple audiences may be used to authenticate against any of the audiences listed but implies a high degree of trust between the target audiences."', args=[d.arg(name='audiences', type=d.T.array)]), withAudiences(audiences): { audiences: if std.isArray(v=audiences) then audiences else [audiences] }, - '#withAudiencesMixin':: d.fn(help='"Audiences are the intendend audiences of the token. 
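The difference between withAudiences and withAudiencesMixin above comes down to Jsonnet's `:` versus `+:` field operators. A self-contained sketch (not part of the vendored files) using the same bodies as the generated tokenRequestSpec helpers:

local withAudiences(audiences) = { audiences: if std.isArray(v=audiences) then audiences else [audiences] };
local withAudiencesMixin(audiences) = { audiences+: if std.isArray(v=audiences) then audiences else [audiences] };

{
  // the plain helper replaces the field ...
  replaced: { audiences: ['api'] } + withAudiences('vault'),       // -> { audiences: ['vault'] }
  // ... while the Mixin variant appends to the existing array via `+:`
  appended: { audiences: ['api'] } + withAudiencesMixin('vault'),  // -> { audiences: ['api', 'vault'] }
}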
A recipient of a token must identitfy themself with an identifier in the list of audiences of the token, and otherwise should reject the token. A token issued for multiple audiences may be used to authenticate against any of the audiences listed but implies a high degree of trust between the target audiences."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='audiences', type=d.T.array)]), + '#withAudiencesMixin':: d.fn(help='"Audiences are the intendend audiences of the token. A recipient of a token must identify themself with an identifier in the list of audiences of the token, and otherwise should reject the token. A token issued for multiple audiences may be used to authenticate against any of the audiences listed but implies a high degree of trust between the target audiences."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='audiences', type=d.T.array)]), withAudiencesMixin(audiences): { audiences+: if std.isArray(v=audiences) then audiences else [audiences] }, '#withExpirationSeconds':: d.fn(help="\"ExpirationSeconds is the requested duration of validity of the request. The token issuer may return a token with a different validity duration so a client needs to check the 'expiration' field in a response.\"", args=[d.arg(name='expirationSeconds', type=d.T.integer)]), withExpirationSeconds(expirationSeconds): { expirationSeconds: expirationSeconds }, diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authentication/v1/tokenRequestStatus.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authentication/v1/tokenRequestStatus.libsonnet similarity index 100% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authentication/v1/tokenRequestStatus.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authentication/v1/tokenRequestStatus.libsonnet diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authentication/v1/tokenReview.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authentication/v1/tokenReview.libsonnet similarity index 85% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authentication/v1/tokenReview.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authentication/v1/tokenReview.libsonnet index c23b439d80c..e91e9bccf47 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authentication/v1/tokenReview.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authentication/v1/tokenReview.libsonnet @@ -3,12 +3,10 @@ '#':: d.pkg(name='tokenReview', url='', help='"TokenReview attempts to authenticate a token to a known user. Note: TokenReview requests may be cached by the webhook token authenticator plugin in the kube-apiserver."'), '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. 
They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -19,21 +17,21 @@ withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. 
If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { metadata+: { generateName: generateName } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { metadata+: { labels+: labels } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. 
Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { metadata+: { namespace: namespace } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, @@ -41,9 +39,9 @@ withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. 
Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { metadata+: { uid: uid } }, }, '#new':: d.fn(help='new returns an instance of TokenReview', args=[d.arg(name='name', type=d.T.string)]), diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authentication/v1beta1/tokenReviewSpec.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authentication/v1/tokenReviewSpec.libsonnet similarity index 100% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authentication/v1beta1/tokenReviewSpec.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authentication/v1/tokenReviewSpec.libsonnet diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authentication/v1beta1/tokenReviewStatus.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authentication/v1/tokenReviewStatus.libsonnet similarity index 100% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authentication/v1beta1/tokenReviewStatus.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authentication/v1/tokenReviewStatus.libsonnet diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authentication/v1beta1/userInfo.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authentication/v1/userInfo.libsonnet similarity index 100% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authentication/v1beta1/userInfo.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authentication/v1/userInfo.libsonnet diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authentication/v1alpha1/main.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authentication/v1alpha1/main.libsonnet new file mode 100644 index 00000000000..a21668c4edb --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authentication/v1alpha1/main.libsonnet @@ -0,0 +1,6 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='v1alpha1', url='', help=''), + selfSubjectReview: (import 'selfSubjectReview.libsonnet'), + selfSubjectReviewStatus: (import 'selfSubjectReviewStatus.libsonnet'), +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authentication/v1alpha1/selfSubjectReview.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authentication/v1alpha1/selfSubjectReview.libsonnet new file mode 100644 index 00000000000..9e2551741b5 --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authentication/v1alpha1/selfSubjectReview.libsonnet @@ -0,0 +1,54 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='selfSubjectReview', url='', help='"SelfSubjectReview contains the user information that the kube-apiserver has about the user making this request. When using impersonation, users will receive the user info of the user being impersonated. 
If impersonation or request header authentication is used, any extra keys will have their case ignored and returned as lowercase."'), + '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), + metadata: { + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + withAnnotations(annotations): { metadata+: { annotations: annotations } }, + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, + '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), + withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, + '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), + withDeletionGracePeriodSeconds(deletionGracePeriodSeconds): { metadata+: { deletionGracePeriodSeconds: deletionGracePeriodSeconds } }, + '#withDeletionTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='deletionTimestamp', type=d.T.string)]), + withDeletionTimestamp(deletionTimestamp): { metadata+: { deletionTimestamp: deletionTimestamp } }, + '#withFinalizers':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. 
Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."', args=[d.arg(name='finalizers', type=d.T.array)]), + withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, + '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), + withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + withGenerateName(generateName): { metadata+: { generateName: generateName } }, + '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), + withGeneration(generation): { metadata+: { generation: generation } }, + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), + withLabels(labels): { metadata+: { labels: labels } }, + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + withLabelsMixin(labels): { metadata+: { labels+: labels } }, + '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), + withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, + '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), + withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { metadata+: { name: name } }, + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + withNamespace(namespace): { metadata+: { namespace: namespace } }, + '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), + withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, + '#withOwnerReferencesMixin':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. 
If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ownerReferences', type=d.T.array)]), + withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, + '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), + withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), + withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), + withUid(uid): { metadata+: { uid: uid } }, + }, + '#new':: d.fn(help='new returns an instance of SelfSubjectReview', args=[d.arg(name='name', type=d.T.string)]), + new(name): { + apiVersion: 'authentication.k8s.io/v1alpha1', + kind: 'SelfSubjectReview', + } + self.metadata.withName(name=name), + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authentication/v1alpha1/selfSubjectReviewStatus.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authentication/v1alpha1/selfSubjectReviewStatus.libsonnet new file mode 100644 index 00000000000..a50f9fad1b4 --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authentication/v1alpha1/selfSubjectReviewStatus.libsonnet @@ -0,0 +1,21 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='selfSubjectReviewStatus', url='', help='"SelfSubjectReviewStatus is filled by the kube-apiserver and sent back to a user."'), + '#userInfo':: d.obj(help='"UserInfo holds the information about the user needed to implement the user.Info interface."'), + userInfo: { + '#withExtra':: d.fn(help='"Any additional information provided by the authenticator."', args=[d.arg(name='extra', type=d.T.object)]), + withExtra(extra): { userInfo+: { extra: extra } }, + '#withExtraMixin':: d.fn(help='"Any additional information provided by the authenticator."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='extra', type=d.T.object)]), + withExtraMixin(extra): { userInfo+: { extra+: extra } }, + '#withGroups':: d.fn(help='"The names of groups this user is a part of."', args=[d.arg(name='groups', type=d.T.array)]), + withGroups(groups): { userInfo+: { groups: if std.isArray(v=groups) then groups else [groups] } }, + '#withGroupsMixin':: d.fn(help='"The names of groups this user is a part of."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='groups', type=d.T.array)]), + withGroupsMixin(groups): { userInfo+: { groups+: if std.isArray(v=groups) then groups else [groups] } }, + '#withUid':: d.fn(help='"A unique value that identifies this user across time. 
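A usage sketch for the new v1alpha1 selfSubjectReview constructor (illustrative only; the import path and the `k.authentication.v1alpha1` accessor are assumptions based on the vendored layout introduced by this patch):

local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';
local ssr = k.authentication.v1alpha1.selfSubjectReview;

// new() sets apiVersion/kind and metadata.name; further metadata helpers merge in via `metadata+:`
ssr.new('who-am-i') + ssr.metadata.withLabels({ team: 'ops' })
// -> { apiVersion: 'authentication.k8s.io/v1alpha1',
//      kind: 'SelfSubjectReview',
//      metadata: { name: 'who-am-i', labels: { team: 'ops' } } }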
If this user is deleted and another user by the same name is added, they will have different UIDs."', args=[d.arg(name='uid', type=d.T.string)]), + withUid(uid): { userInfo+: { uid: uid } }, + '#withUsername':: d.fn(help='"The name that uniquely identifies this user among all active users."', args=[d.arg(name='username', type=d.T.string)]), + withUsername(username): { userInfo+: { username: username } }, + }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authentication/v1beta1/main.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authentication/v1beta1/main.libsonnet new file mode 100644 index 00000000000..4200edfc4c3 --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authentication/v1beta1/main.libsonnet @@ -0,0 +1,6 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='v1beta1', url='', help=''), + selfSubjectReview: (import 'selfSubjectReview.libsonnet'), + selfSubjectReviewStatus: (import 'selfSubjectReviewStatus.libsonnet'), +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authentication/v1beta1/selfSubjectReview.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authentication/v1beta1/selfSubjectReview.libsonnet new file mode 100644 index 00000000000..2fadc08a1af --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authentication/v1beta1/selfSubjectReview.libsonnet @@ -0,0 +1,54 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='selfSubjectReview', url='', help='"SelfSubjectReview contains the user information that the kube-apiserver has about the user making this request. When using impersonation, users will receive the user info of the user being impersonated. If impersonation or request header authentication is used, any extra keys will have their case ignored and returned as lowercase."'), + '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), + metadata: { + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + withAnnotations(annotations): { metadata+: { annotations: annotations } }, + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, + '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. 
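The selfSubjectReviewStatus helpers all nest under `userInfo`, so they can be summed directly; a minimal standalone sketch mirroring the generated bodies (not part of the vendored files):

local withUsername(username) = { userInfo+: { username: username } };
local withGroups(groups) = { userInfo+: { groups: if std.isArray(v=groups) then groups else [groups] } };

withUsername('jane@example.com') + withGroups(['system:authenticated'])
// -> { userInfo: { username: 'jane@example.com', groups: ['system:authenticated'] } }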
Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), + withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, + '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), + withDeletionGracePeriodSeconds(deletionGracePeriodSeconds): { metadata+: { deletionGracePeriodSeconds: deletionGracePeriodSeconds } }, + '#withDeletionTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='deletionTimestamp', type=d.T.string)]), + withDeletionTimestamp(deletionTimestamp): { metadata+: { deletionTimestamp: deletionTimestamp } }, + '#withFinalizers':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."', args=[d.arg(name='finalizers', type=d.T.array)]), + withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, + '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), + withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. 
If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + withGenerateName(generateName): { metadata+: { generateName: generateName } }, + '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), + withGeneration(generation): { metadata+: { generation: generation } }, + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), + withLabels(labels): { metadata+: { labels: labels } }, + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + withLabelsMixin(labels): { metadata+: { labels+: labels } }, + '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), + withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, + '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), + withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { metadata+: { name: name } }, + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + withNamespace(namespace): { metadata+: { namespace: namespace } }, + '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), + withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, + '#withOwnerReferencesMixin':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ownerReferences', type=d.T.array)]), + withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, + '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), + withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), + withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), + withUid(uid): { metadata+: { uid: uid } }, + }, + '#new':: d.fn(help='new returns an instance of SelfSubjectReview', args=[d.arg(name='name', type=d.T.string)]), + new(name): { + apiVersion: 'authentication.k8s.io/v1beta1', + kind: 'SelfSubjectReview', + } + self.metadata.withName(name=name), + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authentication/v1beta1/selfSubjectReviewStatus.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authentication/v1beta1/selfSubjectReviewStatus.libsonnet new file mode 100644 index 00000000000..a50f9fad1b4 --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authentication/v1beta1/selfSubjectReviewStatus.libsonnet @@ -0,0 +1,21 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='selfSubjectReviewStatus', url='', help='"SelfSubjectReviewStatus is filled by the kube-apiserver and sent back to a user."'), + '#userInfo':: d.obj(help='"UserInfo holds the information about the user needed to implement the user.Info interface."'), + userInfo: { + '#withExtra':: d.fn(help='"Any additional information provided by the authenticator."', args=[d.arg(name='extra', type=d.T.object)]), + withExtra(extra): { userInfo+: { extra: extra } }, + '#withExtraMixin':: d.fn(help='"Any additional information provided by the authenticator."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='extra', type=d.T.object)]), + withExtraMixin(extra): { userInfo+: { extra+: extra } }, + '#withGroups':: d.fn(help='"The names of groups this user is a part of."', args=[d.arg(name='groups', type=d.T.array)]), + withGroups(groups): { userInfo+: { groups: if std.isArray(v=groups) then groups else [groups] } }, + '#withGroupsMixin':: d.fn(help='"The names of groups this user is a part of."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='groups', type=d.T.array)]), + withGroupsMixin(groups): { userInfo+: { groups+: if std.isArray(v=groups) then groups else [groups] } }, + '#withUid':: d.fn(help='"A unique value that identifies this user across time. 
If this user is deleted and another user by the same name is added, they will have different UIDs."', args=[d.arg(name='uid', type=d.T.string)]), + withUid(uid): { userInfo+: { uid: uid } }, + '#withUsername':: d.fn(help='"The name that uniquely identifies this user among all active users."', args=[d.arg(name='username', type=d.T.string)]), + withUsername(username): { userInfo+: { username: username } }, + }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/main.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authorization/main.libsonnet similarity index 75% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/main.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authorization/main.libsonnet index c56c20bb553..3f444d121b0 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/main.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authorization/main.libsonnet @@ -2,5 +2,4 @@ local d = (import 'doc-util/main.libsonnet'), '#':: d.pkg(name='authorization', url='', help=''), v1: (import 'v1/main.libsonnet'), - v1beta1: (import 'v1beta1/main.libsonnet'), } diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1/localSubjectAccessReview.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authorization/v1/localSubjectAccessReview.libsonnet similarity index 87% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1/localSubjectAccessReview.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authorization/v1/localSubjectAccessReview.libsonnet index 3c0b160e00a..78cc42d028c 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1/localSubjectAccessReview.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authorization/v1/localSubjectAccessReview.libsonnet @@ -3,12 +3,10 @@ '#':: d.pkg(name='localSubjectAccessReview', url='', help='"LocalSubjectAccessReview checks whether or not a user or group can perform an action in a given namespace. Having a namespace scoped resource makes it much easier to grant namespace scoped policy that includes permissions checking."'), '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -19,21 +17,21 @@ withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. 
Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { metadata+: { generateName: generateName } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. 
More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { metadata+: { labels+: labels } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. 
An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { metadata+: { namespace: namespace } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, @@ -41,9 +39,9 @@ withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { metadata+: { uid: uid } }, }, '#new':: d.fn(help='new returns an instance of LocalSubjectAccessReview', args=[d.arg(name='name', type=d.T.string)]), diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1/main.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authorization/v1/main.libsonnet similarity index 100% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1/main.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authorization/v1/main.libsonnet diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1beta1/nonResourceAttributes.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authorization/v1/nonResourceAttributes.libsonnet similarity index 100% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1beta1/nonResourceAttributes.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authorization/v1/nonResourceAttributes.libsonnet diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1beta1/nonResourceRule.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authorization/v1/nonResourceRule.libsonnet similarity index 100% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1beta1/nonResourceRule.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authorization/v1/nonResourceRule.libsonnet diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1beta1/resourceAttributes.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authorization/v1/resourceAttributes.libsonnet similarity index 100% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1beta1/resourceAttributes.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authorization/v1/resourceAttributes.libsonnet diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1beta1/resourceRule.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authorization/v1/resourceRule.libsonnet similarity index 100% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1beta1/resourceRule.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authorization/v1/resourceRule.libsonnet diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1/selfSubjectAccessReview.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authorization/v1/selfSubjectAccessReview.libsonnet similarity index 86% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1/selfSubjectAccessReview.libsonnet rename to 
operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authorization/v1/selfSubjectAccessReview.libsonnet index 4ea9baedc76..d8fc6e1c910 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1/selfSubjectAccessReview.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authorization/v1/selfSubjectAccessReview.libsonnet @@ -3,12 +3,10 @@ '#':: d.pkg(name='selfSubjectAccessReview', url='', help='"SelfSubjectAccessReview checks whether or the current user can perform an action. Not filling in a spec.namespace means \\"in all namespaces\\". Self is a special case, because users should always be able to check whether they can perform an action"'), '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. 
Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -19,21 +17,21 @@ withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { metadata+: { generateName: generateName } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { metadata+: { labels+: labels } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. 
Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { metadata+: { namespace: namespace } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, @@ -41,9 +39,9 @@ withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { metadata+: { uid: uid } }, }, '#new':: d.fn(help='new returns an instance of SelfSubjectAccessReview', args=[d.arg(name='name', type=d.T.string)]), diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1beta1/selfSubjectAccessReviewSpec.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authorization/v1/selfSubjectAccessReviewSpec.libsonnet similarity index 100% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1beta1/selfSubjectAccessReviewSpec.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authorization/v1/selfSubjectAccessReviewSpec.libsonnet diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1/selfSubjectRulesReview.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authorization/v1/selfSubjectRulesReview.libsonnet similarity index 84% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1/selfSubjectRulesReview.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authorization/v1/selfSubjectRulesReview.libsonnet index 5fddc7f72a2..a92ebdd26c2 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1/selfSubjectRulesReview.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authorization/v1/selfSubjectRulesReview.libsonnet @@ -3,12 +3,10 @@ '#':: d.pkg(name='selfSubjectRulesReview', url='', help="\"SelfSubjectRulesReview enumerates the set of actions the current user can perform within a namespace. The returned list of actions may be incomplete depending on the server's authorization mode, and any errors experienced during the evaluation. 
SelfSubjectRulesReview should be used by UIs to show/hide actions, or to quickly let an end user reason about their permissions. It should NOT Be used by external systems to drive authorization decisions as this raises confused deputy, cache lifetime/revocation, and correctness concerns. SubjectAccessReview, and LocalAccessReview are the correct way to defer authorization decisions to the API server.\""), '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. 
Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -19,21 +17,21 @@ withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { metadata+: { generateName: generateName } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. 
Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { metadata+: { labels+: labels } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. 
Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { metadata+: { namespace: namespace } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, @@ -41,9 +39,9 @@ withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. 
Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { metadata+: { uid: uid } }, }, '#new':: d.fn(help='new returns an instance of SelfSubjectRulesReview', args=[d.arg(name='name', type=d.T.string)]), @@ -51,7 +49,7 @@ apiVersion: 'authorization.k8s.io/v1', kind: 'SelfSubjectRulesReview', } + self.metadata.withName(name=name), - '#spec':: d.obj(help=''), + '#spec':: d.obj(help='"SelfSubjectRulesReviewSpec defines the specification for SelfSubjectRulesReview."'), spec: { '#withNamespace':: d.fn(help='"Namespace to evaluate rules for. Required."', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { spec+: { namespace: namespace } }, diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1/selfSubjectRulesReviewSpec.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authorization/v1/selfSubjectRulesReviewSpec.libsonnet similarity index 64% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1/selfSubjectRulesReviewSpec.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authorization/v1/selfSubjectRulesReviewSpec.libsonnet index 901f17b4af6..d38eb6ffaea 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1/selfSubjectRulesReviewSpec.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authorization/v1/selfSubjectRulesReviewSpec.libsonnet @@ -1,6 +1,6 @@ { local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='selfSubjectRulesReviewSpec', url='', help=''), + '#':: d.pkg(name='selfSubjectRulesReviewSpec', url='', help='"SelfSubjectRulesReviewSpec defines the specification for SelfSubjectRulesReview."'), '#withNamespace':: d.fn(help='"Namespace to evaluate rules for. 
Required."', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { namespace: namespace }, '#mixin': 'ignore', diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1/subjectAccessReview.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authorization/v1/subjectAccessReview.libsonnet similarity index 87% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1/subjectAccessReview.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authorization/v1/subjectAccessReview.libsonnet index 9f02e24779a..0f9e2dfde47 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1/subjectAccessReview.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authorization/v1/subjectAccessReview.libsonnet @@ -3,12 +3,10 @@ '#':: d.pkg(name='subjectAccessReview', url='', help='"SubjectAccessReview checks whether or not a user or group can perform an action."'), '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. 
This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -19,21 +17,21 @@ withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. 
This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { metadata+: { generateName: generateName } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { metadata+: { labels+: labels } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". 
The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { metadata+: { namespace: namespace } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, @@ -41,9 +39,9 @@ withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. 
They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { metadata+: { uid: uid } }, }, '#new':: d.fn(help='new returns an instance of SubjectAccessReview', args=[d.arg(name='name', type=d.T.string)]), diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1/subjectAccessReviewSpec.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authorization/v1/subjectAccessReviewSpec.libsonnet similarity index 100% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1/subjectAccessReviewSpec.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authorization/v1/subjectAccessReviewSpec.libsonnet diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1beta1/subjectAccessReviewStatus.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authorization/v1/subjectAccessReviewStatus.libsonnet similarity index 100% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1beta1/subjectAccessReviewStatus.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authorization/v1/subjectAccessReviewStatus.libsonnet diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1beta1/subjectRulesReviewStatus.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authorization/v1/subjectRulesReviewStatus.libsonnet similarity index 100% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/authorization/v1beta1/subjectRulesReviewStatus.libsonnet rename to 
operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/authorization/v1/subjectRulesReviewStatus.libsonnet diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/main.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/main.libsonnet similarity index 60% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/main.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/main.libsonnet index 69a8e08c746..d93fba44a27 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/main.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/main.libsonnet @@ -2,6 +2,5 @@ local d = (import 'doc-util/main.libsonnet'), '#':: d.pkg(name='autoscaling', url='', help=''), v1: (import 'v1/main.libsonnet'), - v2beta1: (import 'v2beta1/main.libsonnet'), - v2beta2: (import 'v2beta2/main.libsonnet'), + v2: (import 'v2/main.libsonnet'), } diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v1/crossVersionObjectReference.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v1/crossVersionObjectReference.libsonnet new file mode 100644 index 00000000000..04c0e544aa8 --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v1/crossVersionObjectReference.libsonnet @@ -0,0 +1,10 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='crossVersionObjectReference', url='', help='"CrossVersionObjectReference contains enough information to let you identify the referred resource."'), + '#withKind':: d.fn(help='"kind is the kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds"', args=[d.arg(name='kind', type=d.T.string)]), + withKind(kind): { kind: kind }, + '#withName':: d.fn(help='"name is the name of the referent; More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { name: name }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v1/horizontalPodAutoscaler.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v1/horizontalPodAutoscaler.libsonnet similarity index 79% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v1/horizontalPodAutoscaler.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v1/horizontalPodAutoscaler.libsonnet index e44700dbf7e..8af49e01cb2 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v1/horizontalPodAutoscaler.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v1/horizontalPodAutoscaler.libsonnet @@ -3,12 +3,10 @@ '#':: d.pkg(name='horizontalPodAutoscaler', url='', help='"configuration of a horizontal pod autoscaler."'), 
'#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -19,21 +17,21 @@ withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. 
Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { metadata+: { generateName: generateName } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { metadata+: { labels+: labels } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. 
Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { metadata+: { namespace: namespace } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, @@ -41,9 +39,9 @@ withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. 
Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { metadata+: { uid: uid } }, }, '#new':: d.fn(help='new returns an instance of HorizontalPodAutoscaler', args=[d.arg(name='name', type=d.T.string)]), @@ -55,18 +53,18 @@ spec: { '#scaleTargetRef':: d.obj(help='"CrossVersionObjectReference contains enough information to let you identify the referred resource."'), scaleTargetRef: { - '#withApiVersion':: d.fn(help='"API version of the referent"', args=[d.arg(name='apiVersion', type=d.T.string)]), + '#withApiVersion':: d.fn(help='"apiVersion is the API version of the referent"', args=[d.arg(name='apiVersion', type=d.T.string)]), withApiVersion(apiVersion): { spec+: { scaleTargetRef+: { apiVersion: apiVersion } } }, - '#withKind':: d.fn(help='"Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds\\', args=[d.arg(name='kind', type=d.T.string)]), + '#withKind':: d.fn(help='"kind is the kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds"', args=[d.arg(name='kind', type=d.T.string)]), withKind(kind): { spec+: { scaleTargetRef+: { kind: kind } } }, - '#withName':: d.fn(help='"Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"name is the name of the referent; More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { spec+: { scaleTargetRef+: { name: name } } }, }, - '#withMaxReplicas':: d.fn(help='"upper limit for the number of pods that can be set by the autoscaler; cannot be smaller than MinReplicas."', args=[d.arg(name='maxReplicas', type=d.T.integer)]), + '#withMaxReplicas':: d.fn(help='"maxReplicas is the upper limit for the number of pods that can be set by the autoscaler; cannot be smaller than MinReplicas."', args=[d.arg(name='maxReplicas', type=d.T.integer)]), withMaxReplicas(maxReplicas): { spec+: { maxReplicas: maxReplicas } }, '#withMinReplicas':: d.fn(help='"minReplicas is the lower limit for the number of replicas to which the autoscaler can scale down. It defaults to 1 pod. minReplicas is allowed to be 0 if the alpha feature gate HPAScaleToZero is enabled and at least one Object or External metric is configured. 
Scaling is active as long as at least one metric value is available."', args=[d.arg(name='minReplicas', type=d.T.integer)]), withMinReplicas(minReplicas): { spec+: { minReplicas: minReplicas } }, - '#withTargetCPUUtilizationPercentage':: d.fn(help='"target average CPU utilization (represented as a percentage of requested CPU) over all the pods; if not specified the default autoscaling policy will be used."', args=[d.arg(name='targetCPUUtilizationPercentage', type=d.T.integer)]), + '#withTargetCPUUtilizationPercentage':: d.fn(help='"targetCPUUtilizationPercentage is the target average CPU utilization (represented as a percentage of requested CPU) over all the pods; if not specified the default autoscaling policy will be used."', args=[d.arg(name='targetCPUUtilizationPercentage', type=d.T.integer)]), withTargetCPUUtilizationPercentage(targetCPUUtilizationPercentage): { spec+: { targetCPUUtilizationPercentage: targetCPUUtilizationPercentage } }, }, '#mixin': 'ignore', diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v1/horizontalPodAutoscalerSpec.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v1/horizontalPodAutoscalerSpec.libsonnet similarity index 53% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v1/horizontalPodAutoscalerSpec.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v1/horizontalPodAutoscalerSpec.libsonnet index 21610e5124c..c23f8a26d76 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v1/horizontalPodAutoscalerSpec.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v1/horizontalPodAutoscalerSpec.libsonnet @@ -3,18 +3,18 @@ '#':: d.pkg(name='horizontalPodAutoscalerSpec', url='', help='"specification of a horizontal pod autoscaler."'), '#scaleTargetRef':: d.obj(help='"CrossVersionObjectReference contains enough information to let you identify the referred resource."'), scaleTargetRef: { - '#withApiVersion':: d.fn(help='"API version of the referent"', args=[d.arg(name='apiVersion', type=d.T.string)]), + '#withApiVersion':: d.fn(help='"apiVersion is the API version of the referent"', args=[d.arg(name='apiVersion', type=d.T.string)]), withApiVersion(apiVersion): { scaleTargetRef+: { apiVersion: apiVersion } }, - '#withKind':: d.fn(help='"Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds\\', args=[d.arg(name='kind', type=d.T.string)]), + '#withKind':: d.fn(help='"kind is the kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds"', args=[d.arg(name='kind', type=d.T.string)]), withKind(kind): { scaleTargetRef+: { kind: kind } }, - '#withName':: d.fn(help='"Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"name is the name of the referent; More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { scaleTargetRef+: { name: name } }, }, - '#withMaxReplicas':: d.fn(help='"upper limit for the number of pods that can be set by the autoscaler; cannot be smaller than MinReplicas."', args=[d.arg(name='maxReplicas', 
type=d.T.integer)]), + '#withMaxReplicas':: d.fn(help='"maxReplicas is the upper limit for the number of pods that can be set by the autoscaler; cannot be smaller than MinReplicas."', args=[d.arg(name='maxReplicas', type=d.T.integer)]), withMaxReplicas(maxReplicas): { maxReplicas: maxReplicas }, '#withMinReplicas':: d.fn(help='"minReplicas is the lower limit for the number of replicas to which the autoscaler can scale down. It defaults to 1 pod. minReplicas is allowed to be 0 if the alpha feature gate HPAScaleToZero is enabled and at least one Object or External metric is configured. Scaling is active as long as at least one metric value is available."', args=[d.arg(name='minReplicas', type=d.T.integer)]), withMinReplicas(minReplicas): { minReplicas: minReplicas }, - '#withTargetCPUUtilizationPercentage':: d.fn(help='"target average CPU utilization (represented as a percentage of requested CPU) over all the pods; if not specified the default autoscaling policy will be used."', args=[d.arg(name='targetCPUUtilizationPercentage', type=d.T.integer)]), + '#withTargetCPUUtilizationPercentage':: d.fn(help='"targetCPUUtilizationPercentage is the target average CPU utilization (represented as a percentage of requested CPU) over all the pods; if not specified the default autoscaling policy will be used."', args=[d.arg(name='targetCPUUtilizationPercentage', type=d.T.integer)]), withTargetCPUUtilizationPercentage(targetCPUUtilizationPercentage): { targetCPUUtilizationPercentage: targetCPUUtilizationPercentage }, '#mixin': 'ignore', mixin: self, diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v1/horizontalPodAutoscalerStatus.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v1/horizontalPodAutoscalerStatus.libsonnet similarity index 51% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v1/horizontalPodAutoscalerStatus.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v1/horizontalPodAutoscalerStatus.libsonnet index 97bab8e1e7f..d65ef26bddc 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v1/horizontalPodAutoscalerStatus.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v1/horizontalPodAutoscalerStatus.libsonnet @@ -1,15 +1,15 @@ { local d = (import 'doc-util/main.libsonnet'), '#':: d.pkg(name='horizontalPodAutoscalerStatus', url='', help='"current status of a horizontal pod autoscaler"'), - '#withCurrentCPUUtilizationPercentage':: d.fn(help='"current average CPU utilization over all pods, represented as a percentage of requested CPU, e.g. 70 means that an average pod is using now 70% of its requested CPU."', args=[d.arg(name='currentCPUUtilizationPercentage', type=d.T.integer)]), + '#withCurrentCPUUtilizationPercentage':: d.fn(help='"currentCPUUtilizationPercentage is the current average CPU utilization over all pods, represented as a percentage of requested CPU, e.g. 
70 means that an average pod is using now 70% of its requested CPU."', args=[d.arg(name='currentCPUUtilizationPercentage', type=d.T.integer)]), withCurrentCPUUtilizationPercentage(currentCPUUtilizationPercentage): { currentCPUUtilizationPercentage: currentCPUUtilizationPercentage }, - '#withCurrentReplicas':: d.fn(help='"current number of replicas of pods managed by this autoscaler."', args=[d.arg(name='currentReplicas', type=d.T.integer)]), + '#withCurrentReplicas':: d.fn(help='"currentReplicas is the current number of replicas of pods managed by this autoscaler."', args=[d.arg(name='currentReplicas', type=d.T.integer)]), withCurrentReplicas(currentReplicas): { currentReplicas: currentReplicas }, - '#withDesiredReplicas':: d.fn(help='"desired number of replicas of pods managed by this autoscaler."', args=[d.arg(name='desiredReplicas', type=d.T.integer)]), + '#withDesiredReplicas':: d.fn(help='"desiredReplicas is the desired number of replicas of pods managed by this autoscaler."', args=[d.arg(name='desiredReplicas', type=d.T.integer)]), withDesiredReplicas(desiredReplicas): { desiredReplicas: desiredReplicas }, '#withLastScaleTime':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='lastScaleTime', type=d.T.string)]), withLastScaleTime(lastScaleTime): { lastScaleTime: lastScaleTime }, - '#withObservedGeneration':: d.fn(help='"most recent generation observed by this autoscaler."', args=[d.arg(name='observedGeneration', type=d.T.integer)]), + '#withObservedGeneration':: d.fn(help='"observedGeneration is the most recent generation observed by this autoscaler."', args=[d.arg(name='observedGeneration', type=d.T.integer)]), withObservedGeneration(observedGeneration): { observedGeneration: observedGeneration }, '#mixin': 'ignore', mixin: self, diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v1/main.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v1/main.libsonnet similarity index 100% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v1/main.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v1/main.libsonnet diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v1/scale.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v1/scale.libsonnet similarity index 82% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v1/scale.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v1/scale.libsonnet index 62241a0c3dd..3227c261fe6 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v1/scale.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v1/scale.libsonnet @@ -3,12 +3,10 @@ '#':: d.pkg(name='scale', url='', help='"Scale represents a scaling request for a resource."'), '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map 
stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -19,21 +17,21 @@ withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. 
If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { metadata+: { generateName: generateName } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { metadata+: { labels+: labels } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. 
Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { metadata+: { namespace: namespace } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, @@ -41,9 +39,9 @@ withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. 
Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { metadata+: { uid: uid } }, }, '#new':: d.fn(help='new returns an instance of Scale', args=[d.arg(name='name', type=d.T.string)]), @@ -53,7 +51,7 @@ } + self.metadata.withName(name=name), '#spec':: d.obj(help='"ScaleSpec describes the attributes of a scale subresource."'), spec: { - '#withReplicas':: d.fn(help='"desired number of instances for the scaled object."', args=[d.arg(name='replicas', type=d.T.integer)]), + '#withReplicas':: d.fn(help='"replicas is the desired number of instances for the scaled object."', args=[d.arg(name='replicas', type=d.T.integer)]), withReplicas(replicas): { spec+: { replicas: replicas } }, }, '#mixin': 'ignore', diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v1/scaleSpec.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v1/scaleSpec.libsonnet similarity index 62% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v1/scaleSpec.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v1/scaleSpec.libsonnet index a3c6898d409..129ad5527b7 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v1/scaleSpec.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v1/scaleSpec.libsonnet @@ -1,7 +1,7 @@ { local d = (import 'doc-util/main.libsonnet'), '#':: d.pkg(name='scaleSpec', url='', help='"ScaleSpec describes the attributes of a scale subresource."'), - '#withReplicas':: d.fn(help='"desired number of instances for the scaled object."', args=[d.arg(name='replicas', type=d.T.integer)]), + '#withReplicas':: d.fn(help='"replicas is the desired number of instances for the scaled object."', args=[d.arg(name='replicas', type=d.T.integer)]), withReplicas(replicas): { replicas: replicas }, '#mixin': 'ignore', mixin: self, diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v1/scaleStatus.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v1/scaleStatus.libsonnet new file mode 100644 index 00000000000..28af9c0cc5b --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v1/scaleStatus.libsonnet @@ -0,0 +1,10 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='scaleStatus', url='', help='"ScaleStatus represents the current status of a scale subresource."'), + '#withReplicas':: d.fn(help='"replicas is the actual number of observed instances of the scaled object."', args=[d.arg(name='replicas', type=d.T.integer)]), + withReplicas(replicas): { replicas: replicas }, + '#withSelector':: d.fn(help='"selector is the label query over pods that should match the replicas count. This is same as the label selector but in the string format to avoid introspection by clients. The string will be in the same format as the query-param syntax. 
More info about label selectors: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/"', args=[d.arg(name='selector', type=d.T.string)]), + withSelector(selector): { selector: selector }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/containerResourceMetricSource.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/containerResourceMetricSource.libsonnet new file mode 100644 index 00000000000..9887c4c9403 --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/containerResourceMetricSource.libsonnet @@ -0,0 +1,21 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='containerResourceMetricSource', url='', help='"ContainerResourceMetricSource indicates how to scale on a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). The values will be averaged together before being compared to the target. Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \\"pods\\" source. Only one \\"target\\" type should be set."'), + '#target':: d.obj(help='"MetricTarget defines the target value, average value, or average utilization of a specific metric"'), + target: { + '#withAverageUtilization':: d.fn(help='"averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for Resource metric source type"', args=[d.arg(name='averageUtilization', type=d.T.integer)]), + withAverageUtilization(averageUtilization): { target+: { averageUtilization: averageUtilization } }, + '#withAverageValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n``` \u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n\\n\\t(Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n\\n\\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n\\n\\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e ```\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) 
This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n\\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\\n\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n\\n- 1.5 will be serialized as \\\"1500m\\\" - 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='averageValue', type=d.T.string)]), + withAverageValue(averageValue): { target+: { averageValue: averageValue } }, + '#withType':: d.fn(help='"type represents whether the metric type is Utilization, Value, or AverageValue"', args=[d.arg(name='type', type=d.T.string)]), + withType(type): { target+: { type: type } }, + '#withValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n``` \u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n\\n\\t(Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n\\n\\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n\\n\\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e ```\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". 
This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n\\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\\n\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n\\n- 1.5 will be serialized as \\\"1500m\\\" - 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='value', type=d.T.string)]), + withValue(value): { target+: { value: value } }, + }, + '#withContainer':: d.fn(help='"container is the name of the container in the pods of the scaling target"', args=[d.arg(name='container', type=d.T.string)]), + withContainer(container): { container: container }, + '#withName':: d.fn(help='"name is the name of the resource in question."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { name: name }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/containerResourceMetricStatus.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/containerResourceMetricStatus.libsonnet new file mode 100644 index 00000000000..70708c85b31 --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/containerResourceMetricStatus.libsonnet @@ -0,0 +1,19 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='containerResourceMetricStatus', url='', help='"ContainerResourceMetricStatus indicates the current value of a resource metric known to Kubernetes, as specified in requests and limits, describing a single container in each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \\"pods\\" source."'), + '#current':: d.obj(help='"MetricValueStatus holds the current value for a metric"'), + current: { + '#withAverageUtilization':: d.fn(help='"currentAverageUtilization is the current value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods."', args=[d.arg(name='averageUtilization', type=d.T.integer)]), + withAverageUtilization(averageUtilization): { current+: { averageUtilization: averageUtilization } }, + '#withAverageValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n``` \u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n\\n\\t(Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\\n\u003cdigit\u003e ::= 0 | 1 | ... 
| 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n\\n\\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n\\n\\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e ```\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n\\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\\n\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n\\n- 1.5 will be serialized as \\\"1500m\\\" - 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='averageValue', type=d.T.string)]), + withAverageValue(averageValue): { current+: { averageValue: averageValue } }, + '#withValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n``` \u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n\\n\\t(Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. 
| .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n\\n\\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n\\n\\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e ```\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n\\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\\n\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n\\n- 1.5 will be serialized as \\\"1500m\\\" - 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. 
(So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='value', type=d.T.string)]), + withValue(value): { current+: { value: value } }, + }, + '#withContainer':: d.fn(help='"container is the name of the container in the pods of the scaling target"', args=[d.arg(name='container', type=d.T.string)]), + withContainer(container): { container: container }, + '#withName':: d.fn(help='"name is the name of the resource in question."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { name: name }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/crossVersionObjectReference.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/crossVersionObjectReference.libsonnet new file mode 100644 index 00000000000..04c0e544aa8 --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/crossVersionObjectReference.libsonnet @@ -0,0 +1,10 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='crossVersionObjectReference', url='', help='"CrossVersionObjectReference contains enough information to let you identify the referred resource."'), + '#withKind':: d.fn(help='"kind is the kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds"', args=[d.arg(name='kind', type=d.T.string)]), + withKind(kind): { kind: kind }, + '#withName':: d.fn(help='"name is the name of the referent; More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { name: name }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/externalMetricSource.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/externalMetricSource.libsonnet new file mode 100644 index 00000000000..edd8e7ffd6a --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/externalMetricSource.libsonnet @@ -0,0 +1,33 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='externalMetricSource', url='', help='"ExternalMetricSource indicates how to scale on a metric not associated with any Kubernetes object (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster)."'), + '#metric':: d.obj(help='"MetricIdentifier defines the name and optionally selector for a metric"'), + metric: { + '#selector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), + selector: { + '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. 
The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressions(matchExpressions): { metric+: { selector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } }, + '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressionsMixin(matchExpressions): { metric+: { selector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } }, + '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabels(matchLabels): { metric+: { selector+: { matchLabels: matchLabels } } }, + '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabelsMixin(matchLabels): { metric+: { selector+: { matchLabels+: matchLabels } } }, + }, + '#withName':: d.fn(help='"name is the name of the given metric"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { metric+: { name: name } }, + }, + '#target':: d.obj(help='"MetricTarget defines the target value, average value, or average utilization of a specific metric"'), + target: { + '#withAverageUtilization':: d.fn(help='"averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for Resource metric source type"', args=[d.arg(name='averageUtilization', type=d.T.integer)]), + withAverageUtilization(averageUtilization): { target+: { averageUtilization: averageUtilization } }, + '#withAverageValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n``` \u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n\\n\\t(Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. 
| .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n\\n\\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n\\n\\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e ```\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n\\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\\n\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n\\n- 1.5 will be serialized as \\\"1500m\\\" - 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='averageValue', type=d.T.string)]), + withAverageValue(averageValue): { target+: { averageValue: averageValue } }, + '#withType':: d.fn(help='"type represents whether the metric type is Utilization, Value, or AverageValue"', args=[d.arg(name='type', type=d.T.string)]), + withType(type): { target+: { type: type } }, + '#withValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n``` \u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n\\n\\t(Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. 
| .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n\\n\\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n\\n\\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e ```\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n\\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\\n\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n\\n- 1.5 will be serialized as \\\"1500m\\\" - 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. 
(So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='value', type=d.T.string)]), + withValue(value): { target+: { value: value } }, + }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/externalMetricStatus.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/externalMetricStatus.libsonnet new file mode 100644 index 00000000000..295eef8ea87 --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/externalMetricStatus.libsonnet @@ -0,0 +1,31 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='externalMetricStatus', url='', help='"ExternalMetricStatus indicates the current value of a global metric not associated with any Kubernetes object."'), + '#current':: d.obj(help='"MetricValueStatus holds the current value for a metric"'), + current: { + '#withAverageUtilization':: d.fn(help='"currentAverageUtilization is the current value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods."', args=[d.arg(name='averageUtilization', type=d.T.integer)]), + withAverageUtilization(averageUtilization): { current+: { averageUtilization: averageUtilization } }, + '#withAverageValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n``` \u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n\\n\\t(Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n\\n\\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n\\n\\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e ```\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". 
This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n\\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\\n\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n\\n- 1.5 will be serialized as \\\"1500m\\\" - 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='averageValue', type=d.T.string)]), + withAverageValue(averageValue): { current+: { averageValue: averageValue } }, + '#withValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n``` \u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n\\n\\t(Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n\\n\\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n\\n\\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e ```\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n\\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\\n\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n\\n- 1.5 will be serialized as \\\"1500m\\\" - 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. 
That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='value', type=d.T.string)]), + withValue(value): { current+: { value: value } }, + }, + '#metric':: d.obj(help='"MetricIdentifier defines the name and optionally selector for a metric"'), + metric: { + '#selector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), + selector: { + '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressions(matchExpressions): { metric+: { selector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } }, + '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressionsMixin(matchExpressions): { metric+: { selector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } }, + '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabels(matchLabels): { metric+: { selector+: { matchLabels: matchLabels } } }, + '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". 
The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabelsMixin(matchLabels): { metric+: { selector+: { matchLabels+: matchLabels } } }, + }, + '#withName':: d.fn(help='"name is the name of the given metric"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { metric+: { name: name } }, + }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/horizontalPodAutoscaler.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/horizontalPodAutoscaler.libsonnet similarity index 86% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/horizontalPodAutoscaler.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/horizontalPodAutoscaler.libsonnet index b9a30db9fa2..7b3c26c5526 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/horizontalPodAutoscaler.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/horizontalPodAutoscaler.libsonnet @@ -3,12 +3,10 @@ '#':: d.pkg(name='horizontalPodAutoscaler', url='', help='"HorizontalPodAutoscaler is the configuration for a horizontal pod autoscaler, which automatically manages the replica count of any resource implementing the scale subresource based on the metrics specified."'), '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -19,21 +17,21 @@ withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. 
The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { metadata+: { generateName: generateName } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { metadata+: { labels+: labels } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. 
This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { metadata+: { namespace: namespace } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. 
There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, @@ -41,14 +39,14 @@ withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { metadata+: { uid: uid } }, }, '#new':: d.fn(help='new returns an instance of HorizontalPodAutoscaler', args=[d.arg(name='name', type=d.T.string)]), new(name): { - apiVersion: 'autoscaling/v2beta2', + apiVersion: 'autoscaling/v2', kind: 'HorizontalPodAutoscaler', } + self.metadata.withName(name=name), '#spec':: d.obj(help='"HorizontalPodAutoscalerSpec describes the desired functionality of the HorizontalPodAutoscaler."'), @@ -61,9 +59,9 @@ withPolicies(policies): { spec+: { behavior+: { scaleDown+: { policies: if std.isArray(v=policies) then policies else [policies] } } } }, '#withPoliciesMixin':: d.fn(help='"policies is a list of potential scaling polices which can be used during scaling. 
At least one policy must be specified, otherwise the HPAScalingRules will be discarded as invalid"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='policies', type=d.T.array)]), withPoliciesMixin(policies): { spec+: { behavior+: { scaleDown+: { policies+: if std.isArray(v=policies) then policies else [policies] } } } }, - '#withSelectPolicy':: d.fn(help='"selectPolicy is used to specify which policy should be used. If not set, the default value MaxPolicySelect is used."', args=[d.arg(name='selectPolicy', type=d.T.string)]), + '#withSelectPolicy':: d.fn(help='"selectPolicy is used to specify which policy should be used. If not set, the default value Max is used."', args=[d.arg(name='selectPolicy', type=d.T.string)]), withSelectPolicy(selectPolicy): { spec+: { behavior+: { scaleDown+: { selectPolicy: selectPolicy } } } }, - '#withStabilizationWindowSeconds':: d.fn(help='"StabilizationWindowSeconds is the number of seconds for which past recommendations should be considered while scaling up or scaling down. StabilizationWindowSeconds must be greater than or equal to zero and less than or equal to 3600 (one hour). If not set, use the default values: - For scale up: 0 (i.e. no stabilization is done). - For scale down: 300 (i.e. the stabilization window is 300 seconds long)."', args=[d.arg(name='stabilizationWindowSeconds', type=d.T.integer)]), + '#withStabilizationWindowSeconds':: d.fn(help='"stabilizationWindowSeconds is the number of seconds for which past recommendations should be considered while scaling up or scaling down. StabilizationWindowSeconds must be greater than or equal to zero and less than or equal to 3600 (one hour). If not set, use the default values: - For scale up: 0 (i.e. no stabilization is done). - For scale down: 300 (i.e. the stabilization window is 300 seconds long)."', args=[d.arg(name='stabilizationWindowSeconds', type=d.T.integer)]), withStabilizationWindowSeconds(stabilizationWindowSeconds): { spec+: { behavior+: { scaleDown+: { stabilizationWindowSeconds: stabilizationWindowSeconds } } } }, }, '#scaleUp':: d.obj(help='"HPAScalingRules configures the scaling behavior for one direction. These Rules are applied after calculating DesiredReplicas from metrics for the HPA. They can limit the scaling velocity by specifying scaling policies. They can prevent flapping by specifying the stabilization window, so that the number of replicas is not set instantly, instead, the safest value from the stabilization window is chosen."'), @@ -72,19 +70,19 @@ withPolicies(policies): { spec+: { behavior+: { scaleUp+: { policies: if std.isArray(v=policies) then policies else [policies] } } } }, '#withPoliciesMixin':: d.fn(help='"policies is a list of potential scaling polices which can be used during scaling. At least one policy must be specified, otherwise the HPAScalingRules will be discarded as invalid"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='policies', type=d.T.array)]), withPoliciesMixin(policies): { spec+: { behavior+: { scaleUp+: { policies+: if std.isArray(v=policies) then policies else [policies] } } } }, - '#withSelectPolicy':: d.fn(help='"selectPolicy is used to specify which policy should be used. If not set, the default value MaxPolicySelect is used."', args=[d.arg(name='selectPolicy', type=d.T.string)]), + '#withSelectPolicy':: d.fn(help='"selectPolicy is used to specify which policy should be used. 
If not set, the default value Max is used."', args=[d.arg(name='selectPolicy', type=d.T.string)]), withSelectPolicy(selectPolicy): { spec+: { behavior+: { scaleUp+: { selectPolicy: selectPolicy } } } }, - '#withStabilizationWindowSeconds':: d.fn(help='"StabilizationWindowSeconds is the number of seconds for which past recommendations should be considered while scaling up or scaling down. StabilizationWindowSeconds must be greater than or equal to zero and less than or equal to 3600 (one hour). If not set, use the default values: - For scale up: 0 (i.e. no stabilization is done). - For scale down: 300 (i.e. the stabilization window is 300 seconds long)."', args=[d.arg(name='stabilizationWindowSeconds', type=d.T.integer)]), + '#withStabilizationWindowSeconds':: d.fn(help='"stabilizationWindowSeconds is the number of seconds for which past recommendations should be considered while scaling up or scaling down. StabilizationWindowSeconds must be greater than or equal to zero and less than or equal to 3600 (one hour). If not set, use the default values: - For scale up: 0 (i.e. no stabilization is done). - For scale down: 300 (i.e. the stabilization window is 300 seconds long)."', args=[d.arg(name='stabilizationWindowSeconds', type=d.T.integer)]), withStabilizationWindowSeconds(stabilizationWindowSeconds): { spec+: { behavior+: { scaleUp+: { stabilizationWindowSeconds: stabilizationWindowSeconds } } } }, }, }, '#scaleTargetRef':: d.obj(help='"CrossVersionObjectReference contains enough information to let you identify the referred resource."'), scaleTargetRef: { - '#withApiVersion':: d.fn(help='"API version of the referent"', args=[d.arg(name='apiVersion', type=d.T.string)]), + '#withApiVersion':: d.fn(help='"apiVersion is the API version of the referent"', args=[d.arg(name='apiVersion', type=d.T.string)]), withApiVersion(apiVersion): { spec+: { scaleTargetRef+: { apiVersion: apiVersion } } }, - '#withKind':: d.fn(help='"Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds\\', args=[d.arg(name='kind', type=d.T.string)]), + '#withKind':: d.fn(help='"kind is the kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds"', args=[d.arg(name='kind', type=d.T.string)]), withKind(kind): { spec+: { scaleTargetRef+: { kind: kind } } }, - '#withName':: d.fn(help='"Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"name is the name of the referent; More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { spec+: { scaleTargetRef+: { name: name } } }, }, '#withMaxReplicas':: d.fn(help='"maxReplicas is the upper limit for the number of replicas to which the autoscaler can scale up. 
It cannot be less that minReplicas."', args=[d.arg(name='maxReplicas', type=d.T.integer)]), diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/horizontalPodAutoscalerBehavior.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/horizontalPodAutoscalerBehavior.libsonnet similarity index 93% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/horizontalPodAutoscalerBehavior.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/horizontalPodAutoscalerBehavior.libsonnet index 5cb33acd711..858546ce3c6 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/horizontalPodAutoscalerBehavior.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/horizontalPodAutoscalerBehavior.libsonnet @@ -7,9 +7,9 @@ withPolicies(policies): { scaleDown+: { policies: if std.isArray(v=policies) then policies else [policies] } }, '#withPoliciesMixin':: d.fn(help='"policies is a list of potential scaling polices which can be used during scaling. At least one policy must be specified, otherwise the HPAScalingRules will be discarded as invalid"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='policies', type=d.T.array)]), withPoliciesMixin(policies): { scaleDown+: { policies+: if std.isArray(v=policies) then policies else [policies] } }, - '#withSelectPolicy':: d.fn(help='"selectPolicy is used to specify which policy should be used. If not set, the default value MaxPolicySelect is used."', args=[d.arg(name='selectPolicy', type=d.T.string)]), + '#withSelectPolicy':: d.fn(help='"selectPolicy is used to specify which policy should be used. If not set, the default value Max is used."', args=[d.arg(name='selectPolicy', type=d.T.string)]), withSelectPolicy(selectPolicy): { scaleDown+: { selectPolicy: selectPolicy } }, - '#withStabilizationWindowSeconds':: d.fn(help='"StabilizationWindowSeconds is the number of seconds for which past recommendations should be considered while scaling up or scaling down. StabilizationWindowSeconds must be greater than or equal to zero and less than or equal to 3600 (one hour). If not set, use the default values: - For scale up: 0 (i.e. no stabilization is done). - For scale down: 300 (i.e. the stabilization window is 300 seconds long)."', args=[d.arg(name='stabilizationWindowSeconds', type=d.T.integer)]), + '#withStabilizationWindowSeconds':: d.fn(help='"stabilizationWindowSeconds is the number of seconds for which past recommendations should be considered while scaling up or scaling down. StabilizationWindowSeconds must be greater than or equal to zero and less than or equal to 3600 (one hour). If not set, use the default values: - For scale up: 0 (i.e. no stabilization is done). - For scale down: 300 (i.e. the stabilization window is 300 seconds long)."', args=[d.arg(name='stabilizationWindowSeconds', type=d.T.integer)]), withStabilizationWindowSeconds(stabilizationWindowSeconds): { scaleDown+: { stabilizationWindowSeconds: stabilizationWindowSeconds } }, }, '#scaleUp':: d.obj(help='"HPAScalingRules configures the scaling behavior for one direction. These Rules are applied after calculating DesiredReplicas from metrics for the HPA. 
They can limit the scaling velocity by specifying scaling policies. They can prevent flapping by specifying the stabilization window, so that the number of replicas is not set instantly, instead, the safest value from the stabilization window is chosen."'), @@ -18,9 +18,9 @@ withPolicies(policies): { scaleUp+: { policies: if std.isArray(v=policies) then policies else [policies] } }, '#withPoliciesMixin':: d.fn(help='"policies is a list of potential scaling polices which can be used during scaling. At least one policy must be specified, otherwise the HPAScalingRules will be discarded as invalid"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='policies', type=d.T.array)]), withPoliciesMixin(policies): { scaleUp+: { policies+: if std.isArray(v=policies) then policies else [policies] } }, - '#withSelectPolicy':: d.fn(help='"selectPolicy is used to specify which policy should be used. If not set, the default value MaxPolicySelect is used."', args=[d.arg(name='selectPolicy', type=d.T.string)]), + '#withSelectPolicy':: d.fn(help='"selectPolicy is used to specify which policy should be used. If not set, the default value Max is used."', args=[d.arg(name='selectPolicy', type=d.T.string)]), withSelectPolicy(selectPolicy): { scaleUp+: { selectPolicy: selectPolicy } }, - '#withStabilizationWindowSeconds':: d.fn(help='"StabilizationWindowSeconds is the number of seconds for which past recommendations should be considered while scaling up or scaling down. StabilizationWindowSeconds must be greater than or equal to zero and less than or equal to 3600 (one hour). If not set, use the default values: - For scale up: 0 (i.e. no stabilization is done). - For scale down: 300 (i.e. the stabilization window is 300 seconds long)."', args=[d.arg(name='stabilizationWindowSeconds', type=d.T.integer)]), + '#withStabilizationWindowSeconds':: d.fn(help='"stabilizationWindowSeconds is the number of seconds for which past recommendations should be considered while scaling up or scaling down. StabilizationWindowSeconds must be greater than or equal to zero and less than or equal to 3600 (one hour). If not set, use the default values: - For scale up: 0 (i.e. no stabilization is done). - For scale down: 300 (i.e. 
the stabilization window is 300 seconds long)."', args=[d.arg(name='stabilizationWindowSeconds', type=d.T.integer)]), withStabilizationWindowSeconds(stabilizationWindowSeconds): { scaleUp+: { stabilizationWindowSeconds: stabilizationWindowSeconds } }, }, '#mixin': 'ignore', diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/horizontalPodAutoscalerCondition.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/horizontalPodAutoscalerCondition.libsonnet similarity index 100% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/horizontalPodAutoscalerCondition.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/horizontalPodAutoscalerCondition.libsonnet diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/horizontalPodAutoscalerSpec.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/horizontalPodAutoscalerSpec.libsonnet similarity index 90% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/horizontalPodAutoscalerSpec.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/horizontalPodAutoscalerSpec.libsonnet index efe9096f266..823633e412f 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/horizontalPodAutoscalerSpec.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/horizontalPodAutoscalerSpec.libsonnet @@ -9,9 +9,9 @@ withPolicies(policies): { behavior+: { scaleDown+: { policies: if std.isArray(v=policies) then policies else [policies] } } }, '#withPoliciesMixin':: d.fn(help='"policies is a list of potential scaling polices which can be used during scaling. At least one policy must be specified, otherwise the HPAScalingRules will be discarded as invalid"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='policies', type=d.T.array)]), withPoliciesMixin(policies): { behavior+: { scaleDown+: { policies+: if std.isArray(v=policies) then policies else [policies] } } }, - '#withSelectPolicy':: d.fn(help='"selectPolicy is used to specify which policy should be used. If not set, the default value MaxPolicySelect is used."', args=[d.arg(name='selectPolicy', type=d.T.string)]), + '#withSelectPolicy':: d.fn(help='"selectPolicy is used to specify which policy should be used. If not set, the default value Max is used."', args=[d.arg(name='selectPolicy', type=d.T.string)]), withSelectPolicy(selectPolicy): { behavior+: { scaleDown+: { selectPolicy: selectPolicy } } }, - '#withStabilizationWindowSeconds':: d.fn(help='"StabilizationWindowSeconds is the number of seconds for which past recommendations should be considered while scaling up or scaling down. StabilizationWindowSeconds must be greater than or equal to zero and less than or equal to 3600 (one hour). If not set, use the default values: - For scale up: 0 (i.e. no stabilization is done). - For scale down: 300 (i.e. 
the stabilization window is 300 seconds long)."', args=[d.arg(name='stabilizationWindowSeconds', type=d.T.integer)]), + '#withStabilizationWindowSeconds':: d.fn(help='"stabilizationWindowSeconds is the number of seconds for which past recommendations should be considered while scaling up or scaling down. StabilizationWindowSeconds must be greater than or equal to zero and less than or equal to 3600 (one hour). If not set, use the default values: - For scale up: 0 (i.e. no stabilization is done). - For scale down: 300 (i.e. the stabilization window is 300 seconds long)."', args=[d.arg(name='stabilizationWindowSeconds', type=d.T.integer)]), withStabilizationWindowSeconds(stabilizationWindowSeconds): { behavior+: { scaleDown+: { stabilizationWindowSeconds: stabilizationWindowSeconds } } }, }, '#scaleUp':: d.obj(help='"HPAScalingRules configures the scaling behavior for one direction. These Rules are applied after calculating DesiredReplicas from metrics for the HPA. They can limit the scaling velocity by specifying scaling policies. They can prevent flapping by specifying the stabilization window, so that the number of replicas is not set instantly, instead, the safest value from the stabilization window is chosen."'), @@ -20,19 +20,19 @@ withPolicies(policies): { behavior+: { scaleUp+: { policies: if std.isArray(v=policies) then policies else [policies] } } }, '#withPoliciesMixin':: d.fn(help='"policies is a list of potential scaling polices which can be used during scaling. At least one policy must be specified, otherwise the HPAScalingRules will be discarded as invalid"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='policies', type=d.T.array)]), withPoliciesMixin(policies): { behavior+: { scaleUp+: { policies+: if std.isArray(v=policies) then policies else [policies] } } }, - '#withSelectPolicy':: d.fn(help='"selectPolicy is used to specify which policy should be used. If not set, the default value MaxPolicySelect is used."', args=[d.arg(name='selectPolicy', type=d.T.string)]), + '#withSelectPolicy':: d.fn(help='"selectPolicy is used to specify which policy should be used. If not set, the default value Max is used."', args=[d.arg(name='selectPolicy', type=d.T.string)]), withSelectPolicy(selectPolicy): { behavior+: { scaleUp+: { selectPolicy: selectPolicy } } }, - '#withStabilizationWindowSeconds':: d.fn(help='"StabilizationWindowSeconds is the number of seconds for which past recommendations should be considered while scaling up or scaling down. StabilizationWindowSeconds must be greater than or equal to zero and less than or equal to 3600 (one hour). If not set, use the default values: - For scale up: 0 (i.e. no stabilization is done). - For scale down: 300 (i.e. the stabilization window is 300 seconds long)."', args=[d.arg(name='stabilizationWindowSeconds', type=d.T.integer)]), + '#withStabilizationWindowSeconds':: d.fn(help='"stabilizationWindowSeconds is the number of seconds for which past recommendations should be considered while scaling up or scaling down. StabilizationWindowSeconds must be greater than or equal to zero and less than or equal to 3600 (one hour). If not set, use the default values: - For scale up: 0 (i.e. no stabilization is done). - For scale down: 300 (i.e. 
the stabilization window is 300 seconds long)."', args=[d.arg(name='stabilizationWindowSeconds', type=d.T.integer)]), withStabilizationWindowSeconds(stabilizationWindowSeconds): { behavior+: { scaleUp+: { stabilizationWindowSeconds: stabilizationWindowSeconds } } }, }, }, '#scaleTargetRef':: d.obj(help='"CrossVersionObjectReference contains enough information to let you identify the referred resource."'), scaleTargetRef: { - '#withApiVersion':: d.fn(help='"API version of the referent"', args=[d.arg(name='apiVersion', type=d.T.string)]), + '#withApiVersion':: d.fn(help='"apiVersion is the API version of the referent"', args=[d.arg(name='apiVersion', type=d.T.string)]), withApiVersion(apiVersion): { scaleTargetRef+: { apiVersion: apiVersion } }, - '#withKind':: d.fn(help='"Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds\\', args=[d.arg(name='kind', type=d.T.string)]), + '#withKind':: d.fn(help='"kind is the kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds"', args=[d.arg(name='kind', type=d.T.string)]), withKind(kind): { scaleTargetRef+: { kind: kind } }, - '#withName':: d.fn(help='"Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"name is the name of the referent; More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { scaleTargetRef+: { name: name } }, }, '#withMaxReplicas':: d.fn(help='"maxReplicas is the upper limit for the number of replicas to which the autoscaler can scale up. 
It cannot be less that minReplicas."', args=[d.arg(name='maxReplicas', type=d.T.integer)]), diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/horizontalPodAutoscalerStatus.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/horizontalPodAutoscalerStatus.libsonnet similarity index 100% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/horizontalPodAutoscalerStatus.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/horizontalPodAutoscalerStatus.libsonnet diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/hpaScalingPolicy.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/hpaScalingPolicy.libsonnet similarity index 79% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/hpaScalingPolicy.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/hpaScalingPolicy.libsonnet index 43c29a9ae0d..42d501bb118 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/hpaScalingPolicy.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/hpaScalingPolicy.libsonnet @@ -1,11 +1,11 @@ { local d = (import 'doc-util/main.libsonnet'), '#':: d.pkg(name='hpaScalingPolicy', url='', help='"HPAScalingPolicy is a single policy which must hold true for a specified past interval."'), - '#withPeriodSeconds':: d.fn(help='"PeriodSeconds specifies the window of time for which the policy should hold true. PeriodSeconds must be greater than zero and less than or equal to 1800 (30 min)."', args=[d.arg(name='periodSeconds', type=d.T.integer)]), + '#withPeriodSeconds':: d.fn(help='"periodSeconds specifies the window of time for which the policy should hold true. PeriodSeconds must be greater than zero and less than or equal to 1800 (30 min)."', args=[d.arg(name='periodSeconds', type=d.T.integer)]), withPeriodSeconds(periodSeconds): { periodSeconds: periodSeconds }, - '#withType':: d.fn(help='"Type is used to specify the scaling policy."', args=[d.arg(name='type', type=d.T.string)]), + '#withType':: d.fn(help='"type is used to specify the scaling policy."', args=[d.arg(name='type', type=d.T.string)]), withType(type): { type: type }, - '#withValue':: d.fn(help='"Value contains the amount of change which is permitted by the policy. It must be greater than zero"', args=[d.arg(name='value', type=d.T.integer)]), + '#withValue':: d.fn(help='"value contains the amount of change which is permitted by the policy. 
It must be greater than zero"', args=[d.arg(name='value', type=d.T.integer)]), withValue(value): { value: value }, '#mixin': 'ignore', mixin: self, diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/hpaScalingRules.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/hpaScalingRules.libsonnet similarity index 93% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/hpaScalingRules.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/hpaScalingRules.libsonnet index 403302875fa..7579f71dc51 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/hpaScalingRules.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/hpaScalingRules.libsonnet @@ -5,9 +5,9 @@ withPolicies(policies): { policies: if std.isArray(v=policies) then policies else [policies] }, '#withPoliciesMixin':: d.fn(help='"policies is a list of potential scaling polices which can be used during scaling. At least one policy must be specified, otherwise the HPAScalingRules will be discarded as invalid"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='policies', type=d.T.array)]), withPoliciesMixin(policies): { policies+: if std.isArray(v=policies) then policies else [policies] }, - '#withSelectPolicy':: d.fn(help='"selectPolicy is used to specify which policy should be used. If not set, the default value MaxPolicySelect is used."', args=[d.arg(name='selectPolicy', type=d.T.string)]), + '#withSelectPolicy':: d.fn(help='"selectPolicy is used to specify which policy should be used. If not set, the default value Max is used."', args=[d.arg(name='selectPolicy', type=d.T.string)]), withSelectPolicy(selectPolicy): { selectPolicy: selectPolicy }, - '#withStabilizationWindowSeconds':: d.fn(help='"StabilizationWindowSeconds is the number of seconds for which past recommendations should be considered while scaling up or scaling down. StabilizationWindowSeconds must be greater than or equal to zero and less than or equal to 3600 (one hour). If not set, use the default values: - For scale up: 0 (i.e. no stabilization is done). - For scale down: 300 (i.e. the stabilization window is 300 seconds long)."', args=[d.arg(name='stabilizationWindowSeconds', type=d.T.integer)]), + '#withStabilizationWindowSeconds':: d.fn(help='"stabilizationWindowSeconds is the number of seconds for which past recommendations should be considered while scaling up or scaling down. StabilizationWindowSeconds must be greater than or equal to zero and less than or equal to 3600 (one hour). If not set, use the default values: - For scale up: 0 (i.e. no stabilization is done). - For scale down: 300 (i.e. 
the stabilization window is 300 seconds long)."', args=[d.arg(name='stabilizationWindowSeconds', type=d.T.integer)]), withStabilizationWindowSeconds(stabilizationWindowSeconds): { stabilizationWindowSeconds: stabilizationWindowSeconds }, '#mixin': 'ignore', mixin: self, diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/main.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/main.libsonnet similarity index 97% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/main.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/main.libsonnet index 537b89b6cb1..4db7c791f37 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/main.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/main.libsonnet @@ -1,6 +1,6 @@ { local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='v2beta2', url='', help=''), + '#':: d.pkg(name='v2', url='', help=''), containerResourceMetricSource: (import 'containerResourceMetricSource.libsonnet'), containerResourceMetricStatus: (import 'containerResourceMetricStatus.libsonnet'), crossVersionObjectReference: (import 'crossVersionObjectReference.libsonnet'), diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/metricIdentifier.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/metricIdentifier.libsonnet similarity index 100% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/metricIdentifier.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/metricIdentifier.libsonnet diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/metricSpec.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/metricSpec.libsonnet new file mode 100644 index 00000000000..6ad19a98920 --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/metricSpec.libsonnet @@ -0,0 +1,141 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='metricSpec', url='', help='"MetricSpec specifies how to scale based on a single metric (only `type` and one other matching field should be set at once)."'), + '#containerResource':: d.obj(help='"ContainerResourceMetricSource indicates how to scale on a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). The values will be averaged together before being compared to the target. Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \\"pods\\" source. 
Only one \\"target\\" type should be set."'), + containerResource: { + '#target':: d.obj(help='"MetricTarget defines the target value, average value, or average utilization of a specific metric"'), + target: { + '#withAverageUtilization':: d.fn(help='"averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for Resource metric source type"', args=[d.arg(name='averageUtilization', type=d.T.integer)]), + withAverageUtilization(averageUtilization): { containerResource+: { target+: { averageUtilization: averageUtilization } } }, + '#withAverageValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n``` \u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n\\n\\t(Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n\\n\\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n\\n\\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e ```\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n\\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\\n\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n\\n- 1.5 will be serialized as \\\"1500m\\\" - 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. 
(So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='averageValue', type=d.T.string)]), + withAverageValue(averageValue): { containerResource+: { target+: { averageValue: averageValue } } }, + '#withType':: d.fn(help='"type represents whether the metric type is Utilization, Value, or AverageValue"', args=[d.arg(name='type', type=d.T.string)]), + withType(type): { containerResource+: { target+: { type: type } } }, + '#withValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n``` \u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n\\n\\t(Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n\\n\\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n\\n\\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e ```\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n\\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\\n\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n\\n- 1.5 will be serialized as \\\"1500m\\\" - 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. 
(So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='value', type=d.T.string)]), + withValue(value): { containerResource+: { target+: { value: value } } }, + }, + '#withContainer':: d.fn(help='"container is the name of the container in the pods of the scaling target"', args=[d.arg(name='container', type=d.T.string)]), + withContainer(container): { containerResource+: { container: container } }, + '#withName':: d.fn(help='"name is the name of the resource in question."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { containerResource+: { name: name } }, + }, + '#external':: d.obj(help='"ExternalMetricSource indicates how to scale on a metric not associated with any Kubernetes object (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster)."'), + external: { + '#metric':: d.obj(help='"MetricIdentifier defines the name and optionally selector for a metric"'), + metric: { + '#selector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), + selector: { + '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressions(matchExpressions): { external+: { metric+: { selector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } } }, + '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressionsMixin(matchExpressions): { external+: { metric+: { selector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } } }, + '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabels(matchLabels): { external+: { metric+: { selector+: { matchLabels: matchLabels } } } }, + '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". 
The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabelsMixin(matchLabels): { external+: { metric+: { selector+: { matchLabels+: matchLabels } } } }, + }, + '#withName':: d.fn(help='"name is the name of the given metric"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { external+: { metric+: { name: name } } }, + }, + '#target':: d.obj(help='"MetricTarget defines the target value, average value, or average utilization of a specific metric"'), + target: { + '#withAverageUtilization':: d.fn(help='"averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for Resource metric source type"', args=[d.arg(name='averageUtilization', type=d.T.integer)]), + withAverageUtilization(averageUtilization): { external+: { target+: { averageUtilization: averageUtilization } } }, + '#withAverageValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n``` \u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n\\n\\t(Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n\\n\\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n\\n\\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e ```\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n\\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\\n\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n\\n- 1.5 will be serialized as \\\"1500m\\\" - 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. 
That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='averageValue', type=d.T.string)]), + withAverageValue(averageValue): { external+: { target+: { averageValue: averageValue } } }, + '#withType':: d.fn(help='"type represents whether the metric type is Utilization, Value, or AverageValue"', args=[d.arg(name='type', type=d.T.string)]), + withType(type): { external+: { target+: { type: type } } }, + '#withValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n``` \u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n\\n\\t(Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n\\n\\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n\\n\\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e ```\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n\\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\\n\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n\\n- 1.5 will be serialized as \\\"1500m\\\" - 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. 
(So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='value', type=d.T.string)]), + withValue(value): { external+: { target+: { value: value } } }, + }, + }, + '#object':: d.obj(help='"ObjectMetricSource indicates how to scale on a metric describing a kubernetes object (for example, hits-per-second on an Ingress object)."'), + object: { + '#describedObject':: d.obj(help='"CrossVersionObjectReference contains enough information to let you identify the referred resource."'), + describedObject: { + '#withApiVersion':: d.fn(help='"apiVersion is the API version of the referent"', args=[d.arg(name='apiVersion', type=d.T.string)]), + withApiVersion(apiVersion): { object+: { describedObject+: { apiVersion: apiVersion } } }, + '#withKind':: d.fn(help='"kind is the kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds"', args=[d.arg(name='kind', type=d.T.string)]), + withKind(kind): { object+: { describedObject+: { kind: kind } } }, + '#withName':: d.fn(help='"name is the name of the referent; More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { object+: { describedObject+: { name: name } } }, + }, + '#metric':: d.obj(help='"MetricIdentifier defines the name and optionally selector for a metric"'), + metric: { + '#selector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), + selector: { + '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressions(matchExpressions): { object+: { metric+: { selector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } } }, + '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressionsMixin(matchExpressions): { object+: { metric+: { selector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } } }, + '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabels(matchLabels): { object+: { metric+: { selector+: { matchLabels: matchLabels } } } }, + '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". 
The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabelsMixin(matchLabels): { object+: { metric+: { selector+: { matchLabels+: matchLabels } } } }, + }, + '#withName':: d.fn(help='"name is the name of the given metric"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { object+: { metric+: { name: name } } }, + }, + '#target':: d.obj(help='"MetricTarget defines the target value, average value, or average utilization of a specific metric"'), + target: { + '#withAverageUtilization':: d.fn(help='"averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for Resource metric source type"', args=[d.arg(name='averageUtilization', type=d.T.integer)]), + withAverageUtilization(averageUtilization): { object+: { target+: { averageUtilization: averageUtilization } } }, + '#withAverageValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n``` \u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n\\n\\t(Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n\\n\\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n\\n\\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e ```\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n\\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\\n\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n\\n- 1.5 will be serialized as \\\"1500m\\\" - 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. 
That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='averageValue', type=d.T.string)]), + withAverageValue(averageValue): { object+: { target+: { averageValue: averageValue } } }, + '#withType':: d.fn(help='"type represents whether the metric type is Utilization, Value, or AverageValue"', args=[d.arg(name='type', type=d.T.string)]), + withType(type): { object+: { target+: { type: type } } }, + '#withValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n``` \u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n\\n\\t(Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n\\n\\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n\\n\\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e ```\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n\\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\\n\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n\\n- 1.5 will be serialized as \\\"1500m\\\" - 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. 
(So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='value', type=d.T.string)]), + withValue(value): { object+: { target+: { value: value } } }, + }, + }, + '#pods':: d.obj(help='"PodsMetricSource indicates how to scale on a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value."'), + pods: { + '#metric':: d.obj(help='"MetricIdentifier defines the name and optionally selector for a metric"'), + metric: { + '#selector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), + selector: { + '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressions(matchExpressions): { pods+: { metric+: { selector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } } }, + '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressionsMixin(matchExpressions): { pods+: { metric+: { selector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } } }, + '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabels(matchLabels): { pods+: { metric+: { selector+: { matchLabels: matchLabels } } } }, + '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabelsMixin(matchLabels): { pods+: { metric+: { selector+: { matchLabels+: matchLabels } } } }, + }, + '#withName':: d.fn(help='"name is the name of the given metric"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { pods+: { metric+: { name: name } } }, + }, + '#target':: d.obj(help='"MetricTarget defines the target value, average value, or average utilization of a specific metric"'), + target: { + '#withAverageUtilization':: d.fn(help='"averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. 
Currently only valid for Resource metric source type"', args=[d.arg(name='averageUtilization', type=d.T.integer)]), + withAverageUtilization(averageUtilization): { pods+: { target+: { averageUtilization: averageUtilization } } }, + '#withAverageValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n``` \u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n\\n\\t(Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n\\n\\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n\\n\\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e ```\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n\\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\\n\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n\\n- 1.5 will be serialized as \\\"1500m\\\" - 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='averageValue', type=d.T.string)]), + withAverageValue(averageValue): { pods+: { target+: { averageValue: averageValue } } }, + '#withType':: d.fn(help='"type represents whether the metric type is Utilization, Value, or AverageValue"', args=[d.arg(name='type', type=d.T.string)]), + withType(type): { pods+: { target+: { type: type } } }, + '#withValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. 
It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n``` \u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n\\n\\t(Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n\\n\\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n\\n\\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e ```\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n\\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\\n\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n\\n- 1.5 will be serialized as \\\"1500m\\\" - 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='value', type=d.T.string)]), + withValue(value): { pods+: { target+: { value: value } } }, + }, + }, + '#resource':: d.obj(help='"ResourceMetricSource indicates how to scale on a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). The values will be averaged together before being compared to the target. Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \\"pods\\" source. 
Only one \\"target\\" type should be set."'), + resource: { + '#target':: d.obj(help='"MetricTarget defines the target value, average value, or average utilization of a specific metric"'), + target: { + '#withAverageUtilization':: d.fn(help='"averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for Resource metric source type"', args=[d.arg(name='averageUtilization', type=d.T.integer)]), + withAverageUtilization(averageUtilization): { resource+: { target+: { averageUtilization: averageUtilization } } }, + '#withAverageValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n``` \u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n\\n\\t(Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n\\n\\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n\\n\\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e ```\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n\\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\\n\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n\\n- 1.5 will be serialized as \\\"1500m\\\" - 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. 
(So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='averageValue', type=d.T.string)]), + withAverageValue(averageValue): { resource+: { target+: { averageValue: averageValue } } }, + '#withType':: d.fn(help='"type represents whether the metric type is Utilization, Value, or AverageValue"', args=[d.arg(name='type', type=d.T.string)]), + withType(type): { resource+: { target+: { type: type } } }, + '#withValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n``` \u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n\\n\\t(Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n\\n\\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n\\n\\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e ```\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n\\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\\n\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n\\n- 1.5 will be serialized as \\\"1500m\\\" - 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. 
(So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='value', type=d.T.string)]), + withValue(value): { resource+: { target+: { value: value } } }, + }, + '#withName':: d.fn(help='"name is the name of the resource in question."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { resource+: { name: name } }, + }, + '#withType':: d.fn(help='"type is the type of metric source. It should be one of \\"ContainerResource\\", \\"External\\", \\"Object\\", \\"Pods\\" or \\"Resource\\", each mapping to a matching field in the object. Note: \\"ContainerResource\\" type is available on when the feature-gate HPAContainerMetrics is enabled"', args=[d.arg(name='type', type=d.T.string)]), + withType(type): { type: type }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/metricStatus.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/metricStatus.libsonnet new file mode 100644 index 00000000000..9ccb2a9f38b --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/metricStatus.libsonnet @@ -0,0 +1,131 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='metricStatus', url='', help='"MetricStatus describes the last-read state of a single metric."'), + '#containerResource':: d.obj(help='"ContainerResourceMetricStatus indicates the current value of a resource metric known to Kubernetes, as specified in requests and limits, describing a single container in each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \\"pods\\" source."'), + containerResource: { + '#current':: d.obj(help='"MetricValueStatus holds the current value for a metric"'), + current: { + '#withAverageUtilization':: d.fn(help='"currentAverageUtilization is the current value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods."', args=[d.arg(name='averageUtilization', type=d.T.integer)]), + withAverageUtilization(averageUtilization): { containerResource+: { current+: { averageUtilization: averageUtilization } } }, + '#withAverageValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n``` \u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n\\n\\t(Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. 
| .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n\\n\\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n\\n\\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e ```\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n\\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\\n\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n\\n- 1.5 will be serialized as \\\"1500m\\\" - 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='averageValue', type=d.T.string)]), + withAverageValue(averageValue): { containerResource+: { current+: { averageValue: averageValue } } }, + '#withValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n``` \u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n\\n\\t(Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. 
| .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n\\n\\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n\\n\\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e ```\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n\\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\\n\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n\\n- 1.5 will be serialized as \\\"1500m\\\" - 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='value', type=d.T.string)]), + withValue(value): { containerResource+: { current+: { value: value } } }, + }, + '#withContainer':: d.fn(help='"container is the name of the container in the pods of the scaling target"', args=[d.arg(name='container', type=d.T.string)]), + withContainer(container): { containerResource+: { container: container } }, + '#withName':: d.fn(help='"name is the name of the resource in question."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { containerResource+: { name: name } }, + }, + '#external':: d.obj(help='"ExternalMetricStatus indicates the current value of a global metric not associated with any Kubernetes object."'), + external: { + '#current':: d.obj(help='"MetricValueStatus holds the current value for a metric"'), + current: { + '#withAverageUtilization':: d.fn(help='"currentAverageUtilization is the current value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods."', args=[d.arg(name='averageUtilization', type=d.T.integer)]), + withAverageUtilization(averageUtilization): { external+: { current+: { averageUtilization: averageUtilization } } }, + '#withAverageValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. 
It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n``` \u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n\\n\\t(Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n\\n\\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n\\n\\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e ```\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n\\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\\n\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n\\n- 1.5 will be serialized as \\\"1500m\\\" - 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='averageValue', type=d.T.string)]), + withAverageValue(averageValue): { external+: { current+: { averageValue: averageValue } } }, + '#withValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n``` \u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n\\n\\t(Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. 
| .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n\\n\\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n\\n\\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e ```\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n\\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\\n\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n\\n- 1.5 will be serialized as \\\"1500m\\\" - 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='value', type=d.T.string)]), + withValue(value): { external+: { current+: { value: value } } }, + }, + '#metric':: d.obj(help='"MetricIdentifier defines the name and optionally selector for a metric"'), + metric: { + '#selector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), + selector: { + '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressions(matchExpressions): { external+: { metric+: { selector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } } }, + '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressionsMixin(matchExpressions): { external+: { metric+: { selector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } } }, + '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabels(matchLabels): { external+: { metric+: { selector+: { matchLabels: matchLabels } } } }, + '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabelsMixin(matchLabels): { external+: { metric+: { selector+: { matchLabels+: matchLabels } } } }, + }, + '#withName':: d.fn(help='"name is the name of the given metric"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { external+: { metric+: { name: name } } }, + }, + }, + '#object':: d.obj(help='"ObjectMetricStatus indicates the current value of a metric describing a kubernetes object (for example, hits-per-second on an Ingress object)."'), + object: { + '#current':: d.obj(help='"MetricValueStatus holds the current value for a metric"'), + current: { + '#withAverageUtilization':: d.fn(help='"currentAverageUtilization is the current value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods."', args=[d.arg(name='averageUtilization', type=d.T.integer)]), + withAverageUtilization(averageUtilization): { object+: { current+: { averageUtilization: averageUtilization } } }, + '#withAverageValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n``` \u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n\\n\\t(Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n\\n\\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n\\n\\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e ```\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) 
This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n\\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\\n\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n\\n- 1.5 will be serialized as \\\"1500m\\\" - 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='averageValue', type=d.T.string)]), + withAverageValue(averageValue): { object+: { current+: { averageValue: averageValue } } }, + '#withValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n``` \u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n\\n\\t(Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n\\n\\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n\\n\\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e ```\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". 
This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n\\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\\n\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n\\n- 1.5 will be serialized as \\\"1500m\\\" - 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='value', type=d.T.string)]), + withValue(value): { object+: { current+: { value: value } } }, + }, + '#describedObject':: d.obj(help='"CrossVersionObjectReference contains enough information to let you identify the referred resource."'), + describedObject: { + '#withApiVersion':: d.fn(help='"apiVersion is the API version of the referent"', args=[d.arg(name='apiVersion', type=d.T.string)]), + withApiVersion(apiVersion): { object+: { describedObject+: { apiVersion: apiVersion } } }, + '#withKind':: d.fn(help='"kind is the kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds"', args=[d.arg(name='kind', type=d.T.string)]), + withKind(kind): { object+: { describedObject+: { kind: kind } } }, + '#withName':: d.fn(help='"name is the name of the referent; More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { object+: { describedObject+: { name: name } } }, + }, + '#metric':: d.obj(help='"MetricIdentifier defines the name and optionally selector for a metric"'), + metric: { + '#selector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), + selector: { + '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressions(matchExpressions): { object+: { metric+: { selector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } } }, + '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressionsMixin(matchExpressions): { object+: { metric+: { selector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } } }, + '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". 
The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabels(matchLabels): { object+: { metric+: { selector+: { matchLabels: matchLabels } } } }, + '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabelsMixin(matchLabels): { object+: { metric+: { selector+: { matchLabels+: matchLabels } } } }, + }, + '#withName':: d.fn(help='"name is the name of the given metric"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { object+: { metric+: { name: name } } }, + }, + }, + '#pods':: d.obj(help='"PodsMetricStatus indicates the current value of a metric describing each pod in the current scale target (for example, transactions-processed-per-second)."'), + pods: { + '#current':: d.obj(help='"MetricValueStatus holds the current value for a metric"'), + current: { + '#withAverageUtilization':: d.fn(help='"currentAverageUtilization is the current value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods."', args=[d.arg(name='averageUtilization', type=d.T.integer)]), + withAverageUtilization(averageUtilization): { pods+: { current+: { averageUtilization: averageUtilization } } }, + '#withAverageValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n``` \u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n\\n\\t(Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n\\n\\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n\\n\\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e ```\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". 
This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n\\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\\n\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n\\n- 1.5 will be serialized as \\\"1500m\\\" - 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='averageValue', type=d.T.string)]), + withAverageValue(averageValue): { pods+: { current+: { averageValue: averageValue } } }, + '#withValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n``` \u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n\\n\\t(Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n\\n\\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n\\n\\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e ```\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n\\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\\n\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n\\n- 1.5 will be serialized as \\\"1500m\\\" - 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. 
That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='value', type=d.T.string)]), + withValue(value): { pods+: { current+: { value: value } } }, + }, + '#metric':: d.obj(help='"MetricIdentifier defines the name and optionally selector for a metric"'), + metric: { + '#selector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), + selector: { + '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressions(matchExpressions): { pods+: { metric+: { selector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } } }, + '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressionsMixin(matchExpressions): { pods+: { metric+: { selector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } } }, + '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabels(matchLabels): { pods+: { metric+: { selector+: { matchLabels: matchLabels } } } }, + '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabelsMixin(matchLabels): { pods+: { metric+: { selector+: { matchLabels+: matchLabels } } } }, + }, + '#withName':: d.fn(help='"name is the name of the given metric"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { pods+: { metric+: { name: name } } }, + }, + }, + '#resource':: d.obj(help='"ResourceMetricStatus indicates the current value of a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). 
Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \\"pods\\" source."'), + resource: { + '#current':: d.obj(help='"MetricValueStatus holds the current value for a metric"'), + current: { + '#withAverageUtilization':: d.fn(help='"currentAverageUtilization is the current value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods."', args=[d.arg(name='averageUtilization', type=d.T.integer)]), + withAverageUtilization(averageUtilization): { resource+: { current+: { averageUtilization: averageUtilization } } }, + '#withAverageValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n``` \u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n\\n\\t(Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n\\n\\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n\\n\\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e ```\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n\\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\\n\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n\\n- 1.5 will be serialized as \\\"1500m\\\" - 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. 
(So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='averageValue', type=d.T.string)]), + withAverageValue(averageValue): { resource+: { current+: { averageValue: averageValue } } }, + '#withValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n``` \u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n\\n\\t(Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n\\n\\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n\\n\\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e ```\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n\\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\\n\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n\\n- 1.5 will be serialized as \\\"1500m\\\" - 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. 
(So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='value', type=d.T.string)]), + withValue(value): { resource+: { current+: { value: value } } }, + }, + '#withName':: d.fn(help='"name is the name of the resource in question."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { resource+: { name: name } }, + }, + '#withType':: d.fn(help='"type is the type of metric source. It will be one of \\"ContainerResource\\", \\"External\\", \\"Object\\", \\"Pods\\" or \\"Resource\\", each corresponds to a matching field in the object. Note: \\"ContainerResource\\" type is available on when the feature-gate HPAContainerMetrics is enabled"', args=[d.arg(name='type', type=d.T.string)]), + withType(type): { type: type }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/metricTarget.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/metricTarget.libsonnet new file mode 100644 index 00000000000..a73f1451ad0 --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/metricTarget.libsonnet @@ -0,0 +1,14 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='metricTarget', url='', help='"MetricTarget defines the target value, average value, or average utilization of a specific metric"'), + '#withAverageUtilization':: d.fn(help='"averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for Resource metric source type"', args=[d.arg(name='averageUtilization', type=d.T.integer)]), + withAverageUtilization(averageUtilization): { averageUtilization: averageUtilization }, + '#withAverageValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n``` \u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n\\n\\t(Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. 
| .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n\\n\\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n\\n\\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e ```\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n\\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\\n\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n\\n- 1.5 will be serialized as \\\"1500m\\\" - 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='averageValue', type=d.T.string)]), + withAverageValue(averageValue): { averageValue: averageValue }, + '#withType':: d.fn(help='"type represents whether the metric type is Utilization, Value, or AverageValue"', args=[d.arg(name='type', type=d.T.string)]), + withType(type): { type: type }, + '#withValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n``` \u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n\\n\\t(Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. 
| .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n\\n\\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n\\n\\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e ```\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n\\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\\n\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n\\n- 1.5 will be serialized as \\\"1500m\\\" - 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='value', type=d.T.string)]), + withValue(value): { value: value }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/metricValueStatus.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/metricValueStatus.libsonnet new file mode 100644 index 00000000000..51dec374da6 --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/metricValueStatus.libsonnet @@ -0,0 +1,12 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='metricValueStatus', url='', help='"MetricValueStatus holds the current value for a metric"'), + '#withAverageUtilization':: d.fn(help='"currentAverageUtilization is the current value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods."', args=[d.arg(name='averageUtilization', type=d.T.integer)]), + withAverageUtilization(averageUtilization): { averageUtilization: averageUtilization }, + '#withAverageValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. 
It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n``` \u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n\\n\\t(Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n\\n\\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n\\n\\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e ```\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n\\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\\n\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n\\n- 1.5 will be serialized as \\\"1500m\\\" - 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='averageValue', type=d.T.string)]), + withAverageValue(averageValue): { averageValue: averageValue }, + '#withValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n``` \u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n\\n\\t(Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. 
| .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n\\n\\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n\\n\\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e ```\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n\\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\\n\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n\\n- 1.5 will be serialized as \\\"1500m\\\" - 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. 
(So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='value', type=d.T.string)]), + withValue(value): { value: value }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/objectMetricSource.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/objectMetricSource.libsonnet new file mode 100644 index 00000000000..efd9a4ba1b0 --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/objectMetricSource.libsonnet @@ -0,0 +1,42 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='objectMetricSource', url='', help='"ObjectMetricSource indicates how to scale on a metric describing a kubernetes object (for example, hits-per-second on an Ingress object)."'), + '#describedObject':: d.obj(help='"CrossVersionObjectReference contains enough information to let you identify the referred resource."'), + describedObject: { + '#withApiVersion':: d.fn(help='"apiVersion is the API version of the referent"', args=[d.arg(name='apiVersion', type=d.T.string)]), + withApiVersion(apiVersion): { describedObject+: { apiVersion: apiVersion } }, + '#withKind':: d.fn(help='"kind is the kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds"', args=[d.arg(name='kind', type=d.T.string)]), + withKind(kind): { describedObject+: { kind: kind } }, + '#withName':: d.fn(help='"name is the name of the referent; More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { describedObject+: { name: name } }, + }, + '#metric':: d.obj(help='"MetricIdentifier defines the name and optionally selector for a metric"'), + metric: { + '#selector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), + selector: { + '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressions(matchExpressions): { metric+: { selector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } }, + '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressionsMixin(matchExpressions): { metric+: { selector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } }, + '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". 
The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabels(matchLabels): { metric+: { selector+: { matchLabels: matchLabels } } }, + '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabelsMixin(matchLabels): { metric+: { selector+: { matchLabels+: matchLabels } } }, + }, + '#withName':: d.fn(help='"name is the name of the given metric"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { metric+: { name: name } }, + }, + '#target':: d.obj(help='"MetricTarget defines the target value, average value, or average utilization of a specific metric"'), + target: { + '#withAverageUtilization':: d.fn(help='"averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for Resource metric source type"', args=[d.arg(name='averageUtilization', type=d.T.integer)]), + withAverageUtilization(averageUtilization): { target+: { averageUtilization: averageUtilization } }, + '#withAverageValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n``` \u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n\\n\\t(Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n\\n\\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n\\n\\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e ```\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". 
This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n\\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\\n\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n\\n- 1.5 will be serialized as \\\"1500m\\\" - 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='averageValue', type=d.T.string)]), + withAverageValue(averageValue): { target+: { averageValue: averageValue } }, + '#withType':: d.fn(help='"type represents whether the metric type is Utilization, Value, or AverageValue"', args=[d.arg(name='type', type=d.T.string)]), + withType(type): { target+: { type: type } }, + '#withValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n``` \u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n\\n\\t(Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n\\n\\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n\\n\\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e ```\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". 
This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n\\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\\n\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n\\n- 1.5 will be serialized as \\\"1500m\\\" - 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='value', type=d.T.string)]), + withValue(value): { target+: { value: value } }, + }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/objectMetricStatus.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/objectMetricStatus.libsonnet new file mode 100644 index 00000000000..171bbdd97d6 --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/objectMetricStatus.libsonnet @@ -0,0 +1,40 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='objectMetricStatus', url='', help='"ObjectMetricStatus indicates the current value of a metric describing a kubernetes object (for example, hits-per-second on an Ingress object)."'), + '#current':: d.obj(help='"MetricValueStatus holds the current value for a metric"'), + current: { + '#withAverageUtilization':: d.fn(help='"currentAverageUtilization is the current value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods."', args=[d.arg(name='averageUtilization', type=d.T.integer)]), + withAverageUtilization(averageUtilization): { current+: { averageUtilization: averageUtilization } }, + '#withAverageValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n``` \u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n\\n\\t(Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. 
| .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n\\n\\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n\\n\\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e ```\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n\\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\\n\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n\\n- 1.5 will be serialized as \\\"1500m\\\" - 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='averageValue', type=d.T.string)]), + withAverageValue(averageValue): { current+: { averageValue: averageValue } }, + '#withValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n``` \u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n\\n\\t(Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. 
| .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n\\n\\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n\\n\\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e ```\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n\\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\\n\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n\\n- 1.5 will be serialized as \\\"1500m\\\" - 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='value', type=d.T.string)]), + withValue(value): { current+: { value: value } }, + }, + '#describedObject':: d.obj(help='"CrossVersionObjectReference contains enough information to let you identify the referred resource."'), + describedObject: { + '#withApiVersion':: d.fn(help='"apiVersion is the API version of the referent"', args=[d.arg(name='apiVersion', type=d.T.string)]), + withApiVersion(apiVersion): { describedObject+: { apiVersion: apiVersion } }, + '#withKind':: d.fn(help='"kind is the kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds"', args=[d.arg(name='kind', type=d.T.string)]), + withKind(kind): { describedObject+: { kind: kind } }, + '#withName':: d.fn(help='"name is the name of the referent; More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { describedObject+: { name: name } }, + }, + '#metric':: d.obj(help='"MetricIdentifier defines the name and optionally selector for a metric"'), + metric: { + '#selector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. 
A null label selector matches no objects."'), + selector: { + '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressions(matchExpressions): { metric+: { selector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } }, + '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressionsMixin(matchExpressions): { metric+: { selector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } }, + '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabels(matchLabels): { metric+: { selector+: { matchLabels: matchLabels } } }, + '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabelsMixin(matchLabels): { metric+: { selector+: { matchLabels+: matchLabels } } }, + }, + '#withName':: d.fn(help='"name is the name of the given metric"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { metric+: { name: name } }, + }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/podsMetricSource.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/podsMetricSource.libsonnet new file mode 100644 index 00000000000..ecb6d1b6de8 --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/podsMetricSource.libsonnet @@ -0,0 +1,33 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='podsMetricSource', url='', help='"PodsMetricSource indicates how to scale on a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value."'), + '#metric':: d.obj(help='"MetricIdentifier defines the name and optionally selector for a metric"'), + metric: { + '#selector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), + selector: { + '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. 
The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressions(matchExpressions): { metric+: { selector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } }, + '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressionsMixin(matchExpressions): { metric+: { selector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } }, + '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabels(matchLabels): { metric+: { selector+: { matchLabels: matchLabels } } }, + '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabelsMixin(matchLabels): { metric+: { selector+: { matchLabels+: matchLabels } } }, + }, + '#withName':: d.fn(help='"name is the name of the given metric"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { metric+: { name: name } }, + }, + '#target':: d.obj(help='"MetricTarget defines the target value, average value, or average utilization of a specific metric"'), + target: { + '#withAverageUtilization':: d.fn(help='"averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for Resource metric source type"', args=[d.arg(name='averageUtilization', type=d.T.integer)]), + withAverageUtilization(averageUtilization): { target+: { averageUtilization: averageUtilization } }, + '#withAverageValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n``` \u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n\\n\\t(Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. 
| .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n\\n\\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n\\n\\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e ```\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n\\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\\n\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n\\n- 1.5 will be serialized as \\\"1500m\\\" - 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='averageValue', type=d.T.string)]), + withAverageValue(averageValue): { target+: { averageValue: averageValue } }, + '#withType':: d.fn(help='"type represents whether the metric type is Utilization, Value, or AverageValue"', args=[d.arg(name='type', type=d.T.string)]), + withType(type): { target+: { type: type } }, + '#withValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n``` \u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n\\n\\t(Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. 
| .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n\\n\\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n\\n\\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e ```\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n\\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\\n\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n\\n- 1.5 will be serialized as \\\"1500m\\\" - 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. 
(So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='value', type=d.T.string)]), + withValue(value): { target+: { value: value } }, + }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/podsMetricStatus.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/podsMetricStatus.libsonnet new file mode 100644 index 00000000000..3e1642d991d --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/podsMetricStatus.libsonnet @@ -0,0 +1,31 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='podsMetricStatus', url='', help='"PodsMetricStatus indicates the current value of a metric describing each pod in the current scale target (for example, transactions-processed-per-second)."'), + '#current':: d.obj(help='"MetricValueStatus holds the current value for a metric"'), + current: { + '#withAverageUtilization':: d.fn(help='"currentAverageUtilization is the current value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods."', args=[d.arg(name='averageUtilization', type=d.T.integer)]), + withAverageUtilization(averageUtilization): { current+: { averageUtilization: averageUtilization } }, + '#withAverageValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n``` \u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n\\n\\t(Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n\\n\\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n\\n\\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e ```\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". 
This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n\\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\\n\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n\\n- 1.5 will be serialized as \\\"1500m\\\" - 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='averageValue', type=d.T.string)]), + withAverageValue(averageValue): { current+: { averageValue: averageValue } }, + '#withValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n``` \u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n\\n\\t(Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n\\n\\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n\\n\\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e ```\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n\\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\\n\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n\\n- 1.5 will be serialized as \\\"1500m\\\" - 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. 
That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='value', type=d.T.string)]), + withValue(value): { current+: { value: value } }, + }, + '#metric':: d.obj(help='"MetricIdentifier defines the name and optionally selector for a metric"'), + metric: { + '#selector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), + selector: { + '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressions(matchExpressions): { metric+: { selector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } }, + '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressionsMixin(matchExpressions): { metric+: { selector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } }, + '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabels(matchLabels): { metric+: { selector+: { matchLabels: matchLabels } } }, + '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". 
The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabelsMixin(matchLabels): { metric+: { selector+: { matchLabels+: matchLabels } } }, + }, + '#withName':: d.fn(help='"name is the name of the given metric"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { metric+: { name: name } }, + }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/resourceMetricSource.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/resourceMetricSource.libsonnet new file mode 100644 index 00000000000..6417c9ad6cd --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/resourceMetricSource.libsonnet @@ -0,0 +1,19 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='resourceMetricSource', url='', help='"ResourceMetricSource indicates how to scale on a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). The values will be averaged together before being compared to the target. Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \\"pods\\" source. Only one \\"target\\" type should be set."'), + '#target':: d.obj(help='"MetricTarget defines the target value, average value, or average utilization of a specific metric"'), + target: { + '#withAverageUtilization':: d.fn(help='"averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for Resource metric source type"', args=[d.arg(name='averageUtilization', type=d.T.integer)]), + withAverageUtilization(averageUtilization): { target+: { averageUtilization: averageUtilization } }, + '#withAverageValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n``` \u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n\\n\\t(Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. 
| .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n\\n\\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n\\n\\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e ```\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n\\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\\n\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n\\n- 1.5 will be serialized as \\\"1500m\\\" - 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='averageValue', type=d.T.string)]), + withAverageValue(averageValue): { target+: { averageValue: averageValue } }, + '#withType':: d.fn(help='"type represents whether the metric type is Utilization, Value, or AverageValue"', args=[d.arg(name='type', type=d.T.string)]), + withType(type): { target+: { type: type } }, + '#withValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n``` \u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n\\n\\t(Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. 
| .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n\\n\\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n\\n\\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e ```\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n\\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\\n\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n\\n- 1.5 will be serialized as \\\"1500m\\\" - 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='value', type=d.T.string)]), + withValue(value): { target+: { value: value } }, + }, + '#withName':: d.fn(help='"name is the name of the resource in question."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { name: name }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/resourceMetricStatus.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/resourceMetricStatus.libsonnet new file mode 100644 index 00000000000..7b6da96e998 --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/autoscaling/v2/resourceMetricStatus.libsonnet @@ -0,0 +1,17 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='resourceMetricStatus', url='', help='"ResourceMetricStatus indicates the current value of a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). 
Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \\"pods\\" source."'), + '#current':: d.obj(help='"MetricValueStatus holds the current value for a metric"'), + current: { + '#withAverageUtilization':: d.fn(help='"currentAverageUtilization is the current value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods."', args=[d.arg(name='averageUtilization', type=d.T.integer)]), + withAverageUtilization(averageUtilization): { current+: { averageUtilization: averageUtilization } }, + '#withAverageValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n``` \u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n\\n\\t(Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n\\n\\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n\\n\\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e ```\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n\\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\\n\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n\\n- 1.5 will be serialized as \\\"1500m\\\" - 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. 
(So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='averageValue', type=d.T.string)]), + withAverageValue(averageValue): { current+: { averageValue: averageValue } }, + '#withValue':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n``` \u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n\\n\\t(Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n\\n\\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n\\n\\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e ```\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n\\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\\n\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n\\n- 1.5 will be serialized as \\\"1500m\\\" - 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. 
(So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='value', type=d.T.string)]), + withValue(value): { current+: { value: value } }, + }, + '#withName':: d.fn(help='"name is the name of the resource in question."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { name: name }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/batch/main.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/batch/main.libsonnet similarity index 74% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/batch/main.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/batch/main.libsonnet index f2a9271adbe..7d9c1089d85 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/batch/main.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/batch/main.libsonnet @@ -2,5 +2,4 @@ local d = (import 'doc-util/main.libsonnet'), '#':: d.pkg(name='batch', url='', help=''), v1: (import 'v1/main.libsonnet'), - v1beta1: (import 'v1beta1/main.libsonnet'), } diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/batch/v1/cronJob.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/batch/v1/cronJob.libsonnet similarity index 82% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/batch/v1/cronJob.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/batch/v1/cronJob.libsonnet index 3a7d672898a..8966fb9e8f0 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/batch/v1/cronJob.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/batch/v1/cronJob.libsonnet @@ -3,12 +3,10 @@ '#':: d.pkg(name='cronJob', url='', help='"CronJob represents the configuration of a single cron job."'), '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. 
They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -19,21 +17,21 @@ withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. 
The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { metadata+: { generateName: generateName } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { metadata+: { labels+: labels } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. 
This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { metadata+: { namespace: namespace } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. 
There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, @@ -41,9 +39,9 @@ withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { metadata+: { uid: uid } }, }, '#new':: d.fn(help='new returns an instance of CronJob', args=[d.arg(name='name', type=d.T.string)]), @@ -57,12 +55,10 @@ jobTemplate: { '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { spec+: { jobTemplate+: { metadata+: { annotations: annotations } } } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { spec+: { jobTemplate+: { metadata+: { annotations+: annotations } } } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { spec+: { jobTemplate+: { metadata+: { clusterName: clusterName } } } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { spec+: { jobTemplate+: { metadata+: { creationTimestamp: creationTimestamp } } } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -73,21 +69,21 @@ withFinalizers(finalizers): { spec+: { jobTemplate+: { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } } } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. 
Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { spec+: { jobTemplate+: { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } } } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { spec+: { jobTemplate+: { metadata+: { generateName: generateName } } } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { spec+: { jobTemplate+: { metadata+: { generation: generation } } } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { spec+: { jobTemplate+: { metadata+: { labels: labels } } } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. 
More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { spec+: { jobTemplate+: { metadata+: { labels+: labels } } } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { spec+: { jobTemplate+: { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } } } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { spec+: { jobTemplate+: { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } } } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { spec+: { jobTemplate+: { metadata+: { name: name } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. 
More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { spec+: { jobTemplate+: { metadata+: { namespace: namespace } } } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { spec+: { jobTemplate+: { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } } } }, @@ -95,13 +91,20 @@ withOwnerReferencesMixin(ownerReferences): { spec+: { jobTemplate+: { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } } } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { spec+: { jobTemplate+: { metadata+: { resourceVersion: resourceVersion } } } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { spec+: { jobTemplate+: { metadata+: { selfLink: selfLink } } } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { spec+: { jobTemplate+: { metadata+: { uid: uid } } } }, }, '#spec':: d.obj(help='"JobSpec describes how the job execution will look like."'), spec: { + '#podFailurePolicy':: d.obj(help='"PodFailurePolicy describes how failed pods influence the backoffLimit."'), + podFailurePolicy: { + '#withRules':: d.fn(help='"A list of pod failure policy rules. The rules are evaluated in order. Once a rule matches a Pod failure, the remaining of the rules are ignored. When no rule matches the Pod failure, the default handling applies - the counter of pod failures is incremented and it is checked against the backoffLimit. At most 20 elements are allowed."', args=[d.arg(name='rules', type=d.T.array)]), + withRules(rules): { spec+: { jobTemplate+: { spec+: { podFailurePolicy+: { rules: if std.isArray(v=rules) then rules else [rules] } } } } }, + '#withRulesMixin':: d.fn(help='"A list of pod failure policy rules. The rules are evaluated in order. Once a rule matches a Pod failure, the remaining of the rules are ignored. When no rule matches the Pod failure, the default handling applies - the counter of pod failures is incremented and it is checked against the backoffLimit. At most 20 elements are allowed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='rules', type=d.T.array)]), + withRulesMixin(rules): { spec+: { jobTemplate+: { spec+: { podFailurePolicy+: { rules+: if std.isArray(v=rules) then rules else [rules] } } } } }, + }, '#selector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), selector: { '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), @@ -117,12 +120,10 @@ template: { '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { spec+: { jobTemplate+: { spec+: { template+: { metadata+: { annotations: annotations } } } } } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. 
More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { spec+: { jobTemplate+: { spec+: { template+: { metadata+: { annotations+: annotations } } } } } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { spec+: { jobTemplate+: { spec+: { template+: { metadata+: { clusterName: clusterName } } } } } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { spec+: { jobTemplate+: { spec+: { template+: { metadata+: { creationTimestamp: creationTimestamp } } } } } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -133,21 +134,21 @@ withFinalizers(finalizers): { spec+: { jobTemplate+: { spec+: { template+: { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } } } } } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. 
Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { spec+: { jobTemplate+: { spec+: { template+: { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } } } } } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { spec+: { jobTemplate+: { spec+: { template+: { metadata+: { generateName: generateName } } } } } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { spec+: { jobTemplate+: { spec+: { template+: { metadata+: { generation: generation } } } } } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { spec+: { jobTemplate+: { spec+: { template+: { metadata+: { labels: labels } } } } } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { spec+: { jobTemplate+: { spec+: { template+: { metadata+: { labels+: labels } } } } } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { spec+: { jobTemplate+: { spec+: { template+: { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } } } } } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { spec+: { jobTemplate+: { spec+: { template+: { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } } } } } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { spec+: { jobTemplate+: { spec+: { template+: { metadata+: { name: name } } } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { spec+: { jobTemplate+: { spec+: { template+: { metadata+: { namespace: namespace } } } } } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { spec+: { jobTemplate+: { spec+: { template+: { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } } } } } }, @@ -155,9 +156,9 @@ withOwnerReferencesMixin(ownerReferences): { spec+: { jobTemplate+: { spec+: { template+: { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } } } } } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { spec+: { jobTemplate+: { spec+: { template+: { metadata+: { resourceVersion: resourceVersion } } } } } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. 
Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { spec+: { jobTemplate+: { spec+: { template+: { metadata+: { selfLink: selfLink } } } } } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { spec+: { jobTemplate+: { spec+: { template+: { metadata+: { uid: uid } } } } } }, }, '#spec':: d.obj(help='"PodSpec is a description of a pod."'), @@ -216,6 +217,11 @@ '#withSearchesMixin':: d.fn(help='"A list of DNS search domains for host-name lookup. This will be appended to the base search paths generated from DNSPolicy. Duplicated search paths will be removed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='searches', type=d.T.array)]), withSearchesMixin(searches): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { dnsConfig+: { searches+: if std.isArray(v=searches) then searches else [searches] } } } } } } }, }, + '#os':: d.obj(help='"PodOS defines the OS parameters of a pod."'), + os: { + '#withName':: d.fn(help='"Name is the name of the operating system. The currently supported values are linux and windows. Additional value may be defined in future and can be one of: https://github.com/opencontainers/runtime-spec/blob/master/config.md#platform-specific-configuration Clients should expect to handle additional values and treat unrecognized values in this field as os: null"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { os+: { name: name } } } } } } }, + }, '#securityContext':: d.obj(help='"PodSecurityContext holds pod-level security attributes and common container settings. Some fields are also present in container.securityContext. Field values of container.securityContext take precedence over field values of PodSecurityContext."'), securityContext: { '#seLinuxOptions':: d.obj(help='"SELinuxOptions are the labels to be applied to the container"'), @@ -231,7 +237,7 @@ }, '#seccompProfile':: d.obj(help="\"SeccompProfile defines a pod/container's seccomp profile settings. Only one profile source may be set.\""), seccompProfile: { - '#withLocalhostProfile':: d.fn(help="\"localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. 
Must only be set if type is \\\"Localhost\\\".\"", args=[d.arg(name='localhostProfile', type=d.T.string)]), + '#withLocalhostProfile':: d.fn(help="\"localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must be set if type is \\\"Localhost\\\". Must NOT be set for any other type.\"", args=[d.arg(name='localhostProfile', type=d.T.string)]), withLocalhostProfile(localhostProfile): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { securityContext+: { seccompProfile+: { localhostProfile: localhostProfile } } } } } } } }, '#withType':: d.fn(help='"type indicates which kind of seccomp profile will be applied. Valid options are:\\n\\nLocalhost - a profile defined in a file on the node should be used. RuntimeDefault - the container runtime default profile should be used. Unconfined - no profile should be applied."', args=[d.arg(name='type', type=d.T.string)]), withType(type): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { securityContext+: { seccompProfile+: { type: type } } } } } } } }, @@ -242,26 +248,28 @@ withGmsaCredentialSpec(gmsaCredentialSpec): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { securityContext+: { windowsOptions+: { gmsaCredentialSpec: gmsaCredentialSpec } } } } } } } }, '#withGmsaCredentialSpecName':: d.fn(help='"GMSACredentialSpecName is the name of the GMSA credential spec to use."', args=[d.arg(name='gmsaCredentialSpecName', type=d.T.string)]), withGmsaCredentialSpecName(gmsaCredentialSpecName): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { securityContext+: { windowsOptions+: { gmsaCredentialSpecName: gmsaCredentialSpecName } } } } } } } }, + '#withHostProcess':: d.fn(help="\"HostProcess determines if a container should be run as a 'Host Process' container. All of a Pod's containers must have the same effective HostProcess value (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). In addition, if HostProcess is true then HostNetwork must also be set to true.\"", args=[d.arg(name='hostProcess', type=d.T.boolean)]), + withHostProcess(hostProcess): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { securityContext+: { windowsOptions+: { hostProcess: hostProcess } } } } } } } }, '#withRunAsUserName':: d.fn(help='"The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence."', args=[d.arg(name='runAsUserName', type=d.T.string)]), withRunAsUserName(runAsUserName): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { securityContext+: { windowsOptions+: { runAsUserName: runAsUserName } } } } } } } }, }, - '#withFsGroup':: d.fn(help="\"A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod:\\n\\n1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. 
The permission bits are OR'd with rw-rw----\\n\\nIf unset, the Kubelet will not modify the ownership and permissions of any volume.\"", args=[d.arg(name='fsGroup', type=d.T.integer)]), + '#withFsGroup':: d.fn(help="\"A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod:\\n\\n1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw----\\n\\nIf unset, the Kubelet will not modify the ownership and permissions of any volume. Note that this field cannot be set when spec.os.name is windows.\"", args=[d.arg(name='fsGroup', type=d.T.integer)]), withFsGroup(fsGroup): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { securityContext+: { fsGroup: fsGroup } } } } } } }, - '#withFsGroupChangePolicy':: d.fn(help='"fsGroupChangePolicy defines behavior of changing ownership and permission of the volume before being exposed inside Pod. This field will only apply to volume types which support fsGroup based ownership(and permissions). It will have no effect on ephemeral volume types such as: secret, configmaps and emptydir. Valid values are \\"OnRootMismatch\\" and \\"Always\\". If not specified, \\"Always\\" is used."', args=[d.arg(name='fsGroupChangePolicy', type=d.T.string)]), + '#withFsGroupChangePolicy':: d.fn(help='"fsGroupChangePolicy defines behavior of changing ownership and permission of the volume before being exposed inside Pod. This field will only apply to volume types which support fsGroup based ownership(and permissions). It will have no effect on ephemeral volume types such as: secret, configmaps and emptydir. Valid values are \\"OnRootMismatch\\" and \\"Always\\". If not specified, \\"Always\\" is used. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='fsGroupChangePolicy', type=d.T.string)]), withFsGroupChangePolicy(fsGroupChangePolicy): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { securityContext+: { fsGroupChangePolicy: fsGroupChangePolicy } } } } } } }, - '#withRunAsGroup':: d.fn(help='"The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container."', args=[d.arg(name='runAsGroup', type=d.T.integer)]), + '#withRunAsGroup':: d.fn(help='"The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='runAsGroup', type=d.T.integer)]), withRunAsGroup(runAsGroup): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { securityContext+: { runAsGroup: runAsGroup } } } } } } }, '#withRunAsNonRoot':: d.fn(help='"Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in SecurityContext. 
If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence."', args=[d.arg(name='runAsNonRoot', type=d.T.boolean)]), withRunAsNonRoot(runAsNonRoot): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { securityContext+: { runAsNonRoot: runAsNonRoot } } } } } } }, - '#withRunAsUser':: d.fn(help='"The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container."', args=[d.arg(name='runAsUser', type=d.T.integer)]), + '#withRunAsUser':: d.fn(help='"The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='runAsUser', type=d.T.integer)]), withRunAsUser(runAsUser): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { securityContext+: { runAsUser: runAsUser } } } } } } }, - '#withSupplementalGroups':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID. If unspecified, no groups will be added to any container.\"", args=[d.arg(name='supplementalGroups', type=d.T.array)]), + '#withSupplementalGroups':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID, the fsGroup (if specified), and group memberships defined in the container image for the uid of the container process. If unspecified, no additional groups are added to any container. Note that group memberships defined in the container image for the uid of the container process are still effective, even if they are not included in this list. Note that this field cannot be set when spec.os.name is windows.\"", args=[d.arg(name='supplementalGroups', type=d.T.array)]), withSupplementalGroups(supplementalGroups): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { securityContext+: { supplementalGroups: if std.isArray(v=supplementalGroups) then supplementalGroups else [supplementalGroups] } } } } } } }, - '#withSupplementalGroupsMixin':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID. If unspecified, no groups will be added to any container.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='supplementalGroups', type=d.T.array)]), + '#withSupplementalGroupsMixin':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID, the fsGroup (if specified), and group memberships defined in the container image for the uid of the container process. If unspecified, no additional groups are added to any container. Note that group memberships defined in the container image for the uid of the container process are still effective, even if they are not included in this list. 
Note that this field cannot be set when spec.os.name is windows.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='supplementalGroups', type=d.T.array)]), withSupplementalGroupsMixin(supplementalGroups): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { securityContext+: { supplementalGroups+: if std.isArray(v=supplementalGroups) then supplementalGroups else [supplementalGroups] } } } } } } }, - '#withSysctls':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch."', args=[d.arg(name='sysctls', type=d.T.array)]), + '#withSysctls':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='sysctls', type=d.T.array)]), withSysctls(sysctls): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { securityContext+: { sysctls: if std.isArray(v=sysctls) then sysctls else [sysctls] } } } } } } }, - '#withSysctlsMixin':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='sysctls', type=d.T.array)]), + '#withSysctlsMixin':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch. Note that this field cannot be set when spec.os.name is windows."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='sysctls', type=d.T.array)]), withSysctlsMixin(sysctls): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { securityContext+: { sysctls+: if std.isArray(v=sysctls) then sysctls else [sysctls] } } } } } } }, }, '#withActiveDeadlineSeconds':: d.fn(help='"Optional duration in seconds the pod may be active on the node relative to StartTime before the system will actively try to mark it failed and kill associated containers. Value must be a positive integer."', args=[d.arg(name='activeDeadlineSeconds', type=d.T.integer)]), @@ -276,9 +284,9 @@ withDnsPolicy(dnsPolicy): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { dnsPolicy: dnsPolicy } } } } } }, '#withEnableServiceLinks':: d.fn(help="\"EnableServiceLinks indicates whether information about services should be injected into pod's environment variables, matching the syntax of Docker links. Optional: Defaults to true.\"", args=[d.arg(name='enableServiceLinks', type=d.T.boolean)]), withEnableServiceLinks(enableServiceLinks): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { enableServiceLinks: enableServiceLinks } } } } } }, - '#withEphemeralContainers':: d.fn(help="\"List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. This field is alpha-level and is only honored by servers that enable the EphemeralContainers feature.\"", args=[d.arg(name='ephemeralContainers', type=d.T.array)]), + '#withEphemeralContainers':: d.fn(help="\"List of ephemeral containers run in this pod. 
Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource.\"", args=[d.arg(name='ephemeralContainers', type=d.T.array)]), withEphemeralContainers(ephemeralContainers): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { ephemeralContainers: if std.isArray(v=ephemeralContainers) then ephemeralContainers else [ephemeralContainers] } } } } } }, - '#withEphemeralContainersMixin':: d.fn(help="\"List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. This field is alpha-level and is only honored by servers that enable the EphemeralContainers feature.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='ephemeralContainers', type=d.T.array)]), + '#withEphemeralContainersMixin':: d.fn(help="\"List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='ephemeralContainers', type=d.T.array)]), withEphemeralContainersMixin(ephemeralContainers): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { ephemeralContainers+: if std.isArray(v=ephemeralContainers) then ephemeralContainers else [ephemeralContainers] } } } } } }, '#withHostAliases':: d.fn(help="\"HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts file if specified. This is only valid for non-hostNetwork pods.\"", args=[d.arg(name='hostAliases', type=d.T.array)]), withHostAliases(hostAliases): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { hostAliases: if std.isArray(v=hostAliases) then hostAliases else [hostAliases] } } } } } }, @@ -290,11 +298,13 @@ withHostNetwork(hostNetwork): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { hostNetwork: hostNetwork } } } } } }, '#withHostPID':: d.fn(help="\"Use the host's pid namespace. Optional: Default to false.\"", args=[d.arg(name='hostPID', type=d.T.boolean)]), withHostPID(hostPID): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { hostPID: hostPID } } } } } }, + '#withHostUsers':: d.fn(help="\"Use the host's user namespace. Optional: Default to true. If set to true or not present, the pod will be run in the host user namespace, useful for when the pod needs a feature only available to the host user namespace, such as loading a kernel module with CAP_SYS_MODULE. When set to false, a new userns is created for the pod. Setting false is useful for mitigating container breakout vulnerabilities even allowing users to run their containers as root without actually having root privileges on the host. 
This field is alpha-level and is only honored by servers that enable the UserNamespacesSupport feature.\"", args=[d.arg(name='hostUsers', type=d.T.boolean)]), + withHostUsers(hostUsers): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { hostUsers: hostUsers } } } } } }, '#withHostname':: d.fn(help="\"Specifies the hostname of the Pod If not specified, the pod's hostname will be set to a system-defined value.\"", args=[d.arg(name='hostname', type=d.T.string)]), withHostname(hostname): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { hostname: hostname } } } } } }, - '#withImagePullSecrets':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), + '#withImagePullSecrets':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), withImagePullSecrets(imagePullSecrets): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { imagePullSecrets: if std.isArray(v=imagePullSecrets) then imagePullSecrets else [imagePullSecrets] } } } } } }, - '#withImagePullSecretsMixin':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), + '#withImagePullSecretsMixin':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), withImagePullSecretsMixin(imagePullSecrets): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { imagePullSecrets+: if std.isArray(v=imagePullSecrets) then imagePullSecrets else [imagePullSecrets] } } } } } }, '#withInitContainers':: d.fn(help='"List of initialization containers belonging to the pod. Init containers are executed in order prior to containers being started. If any init container fails, the pod is considered to have failed and is handled according to its restartPolicy. The name for an init container or normal container must be unique among all containers. Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes. 
The resourceRequirements of an init container are taken into account during scheduling by finding the highest request/limit for each resource type, and then using the max of of that value or the sum of the normal containers. Limits are applied to init containers in a similar fashion. Init containers cannot currently be added or removed. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/"', args=[d.arg(name='initContainers', type=d.T.array)]), withInitContainers(initContainers): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { initContainers: if std.isArray(v=initContainers) then initContainers else [initContainers] } } } } } }, @@ -306,26 +316,34 @@ withNodeSelector(nodeSelector): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { nodeSelector: nodeSelector } } } } } }, '#withNodeSelectorMixin':: d.fn(help="\"NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='nodeSelector', type=d.T.object)]), withNodeSelectorMixin(nodeSelector): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { nodeSelector+: nodeSelector } } } } } }, - '#withOverhead':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md This field is alpha-level as of Kubernetes v1.16, and is only honored by servers that enable the PodOverhead feature."', args=[d.arg(name='overhead', type=d.T.object)]), + '#withOverhead':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md"', args=[d.arg(name='overhead', type=d.T.object)]), withOverhead(overhead): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { overhead: overhead } } } } } }, - '#withOverheadMixin':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. 
The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md This field is alpha-level as of Kubernetes v1.16, and is only honored by servers that enable the PodOverhead feature."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='overhead', type=d.T.object)]), + '#withOverheadMixin':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='overhead', type=d.T.object)]), withOverheadMixin(overhead): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { overhead+: overhead } } } } } }, - '#withPreemptionPolicy':: d.fn(help='"PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset. This field is beta-level, gated by the NonPreemptingPriority feature-gate."', args=[d.arg(name='preemptionPolicy', type=d.T.string)]), + '#withPreemptionPolicy':: d.fn(help='"PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset."', args=[d.arg(name='preemptionPolicy', type=d.T.string)]), withPreemptionPolicy(preemptionPolicy): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { preemptionPolicy: preemptionPolicy } } } } } }, '#withPriority':: d.fn(help='"The priority value. Various system components use this field to find the priority of the pod. When Priority Admission Controller is enabled, it prevents users from setting this field. The admission controller populates this field from PriorityClassName. The higher the value, the higher the priority."', args=[d.arg(name='priority', type=d.T.integer)]), withPriority(priority): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { priority: priority } } } } } }, '#withPriorityClassName':: d.fn(help="\"If specified, indicates the pod's priority. \\\"system-node-critical\\\" and \\\"system-cluster-critical\\\" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default.\"", args=[d.arg(name='priorityClassName', type=d.T.string)]), withPriorityClassName(priorityClassName): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { priorityClassName: priorityClassName } } } } } }, - '#withReadinessGates':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. 
A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/0007-pod-ready%2B%2B.md"', args=[d.arg(name='readinessGates', type=d.T.array)]), + '#withReadinessGates':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates"', args=[d.arg(name='readinessGates', type=d.T.array)]), withReadinessGates(readinessGates): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { readinessGates: if std.isArray(v=readinessGates) then readinessGates else [readinessGates] } } } } } }, - '#withReadinessGatesMixin':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/0007-pod-ready%2B%2B.md"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='readinessGates', type=d.T.array)]), + '#withReadinessGatesMixin':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='readinessGates', type=d.T.array)]), withReadinessGatesMixin(readinessGates): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { readinessGates+: if std.isArray(v=readinessGates) then readinessGates else [readinessGates] } } } } } }, - '#withRestartPolicy':: d.fn(help='"Restart policy for all containers within the pod. One of Always, OnFailure, Never. Default to Always. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy"', args=[d.arg(name='restartPolicy', type=d.T.string)]), + '#withResourceClaims':: d.fn(help='"ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. The resources will be made available to those containers which consume them by name.\\n\\nThis is an alpha field and requires enabling the DynamicResourceAllocation feature gate.\\n\\nThis field is immutable."', args=[d.arg(name='resourceClaims', type=d.T.array)]), + withResourceClaims(resourceClaims): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { resourceClaims: if std.isArray(v=resourceClaims) then resourceClaims else [resourceClaims] } } } } } }, + '#withResourceClaimsMixin':: d.fn(help='"ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. 
The resources will be made available to those containers which consume them by name.\\n\\nThis is an alpha field and requires enabling the DynamicResourceAllocation feature gate.\\n\\nThis field is immutable."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='resourceClaims', type=d.T.array)]), + withResourceClaimsMixin(resourceClaims): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { resourceClaims+: if std.isArray(v=resourceClaims) then resourceClaims else [resourceClaims] } } } } } }, + '#withRestartPolicy':: d.fn(help='"Restart policy for all containers within the pod. One of Always, OnFailure, Never. In some contexts, only a subset of those values may be permitted. Default to Always. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy"', args=[d.arg(name='restartPolicy', type=d.T.string)]), withRestartPolicy(restartPolicy): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { restartPolicy: restartPolicy } } } } } }, - '#withRuntimeClassName':: d.fn(help='"RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \\"legacy\\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md This is a beta feature as of Kubernetes v1.14."', args=[d.arg(name='runtimeClassName', type=d.T.string)]), + '#withRuntimeClassName':: d.fn(help='"RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \\"legacy\\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class"', args=[d.arg(name='runtimeClassName', type=d.T.string)]), withRuntimeClassName(runtimeClassName): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { runtimeClassName: runtimeClassName } } } } } }, '#withSchedulerName':: d.fn(help='"If specified, the pod will be dispatched by specified scheduler. If not specified, the pod will be dispatched by default scheduler."', args=[d.arg(name='schedulerName', type=d.T.string)]), withSchedulerName(schedulerName): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { schedulerName: schedulerName } } } } } }, + '#withSchedulingGates':: d.fn(help='"SchedulingGates is an opaque list of values that if specified will block scheduling the pod. If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the scheduler will not attempt to schedule the pod.\\n\\nSchedulingGates can only be set at pod creation time, and be removed only afterwards.\\n\\nThis is a beta feature enabled by the PodSchedulingReadiness feature gate."', args=[d.arg(name='schedulingGates', type=d.T.array)]), + withSchedulingGates(schedulingGates): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { schedulingGates: if std.isArray(v=schedulingGates) then schedulingGates else [schedulingGates] } } } } } }, + '#withSchedulingGatesMixin':: d.fn(help='"SchedulingGates is an opaque list of values that if specified will block scheduling the pod. 
If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the scheduler will not attempt to schedule the pod.\\n\\nSchedulingGates can only be set at pod creation time, and be removed only afterwards.\\n\\nThis is a beta feature enabled by the PodSchedulingReadiness feature gate."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='schedulingGates', type=d.T.array)]), + withSchedulingGatesMixin(schedulingGates): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { schedulingGates+: if std.isArray(v=schedulingGates) then schedulingGates else [schedulingGates] } } } } } }, '#withServiceAccount':: d.fn(help='"DeprecatedServiceAccount is a depreciated alias for ServiceAccountName. Deprecated: Use serviceAccountName instead."', args=[d.arg(name='serviceAccount', type=d.T.string)]), withServiceAccount(serviceAccount): { spec+: { jobTemplate+: { spec+: { template+: { spec+: { serviceAccount: serviceAccount } } } } } }, '#withServiceAccountName':: d.fn(help='"ServiceAccountName is the name of the ServiceAccount to use to run this pod. More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/"', args=[d.arg(name='serviceAccountName', type=d.T.string)]), @@ -356,21 +374,27 @@ withActiveDeadlineSeconds(activeDeadlineSeconds): { spec+: { jobTemplate+: { spec+: { activeDeadlineSeconds: activeDeadlineSeconds } } } }, '#withBackoffLimit':: d.fn(help='"Specifies the number of retries before marking this job failed. Defaults to 6"', args=[d.arg(name='backoffLimit', type=d.T.integer)]), withBackoffLimit(backoffLimit): { spec+: { jobTemplate+: { spec+: { backoffLimit: backoffLimit } } } }, - '#withCompletionMode':: d.fn(help="\"CompletionMode specifies how Pod completions are tracked. It can be `NonIndexed` (default) or `Indexed`.\\n\\n`NonIndexed` means that the Job is considered complete when there have been .spec.completions successfully completed Pods. Each Pod completion is homologous to each other.\\n\\n`Indexed` means that the Pods of a Job get an associated completion index from 0 to (.spec.completions - 1), available in the annotation batch.kubernetes.io/job-completion-index. The Job is considered complete when there is one successfully completed Pod for each index. When value is `Indexed`, .spec.completions must be specified and `.spec.parallelism` must be less than or equal to 10^5.\\n\\nThis field is alpha-level and is only honored by servers that enable the IndexedJob feature gate. More completion modes can be added in the future. If the Job controller observes a mode that it doesn't recognize, the controller skips updates for the Job.\"", args=[d.arg(name='completionMode', type=d.T.string)]), + '#withBackoffLimitPerIndex':: d.fn(help="\"Specifies the limit for the number of retries within an index before marking this index as failed. When enabled the number of failures per index is kept in the pod's batch.kubernetes.io/job-index-failure-count annotation. It can only be set when Job's completionMode=Indexed, and the Pod's restart policy is Never. The field is immutable. This field is beta-level. It can be used when the `JobBackoffLimitPerIndex` feature gate is enabled (enabled by default).\"", args=[d.arg(name='backoffLimitPerIndex', type=d.T.integer)]), + withBackoffLimitPerIndex(backoffLimitPerIndex): { spec+: { jobTemplate+: { spec+: { backoffLimitPerIndex: backoffLimitPerIndex } } } }, + '#withCompletionMode':: d.fn(help="\"completionMode specifies how Pod completions are tracked. 
It can be `NonIndexed` (default) or `Indexed`.\\n\\n`NonIndexed` means that the Job is considered complete when there have been .spec.completions successfully completed Pods. Each Pod completion is homologous to each other.\\n\\n`Indexed` means that the Pods of a Job get an associated completion index from 0 to (.spec.completions - 1), available in the annotation batch.kubernetes.io/job-completion-index. The Job is considered complete when there is one successfully completed Pod for each index. When value is `Indexed`, .spec.completions must be specified and `.spec.parallelism` must be less than or equal to 10^5. In addition, The Pod name takes the form `$(job-name)-$(index)-$(random-string)`, the Pod hostname takes the form `$(job-name)-$(index)`.\\n\\nMore completion modes can be added in the future. If the Job controller observes a mode that it doesn't recognize, which is possible during upgrades due to version skew, the controller skips updates for the Job.\"", args=[d.arg(name='completionMode', type=d.T.string)]), withCompletionMode(completionMode): { spec+: { jobTemplate+: { spec+: { completionMode: completionMode } } } }, - '#withCompletions':: d.fn(help='"Specifies the desired number of successfully finished pods the job should be run with. Setting to nil means that the success of any pod signals the success of all pods, and allows parallelism to have any positive value. Setting to 1 means that parallelism is limited to 1 and the success of that pod signals the success of the job. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/"', args=[d.arg(name='completions', type=d.T.integer)]), + '#withCompletions':: d.fn(help='"Specifies the desired number of successfully finished pods the job should be run with. Setting to null means that the success of any pod signals the success of all pods, and allows parallelism to have any positive value. Setting to 1 means that parallelism is limited to 1 and the success of that pod signals the success of the job. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/"', args=[d.arg(name='completions', type=d.T.integer)]), withCompletions(completions): { spec+: { jobTemplate+: { spec+: { completions: completions } } } }, '#withManualSelector':: d.fn(help='"manualSelector controls generation of pod labels and pod selectors. Leave `manualSelector` unset unless you are certain what you are doing. When false or unset, the system pick labels unique to this job and appends those labels to the pod template. When true, the user is responsible for picking unique labels and specifying the selector. Failure to pick a unique label may cause this and other jobs to not function correctly. However, You may see `manualSelector=true` in jobs that were created with the old `extensions/v1beta1` API. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/#specifying-your-own-pod-selector"', args=[d.arg(name='manualSelector', type=d.T.boolean)]), withManualSelector(manualSelector): { spec+: { jobTemplate+: { spec+: { manualSelector: manualSelector } } } }, + '#withMaxFailedIndexes':: d.fn(help='"Specifies the maximal number of failed indexes before marking the Job as failed, when backoffLimitPerIndex is set. Once the number of failed indexes exceeds this number the entire Job is marked as Failed and its execution is terminated. When left as null the job continues execution of all of its indexes and is marked with the `Complete` Job condition. 
It can only be specified when backoffLimitPerIndex is set. It can be null or up to completions. It is required and must be less than or equal to 10^4 when is completions greater than 10^5. This field is beta-level. It can be used when the `JobBackoffLimitPerIndex` feature gate is enabled (enabled by default)."', args=[d.arg(name='maxFailedIndexes', type=d.T.integer)]), + withMaxFailedIndexes(maxFailedIndexes): { spec+: { jobTemplate+: { spec+: { maxFailedIndexes: maxFailedIndexes } } } }, '#withParallelism':: d.fn(help='"Specifies the maximum desired number of pods the job should run at any given time. The actual number of pods running in steady state will be less than this number when ((.spec.completions - .status.successful) < .spec.parallelism), i.e. when the work left to do is less than max parallelism. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/"', args=[d.arg(name='parallelism', type=d.T.integer)]), withParallelism(parallelism): { spec+: { jobTemplate+: { spec+: { parallelism: parallelism } } } }, - '#withSuspend':: d.fn(help='"Suspend specifies whether the Job controller should create Pods or not. If a Job is created with suspend set to true, no Pods are created by the Job controller. If a Job is suspended after creation (i.e. the flag goes from false to true), the Job controller will delete all active Pods associated with this Job. Users must design their workload to gracefully handle this. Suspending a Job will reset the StartTime field of the Job, effectively resetting the ActiveDeadlineSeconds timer too. This is an alpha field and requires the SuspendJob feature gate to be enabled; otherwise this field may not be set to true. Defaults to false."', args=[d.arg(name='suspend', type=d.T.boolean)]), + '#withPodReplacementPolicy':: d.fn(help='"podReplacementPolicy specifies when to create replacement Pods. Possible values are: - TerminatingOrFailed means that we recreate pods\\n when they are terminating (has a metadata.deletionTimestamp) or failed.\\n- Failed means to wait until a previously created Pod is fully terminated (has phase\\n Failed or Succeeded) before creating a replacement Pod.\\n\\nWhen using podFailurePolicy, Failed is the the only allowed value. TerminatingOrFailed and Failed are allowed values when podFailurePolicy is not in use. This is an beta field. To use this, enable the JobPodReplacementPolicy feature toggle. This is on by default."', args=[d.arg(name='podReplacementPolicy', type=d.T.string)]), + withPodReplacementPolicy(podReplacementPolicy): { spec+: { jobTemplate+: { spec+: { podReplacementPolicy: podReplacementPolicy } } } }, + '#withSuspend':: d.fn(help='"suspend specifies whether the Job controller should create Pods or not. If a Job is created with suspend set to true, no Pods are created by the Job controller. If a Job is suspended after creation (i.e. the flag goes from false to true), the Job controller will delete all active Pods associated with this Job. Users must design their workload to gracefully handle this. Suspending a Job will reset the StartTime field of the Job, effectively resetting the ActiveDeadlineSeconds timer too. Defaults to false."', args=[d.arg(name='suspend', type=d.T.boolean)]), withSuspend(suspend): { spec+: { jobTemplate+: { spec+: { suspend: suspend } } } }, - '#withTtlSecondsAfterFinished':: d.fn(help="\"ttlSecondsAfterFinished limits the lifetime of a Job that has finished execution (either Complete or Failed). 
If this field is set, ttlSecondsAfterFinished after the Job finishes, it is eligible to be automatically deleted. When the Job is being deleted, its lifecycle guarantees (e.g. finalizers) will be honored. If this field is unset, the Job won't be automatically deleted. If this field is set to zero, the Job becomes eligible to be deleted immediately after it finishes. This field is alpha-level and is only honored by servers that enable the TTLAfterFinished feature.\"", args=[d.arg(name='ttlSecondsAfterFinished', type=d.T.integer)]), + '#withTtlSecondsAfterFinished':: d.fn(help="\"ttlSecondsAfterFinished limits the lifetime of a Job that has finished execution (either Complete or Failed). If this field is set, ttlSecondsAfterFinished after the Job finishes, it is eligible to be automatically deleted. When the Job is being deleted, its lifecycle guarantees (e.g. finalizers) will be honored. If this field is unset, the Job won't be automatically deleted. If this field is set to zero, the Job becomes eligible to be deleted immediately after it finishes.\"", args=[d.arg(name='ttlSecondsAfterFinished', type=d.T.integer)]), withTtlSecondsAfterFinished(ttlSecondsAfterFinished): { spec+: { jobTemplate+: { spec+: { ttlSecondsAfterFinished: ttlSecondsAfterFinished } } } }, }, }, - '#withConcurrencyPolicy':: d.fn(help="\"Specifies how to treat concurrent executions of a Job. Valid values are: - \\\"Allow\\\" (default): allows CronJobs to run concurrently; - \\\"Forbid\\\": forbids concurrent runs, skipping next run if previous run hasn't finished yet; - \\\"Replace\\\": cancels currently running job and replaces it with a new one\"", args=[d.arg(name='concurrencyPolicy', type=d.T.string)]), + '#withConcurrencyPolicy':: d.fn(help="\"Specifies how to treat concurrent executions of a Job. Valid values are:\\n\\n- \\\"Allow\\\" (default): allows CronJobs to run concurrently; - \\\"Forbid\\\": forbids concurrent runs, skipping next run if previous run hasn't finished yet; - \\\"Replace\\\": cancels currently running job and replaces it with a new one\"", args=[d.arg(name='concurrencyPolicy', type=d.T.string)]), withConcurrencyPolicy(concurrencyPolicy): { spec+: { concurrencyPolicy: concurrencyPolicy } }, '#withFailedJobsHistoryLimit':: d.fn(help='"The number of failed finished jobs to retain. Value must be non-negative integer. Defaults to 1."', args=[d.arg(name='failedJobsHistoryLimit', type=d.T.integer)]), withFailedJobsHistoryLimit(failedJobsHistoryLimit): { spec+: { failedJobsHistoryLimit: failedJobsHistoryLimit } }, @@ -382,6 +406,8 @@ withSuccessfulJobsHistoryLimit(successfulJobsHistoryLimit): { spec+: { successfulJobsHistoryLimit: successfulJobsHistoryLimit } }, '#withSuspend':: d.fn(help='"This flag tells the controller to suspend subsequent executions, it does not apply to already started executions. Defaults to false."', args=[d.arg(name='suspend', type=d.T.boolean)]), withSuspend(suspend): { spec+: { suspend: suspend } }, + '#withTimeZone':: d.fn(help='"The time zone name for the given schedule, see https://en.wikipedia.org/wiki/List_of_tz_database_time_zones. If not specified, this will default to the time zone of the kube-controller-manager process. The set of valid time zone names and the time zone offset is loaded from the system-wide time zone database by the API server during CronJob validation and the controller manager during execution. If no system-wide time zone database can be found a bundled version of the database is used instead. 
If the time zone name becomes invalid during the lifetime of a CronJob or due to a change in host configuration, the controller will stop creating new new Jobs and will create a system event with the reason UnknownTimeZone. More information can be found in https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/#time-zones"', args=[d.arg(name='timeZone', type=d.T.string)]), + withTimeZone(timeZone): { spec+: { timeZone: timeZone } }, }, '#mixin': 'ignore', mixin: self, diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/batch/v1/cronJobSpec.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/batch/v1/cronJobSpec.libsonnet similarity index 81% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/batch/v1/cronJobSpec.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/batch/v1/cronJobSpec.libsonnet index 8af5f1b1234..3cae089c594 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/batch/v1/cronJobSpec.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/batch/v1/cronJobSpec.libsonnet @@ -5,12 +5,10 @@ jobTemplate: { '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { jobTemplate+: { metadata+: { annotations: annotations } } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { jobTemplate+: { metadata+: { annotations+: annotations } } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. 
This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { jobTemplate+: { metadata+: { clusterName: clusterName } } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { jobTemplate+: { metadata+: { creationTimestamp: creationTimestamp } } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -21,21 +19,21 @@ withFinalizers(finalizers): { jobTemplate+: { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { jobTemplate+: { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. 
If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { jobTemplate+: { metadata+: { generateName: generateName } } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { jobTemplate+: { metadata+: { generation: generation } } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { jobTemplate+: { metadata+: { labels: labels } } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { jobTemplate+: { metadata+: { labels+: labels } } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { jobTemplate+: { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. 
A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { jobTemplate+: { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { jobTemplate+: { metadata+: { name: name } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { jobTemplate+: { metadata+: { namespace: namespace } } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { jobTemplate+: { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } } }, @@ -43,13 +41,20 @@ withOwnerReferencesMixin(ownerReferences): { jobTemplate+: { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. 
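The with*/with*Mixin pairs above follow the library's usual convention: the plain form sets the field, the Mixin form merges into it (note the `+:` in the generated bodies). A minimal usage sketch, assuming a k.libsonnet shim that imports the vendored 1.29 main.libsonnet and that these builders are reachable via k.batch.v1.cronJob (the access path is an assumption, not something this hunk shows):

local k = import 'k.libsonnet';  // assumed shim for the vendored 1.29 library
local cronJob = k.batch.v1.cronJob;

cronJob.new('nightly-report')
+ cronJob.spec.jobTemplate.metadata.withLabels({ team: 'platform' })    // sets labels
+ cronJob.spec.jobTemplate.metadata.withLabelsMixin({ tier: 'batch' })  // merges via labels+: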
May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { jobTemplate+: { metadata+: { resourceVersion: resourceVersion } } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { jobTemplate+: { metadata+: { selfLink: selfLink } } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { jobTemplate+: { metadata+: { uid: uid } } }, }, '#spec':: d.obj(help='"JobSpec describes how the job execution will look like."'), spec: { + '#podFailurePolicy':: d.obj(help='"PodFailurePolicy describes how failed pods influence the backoffLimit."'), + podFailurePolicy: { + '#withRules':: d.fn(help='"A list of pod failure policy rules. The rules are evaluated in order. Once a rule matches a Pod failure, the remaining of the rules are ignored. When no rule matches the Pod failure, the default handling applies - the counter of pod failures is incremented and it is checked against the backoffLimit. At most 20 elements are allowed."', args=[d.arg(name='rules', type=d.T.array)]), + withRules(rules): { jobTemplate+: { spec+: { podFailurePolicy+: { rules: if std.isArray(v=rules) then rules else [rules] } } } }, + '#withRulesMixin':: d.fn(help='"A list of pod failure policy rules. The rules are evaluated in order. Once a rule matches a Pod failure, the remaining of the rules are ignored. When no rule matches the Pod failure, the default handling applies - the counter of pod failures is incremented and it is checked against the backoffLimit. At most 20 elements are allowed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='rules', type=d.T.array)]), + withRulesMixin(rules): { jobTemplate+: { spec+: { podFailurePolicy+: { rules+: if std.isArray(v=rules) then rules else [rules] } } } }, + }, '#selector':: d.obj(help='"A label selector is a label query over a set of resources. 
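podFailurePolicy is new relative to the 1.21 tree this patch removes. A hedged sketch of feeding withRules a couple of rule objects; the rule contents are illustrative and passed as plain objects rather than through dedicated builders:

local k = import 'k.libsonnet';
local jobSpec = k.batch.v1.cronJob.spec.jobTemplate.spec;  // assumed path, see note above

// Fail the Job outright on exit code 42, but ignore pods killed by disruption.
jobSpec.podFailurePolicy.withRules([
  { action: 'FailJob', onExitCodes: { containerName: 'main', operator: 'In', values: [42] } },
  { action: 'Ignore', onPodConditions: [{ type: 'DisruptionTarget', status: 'True' }] },
])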
The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), selector: { '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), @@ -65,12 +70,10 @@ template: { '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { jobTemplate+: { spec+: { template+: { metadata+: { annotations: annotations } } } } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { jobTemplate+: { spec+: { template+: { metadata+: { annotations+: annotations } } } } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { jobTemplate+: { spec+: { template+: { metadata+: { clusterName: clusterName } } } } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { jobTemplate+: { spec+: { template+: { metadata+: { creationTimestamp: creationTimestamp } } } } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. 
Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -81,21 +84,21 @@ withFinalizers(finalizers): { jobTemplate+: { spec+: { template+: { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } } } } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { jobTemplate+: { spec+: { template+: { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } } } } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { jobTemplate+: { spec+: { template+: { metadata+: { generateName: generateName } } } } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. 
Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { jobTemplate+: { spec+: { template+: { metadata+: { generation: generation } } } } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { jobTemplate+: { spec+: { template+: { metadata+: { labels: labels } } } } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { jobTemplate+: { spec+: { template+: { metadata+: { labels+: labels } } } } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { jobTemplate+: { spec+: { template+: { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } } } } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { jobTemplate+: { spec+: { template+: { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } } } } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. 
Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { jobTemplate+: { spec+: { template+: { metadata+: { name: name } } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { jobTemplate+: { spec+: { template+: { metadata+: { namespace: namespace } } } } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { jobTemplate+: { spec+: { template+: { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } } } } }, @@ -103,9 +106,9 @@ withOwnerReferencesMixin(ownerReferences): { jobTemplate+: { spec+: { template+: { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } } } } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { jobTemplate+: { spec+: { template+: { metadata+: { resourceVersion: resourceVersion } } } } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { jobTemplate+: { spec+: { template+: { metadata+: { selfLink: selfLink } } } } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { jobTemplate+: { spec+: { template+: { metadata+: { uid: uid } } } } }, }, '#spec':: d.obj(help='"PodSpec is a description of a pod."'), @@ -164,6 +167,11 @@ '#withSearchesMixin':: d.fn(help='"A list of DNS search domains for host-name lookup. This will be appended to the base search paths generated from DNSPolicy. Duplicated search paths will be removed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='searches', type=d.T.array)]), withSearchesMixin(searches): { jobTemplate+: { spec+: { template+: { spec+: { dnsConfig+: { searches+: if std.isArray(v=searches) then searches else [searches] } } } } } }, }, + '#os':: d.obj(help='"PodOS defines the OS parameters of a pod."'), + os: { + '#withName':: d.fn(help='"Name is the name of the operating system. The currently supported values are linux and windows. Additional value may be defined in future and can be one of: https://github.com/opencontainers/runtime-spec/blob/master/config.md#platform-specific-configuration Clients should expect to handle additional values and treat unrecognized values in this field as os: null"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { jobTemplate+: { spec+: { template+: { spec+: { os+: { name: name } } } } } }, + }, '#securityContext':: d.obj(help='"PodSecurityContext holds pod-level security attributes and common container settings. Some fields are also present in container.securityContext. Field values of container.securityContext take precedence over field values of PodSecurityContext."'), securityContext: { '#seLinuxOptions':: d.obj(help='"SELinuxOptions are the labels to be applied to the container"'), @@ -179,7 +187,7 @@ }, '#seccompProfile':: d.obj(help="\"SeccompProfile defines a pod/container's seccomp profile settings. 
Only one profile source may be set.\""), seccompProfile: { - '#withLocalhostProfile':: d.fn(help="\"localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must only be set if type is \\\"Localhost\\\".\"", args=[d.arg(name='localhostProfile', type=d.T.string)]), + '#withLocalhostProfile':: d.fn(help="\"localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must be set if type is \\\"Localhost\\\". Must NOT be set for any other type.\"", args=[d.arg(name='localhostProfile', type=d.T.string)]), withLocalhostProfile(localhostProfile): { jobTemplate+: { spec+: { template+: { spec+: { securityContext+: { seccompProfile+: { localhostProfile: localhostProfile } } } } } } }, '#withType':: d.fn(help='"type indicates which kind of seccomp profile will be applied. Valid options are:\\n\\nLocalhost - a profile defined in a file on the node should be used. RuntimeDefault - the container runtime default profile should be used. Unconfined - no profile should be applied."', args=[d.arg(name='type', type=d.T.string)]), withType(type): { jobTemplate+: { spec+: { template+: { spec+: { securityContext+: { seccompProfile+: { type: type } } } } } } }, @@ -190,26 +198,28 @@ withGmsaCredentialSpec(gmsaCredentialSpec): { jobTemplate+: { spec+: { template+: { spec+: { securityContext+: { windowsOptions+: { gmsaCredentialSpec: gmsaCredentialSpec } } } } } } }, '#withGmsaCredentialSpecName':: d.fn(help='"GMSACredentialSpecName is the name of the GMSA credential spec to use."', args=[d.arg(name='gmsaCredentialSpecName', type=d.T.string)]), withGmsaCredentialSpecName(gmsaCredentialSpecName): { jobTemplate+: { spec+: { template+: { spec+: { securityContext+: { windowsOptions+: { gmsaCredentialSpecName: gmsaCredentialSpecName } } } } } } }, + '#withHostProcess':: d.fn(help="\"HostProcess determines if a container should be run as a 'Host Process' container. All of a Pod's containers must have the same effective HostProcess value (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). In addition, if HostProcess is true then HostNetwork must also be set to true.\"", args=[d.arg(name='hostProcess', type=d.T.boolean)]), + withHostProcess(hostProcess): { jobTemplate+: { spec+: { template+: { spec+: { securityContext+: { windowsOptions+: { hostProcess: hostProcess } } } } } } }, '#withRunAsUserName':: d.fn(help='"The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence."', args=[d.arg(name='runAsUserName', type=d.T.string)]), withRunAsUserName(runAsUserName): { jobTemplate+: { spec+: { template+: { spec+: { securityContext+: { windowsOptions+: { runAsUserName: runAsUserName } } } } } } }, }, - '#withFsGroup':: d.fn(help="\"A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod:\\n\\n1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. 
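For the seccompProfile helpers above, type and localhostProfile go together as the help text describes; a small sketch under the same path assumptions:

local k = import 'k.libsonnet';
local secCtx = k.batch.v1.cronJob.spec.jobTemplate.spec.template.spec.securityContext;

// Use the container runtime's default profile. For type 'Localhost' you would also call
// secCtx.seccompProfile.withLocalhostProfile('profiles/audit.json') with a file that
// already exists under the kubelet's configured seccomp directory.
secCtx.seccompProfile.withType('RuntimeDefault')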
The permission bits are OR'd with rw-rw----\\n\\nIf unset, the Kubelet will not modify the ownership and permissions of any volume.\"", args=[d.arg(name='fsGroup', type=d.T.integer)]), + '#withFsGroup':: d.fn(help="\"A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod:\\n\\n1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw----\\n\\nIf unset, the Kubelet will not modify the ownership and permissions of any volume. Note that this field cannot be set when spec.os.name is windows.\"", args=[d.arg(name='fsGroup', type=d.T.integer)]), withFsGroup(fsGroup): { jobTemplate+: { spec+: { template+: { spec+: { securityContext+: { fsGroup: fsGroup } } } } } }, - '#withFsGroupChangePolicy':: d.fn(help='"fsGroupChangePolicy defines behavior of changing ownership and permission of the volume before being exposed inside Pod. This field will only apply to volume types which support fsGroup based ownership(and permissions). It will have no effect on ephemeral volume types such as: secret, configmaps and emptydir. Valid values are \\"OnRootMismatch\\" and \\"Always\\". If not specified, \\"Always\\" is used."', args=[d.arg(name='fsGroupChangePolicy', type=d.T.string)]), + '#withFsGroupChangePolicy':: d.fn(help='"fsGroupChangePolicy defines behavior of changing ownership and permission of the volume before being exposed inside Pod. This field will only apply to volume types which support fsGroup based ownership(and permissions). It will have no effect on ephemeral volume types such as: secret, configmaps and emptydir. Valid values are \\"OnRootMismatch\\" and \\"Always\\". If not specified, \\"Always\\" is used. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='fsGroupChangePolicy', type=d.T.string)]), withFsGroupChangePolicy(fsGroupChangePolicy): { jobTemplate+: { spec+: { template+: { spec+: { securityContext+: { fsGroupChangePolicy: fsGroupChangePolicy } } } } } }, - '#withRunAsGroup':: d.fn(help='"The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container."', args=[d.arg(name='runAsGroup', type=d.T.integer)]), + '#withRunAsGroup':: d.fn(help='"The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='runAsGroup', type=d.T.integer)]), withRunAsGroup(runAsGroup): { jobTemplate+: { spec+: { template+: { spec+: { securityContext+: { runAsGroup: runAsGroup } } } } } }, '#withRunAsNonRoot':: d.fn(help='"Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in SecurityContext. 
If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence."', args=[d.arg(name='runAsNonRoot', type=d.T.boolean)]), withRunAsNonRoot(runAsNonRoot): { jobTemplate+: { spec+: { template+: { spec+: { securityContext+: { runAsNonRoot: runAsNonRoot } } } } } }, - '#withRunAsUser':: d.fn(help='"The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container."', args=[d.arg(name='runAsUser', type=d.T.integer)]), + '#withRunAsUser':: d.fn(help='"The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='runAsUser', type=d.T.integer)]), withRunAsUser(runAsUser): { jobTemplate+: { spec+: { template+: { spec+: { securityContext+: { runAsUser: runAsUser } } } } } }, - '#withSupplementalGroups':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID. If unspecified, no groups will be added to any container.\"", args=[d.arg(name='supplementalGroups', type=d.T.array)]), + '#withSupplementalGroups':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID, the fsGroup (if specified), and group memberships defined in the container image for the uid of the container process. If unspecified, no additional groups are added to any container. Note that group memberships defined in the container image for the uid of the container process are still effective, even if they are not included in this list. Note that this field cannot be set when spec.os.name is windows.\"", args=[d.arg(name='supplementalGroups', type=d.T.array)]), withSupplementalGroups(supplementalGroups): { jobTemplate+: { spec+: { template+: { spec+: { securityContext+: { supplementalGroups: if std.isArray(v=supplementalGroups) then supplementalGroups else [supplementalGroups] } } } } } }, - '#withSupplementalGroupsMixin':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID. If unspecified, no groups will be added to any container.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='supplementalGroups', type=d.T.array)]), + '#withSupplementalGroupsMixin':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID, the fsGroup (if specified), and group memberships defined in the container image for the uid of the container process. If unspecified, no additional groups are added to any container. Note that group memberships defined in the container image for the uid of the container process are still effective, even if they are not included in this list. 
Note that this field cannot be set when spec.os.name is windows.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='supplementalGroups', type=d.T.array)]), withSupplementalGroupsMixin(supplementalGroups): { jobTemplate+: { spec+: { template+: { spec+: { securityContext+: { supplementalGroups+: if std.isArray(v=supplementalGroups) then supplementalGroups else [supplementalGroups] } } } } } }, - '#withSysctls':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch."', args=[d.arg(name='sysctls', type=d.T.array)]), + '#withSysctls':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='sysctls', type=d.T.array)]), withSysctls(sysctls): { jobTemplate+: { spec+: { template+: { spec+: { securityContext+: { sysctls: if std.isArray(v=sysctls) then sysctls else [sysctls] } } } } } }, - '#withSysctlsMixin':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='sysctls', type=d.T.array)]), + '#withSysctlsMixin':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch. Note that this field cannot be set when spec.os.name is windows."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='sysctls', type=d.T.array)]), withSysctlsMixin(sysctls): { jobTemplate+: { spec+: { template+: { spec+: { securityContext+: { sysctls+: if std.isArray(v=sysctls) then sysctls else [sysctls] } } } } } }, }, '#withActiveDeadlineSeconds':: d.fn(help='"Optional duration in seconds the pod may be active on the node relative to StartTime before the system will actively try to mark it failed and kill associated containers. Value must be a positive integer."', args=[d.arg(name='activeDeadlineSeconds', type=d.T.integer)]), @@ -224,9 +234,9 @@ withDnsPolicy(dnsPolicy): { jobTemplate+: { spec+: { template+: { spec+: { dnsPolicy: dnsPolicy } } } } }, '#withEnableServiceLinks':: d.fn(help="\"EnableServiceLinks indicates whether information about services should be injected into pod's environment variables, matching the syntax of Docker links. Optional: Defaults to true.\"", args=[d.arg(name='enableServiceLinks', type=d.T.boolean)]), withEnableServiceLinks(enableServiceLinks): { jobTemplate+: { spec+: { template+: { spec+: { enableServiceLinks: enableServiceLinks } } } } }, - '#withEphemeralContainers':: d.fn(help="\"List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. This field is alpha-level and is only honored by servers that enable the EphemeralContainers feature.\"", args=[d.arg(name='ephemeralContainers', type=d.T.array)]), + '#withEphemeralContainers':: d.fn(help="\"List of ephemeral containers run in this pod. 
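A combined sketch of the pod-level security fields documented above (values are examples only; the "cannot be set when spec.os.name is windows" caveats in the help strings still apply):

local k = import 'k.libsonnet';
local secCtx = k.batch.v1.cronJob.spec.jobTemplate.spec.template.spec.securityContext;

secCtx.withRunAsNonRoot(true)
+ secCtx.withRunAsUser(10001)                       // UID for container entrypoints
+ secCtx.withRunAsGroup(10001)                      // primary GID
+ secCtx.withFsGroup(2000)                          // volume ownership group
+ secCtx.withFsGroupChangePolicy('OnRootMismatch')
+ secCtx.withSupplementalGroupsMixin([3000])        // appends, per the Mixin note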
Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource.\"", args=[d.arg(name='ephemeralContainers', type=d.T.array)]), withEphemeralContainers(ephemeralContainers): { jobTemplate+: { spec+: { template+: { spec+: { ephemeralContainers: if std.isArray(v=ephemeralContainers) then ephemeralContainers else [ephemeralContainers] } } } } }, - '#withEphemeralContainersMixin':: d.fn(help="\"List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. This field is alpha-level and is only honored by servers that enable the EphemeralContainers feature.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='ephemeralContainers', type=d.T.array)]), + '#withEphemeralContainersMixin':: d.fn(help="\"List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='ephemeralContainers', type=d.T.array)]), withEphemeralContainersMixin(ephemeralContainers): { jobTemplate+: { spec+: { template+: { spec+: { ephemeralContainers+: if std.isArray(v=ephemeralContainers) then ephemeralContainers else [ephemeralContainers] } } } } }, '#withHostAliases':: d.fn(help="\"HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts file if specified. This is only valid for non-hostNetwork pods.\"", args=[d.arg(name='hostAliases', type=d.T.array)]), withHostAliases(hostAliases): { jobTemplate+: { spec+: { template+: { spec+: { hostAliases: if std.isArray(v=hostAliases) then hostAliases else [hostAliases] } } } } }, @@ -238,11 +248,13 @@ withHostNetwork(hostNetwork): { jobTemplate+: { spec+: { template+: { spec+: { hostNetwork: hostNetwork } } } } }, '#withHostPID':: d.fn(help="\"Use the host's pid namespace. Optional: Default to false.\"", args=[d.arg(name='hostPID', type=d.T.boolean)]), withHostPID(hostPID): { jobTemplate+: { spec+: { template+: { spec+: { hostPID: hostPID } } } } }, + '#withHostUsers':: d.fn(help="\"Use the host's user namespace. Optional: Default to true. If set to true or not present, the pod will be run in the host user namespace, useful for when the pod needs a feature only available to the host user namespace, such as loading a kernel module with CAP_SYS_MODULE. When set to false, a new userns is created for the pod. Setting false is useful for mitigating container breakout vulnerabilities even allowing users to run their containers as root without actually having root privileges on the host. 
This field is alpha-level and is only honored by servers that enable the UserNamespacesSupport feature.\"", args=[d.arg(name='hostUsers', type=d.T.boolean)]), + withHostUsers(hostUsers): { jobTemplate+: { spec+: { template+: { spec+: { hostUsers: hostUsers } } } } }, '#withHostname':: d.fn(help="\"Specifies the hostname of the Pod If not specified, the pod's hostname will be set to a system-defined value.\"", args=[d.arg(name='hostname', type=d.T.string)]), withHostname(hostname): { jobTemplate+: { spec+: { template+: { spec+: { hostname: hostname } } } } }, - '#withImagePullSecrets':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), + '#withImagePullSecrets':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), withImagePullSecrets(imagePullSecrets): { jobTemplate+: { spec+: { template+: { spec+: { imagePullSecrets: if std.isArray(v=imagePullSecrets) then imagePullSecrets else [imagePullSecrets] } } } } }, - '#withImagePullSecretsMixin':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), + '#withImagePullSecretsMixin':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), withImagePullSecretsMixin(imagePullSecrets): { jobTemplate+: { spec+: { template+: { spec+: { imagePullSecrets+: if std.isArray(v=imagePullSecrets) then imagePullSecrets else [imagePullSecrets] } } } } }, '#withInitContainers':: d.fn(help='"List of initialization containers belonging to the pod. Init containers are executed in order prior to containers being started. If any init container fails, the pod is considered to have failed and is handled according to its restartPolicy. The name for an init container or normal container must be unique among all containers. Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes. 
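withHostUsers is another addition in the 1.29 tree; per its help text it is alpha-gated, so a sketch like the one below only takes effect on clusters with UserNamespacesSupport enabled (same path assumptions as above):

local k = import 'k.libsonnet';
local podSpec = k.batch.v1.cronJob.spec.jobTemplate.spec.template.spec;

podSpec.withHostUsers(false)                                // isolate the pod in its own user namespace
+ podSpec.withImagePullSecretsMixin([{ name: 'regcred' }])  // append a pull-secret reference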
The resourceRequirements of an init container are taken into account during scheduling by finding the highest request/limit for each resource type, and then using the max of of that value or the sum of the normal containers. Limits are applied to init containers in a similar fashion. Init containers cannot currently be added or removed. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/"', args=[d.arg(name='initContainers', type=d.T.array)]), withInitContainers(initContainers): { jobTemplate+: { spec+: { template+: { spec+: { initContainers: if std.isArray(v=initContainers) then initContainers else [initContainers] } } } } }, @@ -254,26 +266,34 @@ withNodeSelector(nodeSelector): { jobTemplate+: { spec+: { template+: { spec+: { nodeSelector: nodeSelector } } } } }, '#withNodeSelectorMixin':: d.fn(help="\"NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='nodeSelector', type=d.T.object)]), withNodeSelectorMixin(nodeSelector): { jobTemplate+: { spec+: { template+: { spec+: { nodeSelector+: nodeSelector } } } } }, - '#withOverhead':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md This field is alpha-level as of Kubernetes v1.16, and is only honored by servers that enable the PodOverhead feature."', args=[d.arg(name='overhead', type=d.T.object)]), + '#withOverhead':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md"', args=[d.arg(name='overhead', type=d.T.object)]), withOverhead(overhead): { jobTemplate+: { spec+: { template+: { spec+: { overhead: overhead } } } } }, - '#withOverheadMixin':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. 
If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md This field is alpha-level as of Kubernetes v1.16, and is only honored by servers that enable the PodOverhead feature."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='overhead', type=d.T.object)]), + '#withOverheadMixin':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='overhead', type=d.T.object)]), withOverheadMixin(overhead): { jobTemplate+: { spec+: { template+: { spec+: { overhead+: overhead } } } } }, - '#withPreemptionPolicy':: d.fn(help='"PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset. This field is beta-level, gated by the NonPreemptingPriority feature-gate."', args=[d.arg(name='preemptionPolicy', type=d.T.string)]), + '#withPreemptionPolicy':: d.fn(help='"PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset."', args=[d.arg(name='preemptionPolicy', type=d.T.string)]), withPreemptionPolicy(preemptionPolicy): { jobTemplate+: { spec+: { template+: { spec+: { preemptionPolicy: preemptionPolicy } } } } }, '#withPriority':: d.fn(help='"The priority value. Various system components use this field to find the priority of the pod. When Priority Admission Controller is enabled, it prevents users from setting this field. The admission controller populates this field from PriorityClassName. The higher the value, the higher the priority."', args=[d.arg(name='priority', type=d.T.integer)]), withPriority(priority): { jobTemplate+: { spec+: { template+: { spec+: { priority: priority } } } } }, '#withPriorityClassName':: d.fn(help="\"If specified, indicates the pod's priority. \\\"system-node-critical\\\" and \\\"system-cluster-critical\\\" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default.\"", args=[d.arg(name='priorityClassName', type=d.T.string)]), withPriorityClassName(priorityClassName): { jobTemplate+: { spec+: { template+: { spec+: { priorityClassName: priorityClassName } } } } }, - '#withReadinessGates':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. 
A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/0007-pod-ready%2B%2B.md"', args=[d.arg(name='readinessGates', type=d.T.array)]), + '#withReadinessGates':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates"', args=[d.arg(name='readinessGates', type=d.T.array)]), withReadinessGates(readinessGates): { jobTemplate+: { spec+: { template+: { spec+: { readinessGates: if std.isArray(v=readinessGates) then readinessGates else [readinessGates] } } } } }, - '#withReadinessGatesMixin':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/0007-pod-ready%2B%2B.md"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='readinessGates', type=d.T.array)]), + '#withReadinessGatesMixin':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='readinessGates', type=d.T.array)]), withReadinessGatesMixin(readinessGates): { jobTemplate+: { spec+: { template+: { spec+: { readinessGates+: if std.isArray(v=readinessGates) then readinessGates else [readinessGates] } } } } }, - '#withRestartPolicy':: d.fn(help='"Restart policy for all containers within the pod. One of Always, OnFailure, Never. Default to Always. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy"', args=[d.arg(name='restartPolicy', type=d.T.string)]), + '#withResourceClaims':: d.fn(help='"ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. The resources will be made available to those containers which consume them by name.\\n\\nThis is an alpha field and requires enabling the DynamicResourceAllocation feature gate.\\n\\nThis field is immutable."', args=[d.arg(name='resourceClaims', type=d.T.array)]), + withResourceClaims(resourceClaims): { jobTemplate+: { spec+: { template+: { spec+: { resourceClaims: if std.isArray(v=resourceClaims) then resourceClaims else [resourceClaims] } } } } }, + '#withResourceClaimsMixin':: d.fn(help='"ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. 
The resources will be made available to those containers which consume them by name.\\n\\nThis is an alpha field and requires enabling the DynamicResourceAllocation feature gate.\\n\\nThis field is immutable."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='resourceClaims', type=d.T.array)]), + withResourceClaimsMixin(resourceClaims): { jobTemplate+: { spec+: { template+: { spec+: { resourceClaims+: if std.isArray(v=resourceClaims) then resourceClaims else [resourceClaims] } } } } }, + '#withRestartPolicy':: d.fn(help='"Restart policy for all containers within the pod. One of Always, OnFailure, Never. In some contexts, only a subset of those values may be permitted. Default to Always. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy"', args=[d.arg(name='restartPolicy', type=d.T.string)]), withRestartPolicy(restartPolicy): { jobTemplate+: { spec+: { template+: { spec+: { restartPolicy: restartPolicy } } } } }, - '#withRuntimeClassName':: d.fn(help='"RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \\"legacy\\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md This is a beta feature as of Kubernetes v1.14."', args=[d.arg(name='runtimeClassName', type=d.T.string)]), + '#withRuntimeClassName':: d.fn(help='"RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \\"legacy\\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class"', args=[d.arg(name='runtimeClassName', type=d.T.string)]), withRuntimeClassName(runtimeClassName): { jobTemplate+: { spec+: { template+: { spec+: { runtimeClassName: runtimeClassName } } } } }, '#withSchedulerName':: d.fn(help='"If specified, the pod will be dispatched by specified scheduler. If not specified, the pod will be dispatched by default scheduler."', args=[d.arg(name='schedulerName', type=d.T.string)]), withSchedulerName(schedulerName): { jobTemplate+: { spec+: { template+: { spec+: { schedulerName: schedulerName } } } } }, + '#withSchedulingGates':: d.fn(help='"SchedulingGates is an opaque list of values that if specified will block scheduling the pod. If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the scheduler will not attempt to schedule the pod.\\n\\nSchedulingGates can only be set at pod creation time, and be removed only afterwards.\\n\\nThis is a beta feature enabled by the PodSchedulingReadiness feature gate."', args=[d.arg(name='schedulingGates', type=d.T.array)]), + withSchedulingGates(schedulingGates): { jobTemplate+: { spec+: { template+: { spec+: { schedulingGates: if std.isArray(v=schedulingGates) then schedulingGates else [schedulingGates] } } } } }, + '#withSchedulingGatesMixin':: d.fn(help='"SchedulingGates is an opaque list of values that if specified will block scheduling the pod. 
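schedulingGates follows the same set/append pattern; a sketch with a made-up gate name:

local k = import 'k.libsonnet';
local podSpec = k.batch.v1.cronJob.spec.jobTemplate.spec.template.spec;

// Pods stay SchedulingGated until a controller removes this gate from the pod.
podSpec.withSchedulingGates([{ name: 'example.com/quota-check' }])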
If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the scheduler will not attempt to schedule the pod.\\n\\nSchedulingGates can only be set at pod creation time, and be removed only afterwards.\\n\\nThis is a beta feature enabled by the PodSchedulingReadiness feature gate."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='schedulingGates', type=d.T.array)]), + withSchedulingGatesMixin(schedulingGates): { jobTemplate+: { spec+: { template+: { spec+: { schedulingGates+: if std.isArray(v=schedulingGates) then schedulingGates else [schedulingGates] } } } } }, '#withServiceAccount':: d.fn(help='"DeprecatedServiceAccount is a depreciated alias for ServiceAccountName. Deprecated: Use serviceAccountName instead."', args=[d.arg(name='serviceAccount', type=d.T.string)]), withServiceAccount(serviceAccount): { jobTemplate+: { spec+: { template+: { spec+: { serviceAccount: serviceAccount } } } } }, '#withServiceAccountName':: d.fn(help='"ServiceAccountName is the name of the ServiceAccount to use to run this pod. More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/"', args=[d.arg(name='serviceAccountName', type=d.T.string)]), @@ -304,21 +324,27 @@ withActiveDeadlineSeconds(activeDeadlineSeconds): { jobTemplate+: { spec+: { activeDeadlineSeconds: activeDeadlineSeconds } } }, '#withBackoffLimit':: d.fn(help='"Specifies the number of retries before marking this job failed. Defaults to 6"', args=[d.arg(name='backoffLimit', type=d.T.integer)]), withBackoffLimit(backoffLimit): { jobTemplate+: { spec+: { backoffLimit: backoffLimit } } }, - '#withCompletionMode':: d.fn(help="\"CompletionMode specifies how Pod completions are tracked. It can be `NonIndexed` (default) or `Indexed`.\\n\\n`NonIndexed` means that the Job is considered complete when there have been .spec.completions successfully completed Pods. Each Pod completion is homologous to each other.\\n\\n`Indexed` means that the Pods of a Job get an associated completion index from 0 to (.spec.completions - 1), available in the annotation batch.kubernetes.io/job-completion-index. The Job is considered complete when there is one successfully completed Pod for each index. When value is `Indexed`, .spec.completions must be specified and `.spec.parallelism` must be less than or equal to 10^5.\\n\\nThis field is alpha-level and is only honored by servers that enable the IndexedJob feature gate. More completion modes can be added in the future. If the Job controller observes a mode that it doesn't recognize, the controller skips updates for the Job.\"", args=[d.arg(name='completionMode', type=d.T.string)]), + '#withBackoffLimitPerIndex':: d.fn(help="\"Specifies the limit for the number of retries within an index before marking this index as failed. When enabled the number of failures per index is kept in the pod's batch.kubernetes.io/job-index-failure-count annotation. It can only be set when Job's completionMode=Indexed, and the Pod's restart policy is Never. The field is immutable. This field is beta-level. It can be used when the `JobBackoffLimitPerIndex` feature gate is enabled (enabled by default).\"", args=[d.arg(name='backoffLimitPerIndex', type=d.T.integer)]), + withBackoffLimitPerIndex(backoffLimitPerIndex): { jobTemplate+: { spec+: { backoffLimitPerIndex: backoffLimitPerIndex } } }, + '#withCompletionMode':: d.fn(help="\"completionMode specifies how Pod completions are tracked. 
It can be `NonIndexed` (default) or `Indexed`.\\n\\n`NonIndexed` means that the Job is considered complete when there have been .spec.completions successfully completed Pods. Each Pod completion is homologous to each other.\\n\\n`Indexed` means that the Pods of a Job get an associated completion index from 0 to (.spec.completions - 1), available in the annotation batch.kubernetes.io/job-completion-index. The Job is considered complete when there is one successfully completed Pod for each index. When value is `Indexed`, .spec.completions must be specified and `.spec.parallelism` must be less than or equal to 10^5. In addition, The Pod name takes the form `$(job-name)-$(index)-$(random-string)`, the Pod hostname takes the form `$(job-name)-$(index)`.\\n\\nMore completion modes can be added in the future. If the Job controller observes a mode that it doesn't recognize, which is possible during upgrades due to version skew, the controller skips updates for the Job.\"", args=[d.arg(name='completionMode', type=d.T.string)]), withCompletionMode(completionMode): { jobTemplate+: { spec+: { completionMode: completionMode } } }, - '#withCompletions':: d.fn(help='"Specifies the desired number of successfully finished pods the job should be run with. Setting to nil means that the success of any pod signals the success of all pods, and allows parallelism to have any positive value. Setting to 1 means that parallelism is limited to 1 and the success of that pod signals the success of the job. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/"', args=[d.arg(name='completions', type=d.T.integer)]), + '#withCompletions':: d.fn(help='"Specifies the desired number of successfully finished pods the job should be run with. Setting to null means that the success of any pod signals the success of all pods, and allows parallelism to have any positive value. Setting to 1 means that parallelism is limited to 1 and the success of that pod signals the success of the job. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/"', args=[d.arg(name='completions', type=d.T.integer)]), withCompletions(completions): { jobTemplate+: { spec+: { completions: completions } } }, '#withManualSelector':: d.fn(help='"manualSelector controls generation of pod labels and pod selectors. Leave `manualSelector` unset unless you are certain what you are doing. When false or unset, the system pick labels unique to this job and appends those labels to the pod template. When true, the user is responsible for picking unique labels and specifying the selector. Failure to pick a unique label may cause this and other jobs to not function correctly. However, You may see `manualSelector=true` in jobs that were created with the old `extensions/v1beta1` API. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/#specifying-your-own-pod-selector"', args=[d.arg(name='manualSelector', type=d.T.boolean)]), withManualSelector(manualSelector): { jobTemplate+: { spec+: { manualSelector: manualSelector } } }, + '#withMaxFailedIndexes':: d.fn(help='"Specifies the maximal number of failed indexes before marking the Job as failed, when backoffLimitPerIndex is set. Once the number of failed indexes exceeds this number the entire Job is marked as Failed and its execution is terminated. When left as null the job continues execution of all of its indexes and is marked with the `Complete` Job condition. 
It can only be specified when backoffLimitPerIndex is set. It can be null or up to completions. It is required and must be less than or equal to 10^4 when completions is greater than 10^5. This field is beta-level. It can be used when the `JobBackoffLimitPerIndex` feature gate is enabled (enabled by default)."', args=[d.arg(name='maxFailedIndexes', type=d.T.integer)]), + withMaxFailedIndexes(maxFailedIndexes): { jobTemplate+: { spec+: { maxFailedIndexes: maxFailedIndexes } } }, '#withParallelism':: d.fn(help='"Specifies the maximum desired number of pods the job should run at any given time. The actual number of pods running in steady state will be less than this number when ((.spec.completions - .status.successful) < .spec.parallelism), i.e. when the work left to do is less than max parallelism. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/"', args=[d.arg(name='parallelism', type=d.T.integer)]), withParallelism(parallelism): { jobTemplate+: { spec+: { parallelism: parallelism } } }, - '#withSuspend':: d.fn(help='"Suspend specifies whether the Job controller should create Pods or not. If a Job is created with suspend set to true, no Pods are created by the Job controller. If a Job is suspended after creation (i.e. the flag goes from false to true), the Job controller will delete all active Pods associated with this Job. Users must design their workload to gracefully handle this. Suspending a Job will reset the StartTime field of the Job, effectively resetting the ActiveDeadlineSeconds timer too. This is an alpha field and requires the SuspendJob feature gate to be enabled; otherwise this field may not be set to true. Defaults to false."', args=[d.arg(name='suspend', type=d.T.boolean)]), + '#withPodReplacementPolicy':: d.fn(help='"podReplacementPolicy specifies when to create replacement Pods. Possible values are: - TerminatingOrFailed means that we recreate pods\n when they are terminating (has a metadata.deletionTimestamp) or failed.\n- Failed means to wait until a previously created Pod is fully terminated (has phase\n Failed or Succeeded) before creating a replacement Pod.\n\nWhen using podFailurePolicy, Failed is the only allowed value. TerminatingOrFailed and Failed are allowed values when podFailurePolicy is not in use. This is a beta field. To use this, enable the JobPodReplacementPolicy feature toggle. This is on by default."', args=[d.arg(name='podReplacementPolicy', type=d.T.string)]), + withPodReplacementPolicy(podReplacementPolicy): { jobTemplate+: { spec+: { podReplacementPolicy: podReplacementPolicy } } }, + '#withSuspend':: d.fn(help='"suspend specifies whether the Job controller should create Pods or not. If a Job is created with suspend set to true, no Pods are created by the Job controller. If a Job is suspended after creation (i.e. the flag goes from false to true), the Job controller will delete all active Pods associated with this Job. Users must design their workload to gracefully handle this. Suspending a Job will reset the StartTime field of the Job, effectively resetting the ActiveDeadlineSeconds timer too. Defaults to false."', args=[d.arg(name='suspend', type=d.T.boolean)]), withSuspend(suspend): { jobTemplate+: { spec+: { suspend: suspend } } }, - '#withTtlSecondsAfterFinished':: d.fn(help="\"ttlSecondsAfterFinished limits the lifetime of a Job that has finished execution (either Complete or Failed). 
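For orientation, here is a minimal sketch (not part of the vendored patch) of how the new per-index retry and pod-replacement setters generated above could be composed; the `k` import path, resource name, and values are illustrative assumptions, and the nested function tree is assumed to mirror the generated CronJob object:

local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';  // assumed entry point of the vendored library
local cronJob = k.batch.v1.cronJob;

{
  // Indexed job with per-index retries; replacement pods are only created once the old pod has fully terminated.
  nightly_report:
    cronJob.new('nightly-report')
    + cronJob.spec.withSchedule('0 3 * * *')
    + cronJob.spec.withTimeZone('Etc/UTC')                               // new in 1.29, defined further down in this file
    + cronJob.spec.jobTemplate.spec.withCompletionMode('Indexed')
    + cronJob.spec.jobTemplate.spec.withCompletions(5)
    + cronJob.spec.jobTemplate.spec.withBackoffLimitPerIndex(2)          // new in 1.29
    + cronJob.spec.jobTemplate.spec.withMaxFailedIndexes(1)              // new in 1.29
    + cronJob.spec.jobTemplate.spec.withPodReplacementPolicy('Failed'),  // new in 1.29
}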
If this field is set, ttlSecondsAfterFinished after the Job finishes, it is eligible to be automatically deleted. When the Job is being deleted, its lifecycle guarantees (e.g. finalizers) will be honored. If this field is unset, the Job won't be automatically deleted. If this field is set to zero, the Job becomes eligible to be deleted immediately after it finishes. This field is alpha-level and is only honored by servers that enable the TTLAfterFinished feature.\"", args=[d.arg(name='ttlSecondsAfterFinished', type=d.T.integer)]), + '#withTtlSecondsAfterFinished':: d.fn(help="\"ttlSecondsAfterFinished limits the lifetime of a Job that has finished execution (either Complete or Failed). If this field is set, ttlSecondsAfterFinished after the Job finishes, it is eligible to be automatically deleted. When the Job is being deleted, its lifecycle guarantees (e.g. finalizers) will be honored. If this field is unset, the Job won't be automatically deleted. If this field is set to zero, the Job becomes eligible to be deleted immediately after it finishes.\"", args=[d.arg(name='ttlSecondsAfterFinished', type=d.T.integer)]), withTtlSecondsAfterFinished(ttlSecondsAfterFinished): { jobTemplate+: { spec+: { ttlSecondsAfterFinished: ttlSecondsAfterFinished } } }, }, }, - '#withConcurrencyPolicy':: d.fn(help="\"Specifies how to treat concurrent executions of a Job. Valid values are: - \\\"Allow\\\" (default): allows CronJobs to run concurrently; - \\\"Forbid\\\": forbids concurrent runs, skipping next run if previous run hasn't finished yet; - \\\"Replace\\\": cancels currently running job and replaces it with a new one\"", args=[d.arg(name='concurrencyPolicy', type=d.T.string)]), + '#withConcurrencyPolicy':: d.fn(help="\"Specifies how to treat concurrent executions of a Job. Valid values are:\\n\\n- \\\"Allow\\\" (default): allows CronJobs to run concurrently; - \\\"Forbid\\\": forbids concurrent runs, skipping next run if previous run hasn't finished yet; - \\\"Replace\\\": cancels currently running job and replaces it with a new one\"", args=[d.arg(name='concurrencyPolicy', type=d.T.string)]), withConcurrencyPolicy(concurrencyPolicy): { concurrencyPolicy: concurrencyPolicy }, '#withFailedJobsHistoryLimit':: d.fn(help='"The number of failed finished jobs to retain. Value must be non-negative integer. Defaults to 1."', args=[d.arg(name='failedJobsHistoryLimit', type=d.T.integer)]), withFailedJobsHistoryLimit(failedJobsHistoryLimit): { failedJobsHistoryLimit: failedJobsHistoryLimit }, @@ -330,6 +356,8 @@ withSuccessfulJobsHistoryLimit(successfulJobsHistoryLimit): { successfulJobsHistoryLimit: successfulJobsHistoryLimit }, '#withSuspend':: d.fn(help='"This flag tells the controller to suspend subsequent executions, it does not apply to already started executions. Defaults to false."', args=[d.arg(name='suspend', type=d.T.boolean)]), withSuspend(suspend): { suspend: suspend }, + '#withTimeZone':: d.fn(help='"The time zone name for the given schedule, see https://en.wikipedia.org/wiki/List_of_tz_database_time_zones. If not specified, this will default to the time zone of the kube-controller-manager process. The set of valid time zone names and the time zone offset is loaded from the system-wide time zone database by the API server during CronJob validation and the controller manager during execution. If no system-wide time zone database can be found a bundled version of the database is used instead. 
If the time zone name becomes invalid during the lifetime of a CronJob or due to a change in host configuration, the controller will stop creating new Jobs and will create a system event with the reason UnknownTimeZone. More information can be found in https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/#time-zones"', args=[d.arg(name='timeZone', type=d.T.string)]), + withTimeZone(timeZone): { timeZone: timeZone }, '#mixin': 'ignore', mixin: self, } diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/batch/v1beta1/cronJobStatus.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/batch/v1/cronJobStatus.libsonnet similarity index 100% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/batch/v1beta1/cronJobStatus.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/batch/v1/cronJobStatus.libsonnet diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/batch/v1/job.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/batch/v1/job.libsonnet similarity index 81% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/batch/v1/job.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/batch/v1/job.libsonnet index bd07c0ceebc..d4fea6172ab 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/batch/v1/job.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/batch/v1/job.libsonnet @@ -3,12 +3,10 @@ '#':: d.pkg(name='job', url='', help='"Job represents the configuration of a single job."'), '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -19,21 +17,21 @@ withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. 
The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { metadata+: { generateName: generateName } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { metadata+: { labels+: labels } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. 
This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { metadata+: { namespace: namespace } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. 
There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, @@ -41,9 +39,9 @@ withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { metadata+: { uid: uid } }, }, '#new':: d.fn(help='new returns an instance of Job', args=[d.arg(name='name', type=d.T.string)]), @@ -53,6 +51,13 @@ } + self.metadata.withName(name=name), '#spec':: d.obj(help='"JobSpec describes how the job execution will look like."'), spec: { + '#podFailurePolicy':: d.obj(help='"PodFailurePolicy describes how failed pods influence the backoffLimit."'), + podFailurePolicy: { + '#withRules':: d.fn(help='"A list of pod failure policy rules. The rules are evaluated in order. Once a rule matches a Pod failure, the remaining of the rules are ignored. When no rule matches the Pod failure, the default handling applies - the counter of pod failures is incremented and it is checked against the backoffLimit. 
At most 20 elements are allowed."', args=[d.arg(name='rules', type=d.T.array)]), + withRules(rules): { spec+: { podFailurePolicy+: { rules: if std.isArray(v=rules) then rules else [rules] } } }, + '#withRulesMixin':: d.fn(help='"A list of pod failure policy rules. The rules are evaluated in order. Once a rule matches a Pod failure, the remaining of the rules are ignored. When no rule matches the Pod failure, the default handling applies - the counter of pod failures is incremented and it is checked against the backoffLimit. At most 20 elements are allowed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='rules', type=d.T.array)]), + withRulesMixin(rules): { spec+: { podFailurePolicy+: { rules+: if std.isArray(v=rules) then rules else [rules] } } }, + }, '#selector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), selector: { '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), @@ -68,12 +73,10 @@ template: { '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { spec+: { template+: { metadata+: { annotations: annotations } } } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { spec+: { template+: { metadata+: { annotations+: annotations } } } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. 
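The podFailurePolicy object added above lets a Job distinguish non-retriable application errors from infrastructure disruptions instead of counting everything against backoffLimit. A hedged sketch of one way it might be used (the import path, names, and rule values are illustrative assumptions; the rule objects follow the Kubernetes PodFailurePolicyRule schema):

local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';  // assumed entry point of the vendored library
local job = k.batch.v1.job;

{
  index_rebuild:
    job.new('index-rebuild')
    + job.spec.withBackoffLimit(4)
    + job.spec.template.spec.withRestartPolicy('Never')  // podFailurePolicy requires restartPolicy=Never
    + job.spec.podFailurePolicy.withRules([
      // Fail the whole Job on a non-retriable application error (exit code 42 of the 'rebuild' container).
      { action: 'FailJob', onExitCodes: { containerName: 'rebuild', operator: 'In', values: [42] } },
      // Do not count pods evicted by disruptions (e.g. node drain) against backoffLimit.
      { action: 'Ignore', onPodConditions: [{ type: 'DisruptionTarget', status: 'True' }] },
    ]),
}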
This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { spec+: { template+: { metadata+: { clusterName: clusterName } } } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { spec+: { template+: { metadata+: { creationTimestamp: creationTimestamp } } } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -84,21 +87,21 @@ withFinalizers(finalizers): { spec+: { template+: { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } } } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { spec+: { template+: { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } } } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. 
If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { spec+: { template+: { metadata+: { generateName: generateName } } } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { spec+: { template+: { metadata+: { generation: generation } } } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { spec+: { template+: { metadata+: { labels: labels } } } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { spec+: { template+: { metadata+: { labels+: labels } } } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { spec+: { template+: { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } } } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. 
A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { spec+: { template+: { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } } } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { spec+: { template+: { metadata+: { name: name } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { spec+: { template+: { metadata+: { namespace: namespace } } } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { spec+: { template+: { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } } } }, @@ -106,9 +109,9 @@ withOwnerReferencesMixin(ownerReferences): { spec+: { template+: { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } } } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. 
May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { spec+: { template+: { metadata+: { resourceVersion: resourceVersion } } } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { spec+: { template+: { metadata+: { selfLink: selfLink } } } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { spec+: { template+: { metadata+: { uid: uid } } } }, }, '#spec':: d.obj(help='"PodSpec is a description of a pod."'), @@ -167,6 +170,11 @@ '#withSearchesMixin':: d.fn(help='"A list of DNS search domains for host-name lookup. This will be appended to the base search paths generated from DNSPolicy. Duplicated search paths will be removed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='searches', type=d.T.array)]), withSearchesMixin(searches): { spec+: { template+: { spec+: { dnsConfig+: { searches+: if std.isArray(v=searches) then searches else [searches] } } } } }, }, + '#os':: d.obj(help='"PodOS defines the OS parameters of a pod."'), + os: { + '#withName':: d.fn(help='"Name is the name of the operating system. The currently supported values are linux and windows. Additional value may be defined in future and can be one of: https://github.com/opencontainers/runtime-spec/blob/master/config.md#platform-specific-configuration Clients should expect to handle additional values and treat unrecognized values in this field as os: null"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { spec+: { template+: { spec+: { os+: { name: name } } } } }, + }, '#securityContext':: d.obj(help='"PodSecurityContext holds pod-level security attributes and common container settings. Some fields are also present in container.securityContext. 
Field values of container.securityContext take precedence over field values of PodSecurityContext."'), securityContext: { '#seLinuxOptions':: d.obj(help='"SELinuxOptions are the labels to be applied to the container"'), @@ -182,7 +190,7 @@ }, '#seccompProfile':: d.obj(help="\"SeccompProfile defines a pod/container's seccomp profile settings. Only one profile source may be set.\""), seccompProfile: { - '#withLocalhostProfile':: d.fn(help="\"localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must only be set if type is \\\"Localhost\\\".\"", args=[d.arg(name='localhostProfile', type=d.T.string)]), + '#withLocalhostProfile':: d.fn(help="\"localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must be set if type is \\\"Localhost\\\". Must NOT be set for any other type.\"", args=[d.arg(name='localhostProfile', type=d.T.string)]), withLocalhostProfile(localhostProfile): { spec+: { template+: { spec+: { securityContext+: { seccompProfile+: { localhostProfile: localhostProfile } } } } } }, '#withType':: d.fn(help='"type indicates which kind of seccomp profile will be applied. Valid options are:\\n\\nLocalhost - a profile defined in a file on the node should be used. RuntimeDefault - the container runtime default profile should be used. Unconfined - no profile should be applied."', args=[d.arg(name='type', type=d.T.string)]), withType(type): { spec+: { template+: { spec+: { securityContext+: { seccompProfile+: { type: type } } } } } }, @@ -193,26 +201,28 @@ withGmsaCredentialSpec(gmsaCredentialSpec): { spec+: { template+: { spec+: { securityContext+: { windowsOptions+: { gmsaCredentialSpec: gmsaCredentialSpec } } } } } }, '#withGmsaCredentialSpecName':: d.fn(help='"GMSACredentialSpecName is the name of the GMSA credential spec to use."', args=[d.arg(name='gmsaCredentialSpecName', type=d.T.string)]), withGmsaCredentialSpecName(gmsaCredentialSpecName): { spec+: { template+: { spec+: { securityContext+: { windowsOptions+: { gmsaCredentialSpecName: gmsaCredentialSpecName } } } } } }, + '#withHostProcess':: d.fn(help="\"HostProcess determines if a container should be run as a 'Host Process' container. All of a Pod's containers must have the same effective HostProcess value (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). In addition, if HostProcess is true then HostNetwork must also be set to true.\"", args=[d.arg(name='hostProcess', type=d.T.boolean)]), + withHostProcess(hostProcess): { spec+: { template+: { spec+: { securityContext+: { windowsOptions+: { hostProcess: hostProcess } } } } } }, '#withRunAsUserName':: d.fn(help='"The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence."', args=[d.arg(name='runAsUserName', type=d.T.string)]), withRunAsUserName(runAsUserName): { spec+: { template+: { spec+: { securityContext+: { windowsOptions+: { runAsUserName: runAsUserName } } } } } }, }, - '#withFsGroup':: d.fn(help="\"A special supplemental group that applies to all containers in a pod. 
Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod:\\n\\n1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw----\\n\\nIf unset, the Kubelet will not modify the ownership and permissions of any volume.\"", args=[d.arg(name='fsGroup', type=d.T.integer)]), + '#withFsGroup':: d.fn(help="\"A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod:\\n\\n1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw----\\n\\nIf unset, the Kubelet will not modify the ownership and permissions of any volume. Note that this field cannot be set when spec.os.name is windows.\"", args=[d.arg(name='fsGroup', type=d.T.integer)]), withFsGroup(fsGroup): { spec+: { template+: { spec+: { securityContext+: { fsGroup: fsGroup } } } } }, - '#withFsGroupChangePolicy':: d.fn(help='"fsGroupChangePolicy defines behavior of changing ownership and permission of the volume before being exposed inside Pod. This field will only apply to volume types which support fsGroup based ownership(and permissions). It will have no effect on ephemeral volume types such as: secret, configmaps and emptydir. Valid values are \\"OnRootMismatch\\" and \\"Always\\". If not specified, \\"Always\\" is used."', args=[d.arg(name='fsGroupChangePolicy', type=d.T.string)]), + '#withFsGroupChangePolicy':: d.fn(help='"fsGroupChangePolicy defines behavior of changing ownership and permission of the volume before being exposed inside Pod. This field will only apply to volume types which support fsGroup based ownership(and permissions). It will have no effect on ephemeral volume types such as: secret, configmaps and emptydir. Valid values are \\"OnRootMismatch\\" and \\"Always\\". If not specified, \\"Always\\" is used. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='fsGroupChangePolicy', type=d.T.string)]), withFsGroupChangePolicy(fsGroupChangePolicy): { spec+: { template+: { spec+: { securityContext+: { fsGroupChangePolicy: fsGroupChangePolicy } } } } }, - '#withRunAsGroup':: d.fn(help='"The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container."', args=[d.arg(name='runAsGroup', type=d.T.integer)]), + '#withRunAsGroup':: d.fn(help='"The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='runAsGroup', type=d.T.integer)]), withRunAsGroup(runAsGroup): { spec+: { template+: { spec+: { securityContext+: { runAsGroup: runAsGroup } } } } }, '#withRunAsNonRoot':: d.fn(help='"Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in SecurityContext. 
If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence."', args=[d.arg(name='runAsNonRoot', type=d.T.boolean)]), withRunAsNonRoot(runAsNonRoot): { spec+: { template+: { spec+: { securityContext+: { runAsNonRoot: runAsNonRoot } } } } }, - '#withRunAsUser':: d.fn(help='"The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container."', args=[d.arg(name='runAsUser', type=d.T.integer)]), + '#withRunAsUser':: d.fn(help='"The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='runAsUser', type=d.T.integer)]), withRunAsUser(runAsUser): { spec+: { template+: { spec+: { securityContext+: { runAsUser: runAsUser } } } } }, - '#withSupplementalGroups':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID. If unspecified, no groups will be added to any container.\"", args=[d.arg(name='supplementalGroups', type=d.T.array)]), + '#withSupplementalGroups':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID, the fsGroup (if specified), and group memberships defined in the container image for the uid of the container process. If unspecified, no additional groups are added to any container. Note that group memberships defined in the container image for the uid of the container process are still effective, even if they are not included in this list. Note that this field cannot be set when spec.os.name is windows.\"", args=[d.arg(name='supplementalGroups', type=d.T.array)]), withSupplementalGroups(supplementalGroups): { spec+: { template+: { spec+: { securityContext+: { supplementalGroups: if std.isArray(v=supplementalGroups) then supplementalGroups else [supplementalGroups] } } } } }, - '#withSupplementalGroupsMixin':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID. If unspecified, no groups will be added to any container.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='supplementalGroups', type=d.T.array)]), + '#withSupplementalGroupsMixin':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID, the fsGroup (if specified), and group memberships defined in the container image for the uid of the container process. If unspecified, no additional groups are added to any container. Note that group memberships defined in the container image for the uid of the container process are still effective, even if they are not included in this list. 
Note that this field cannot be set when spec.os.name is windows.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='supplementalGroups', type=d.T.array)]), withSupplementalGroupsMixin(supplementalGroups): { spec+: { template+: { spec+: { securityContext+: { supplementalGroups+: if std.isArray(v=supplementalGroups) then supplementalGroups else [supplementalGroups] } } } } }, - '#withSysctls':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch."', args=[d.arg(name='sysctls', type=d.T.array)]), + '#withSysctls':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='sysctls', type=d.T.array)]), withSysctls(sysctls): { spec+: { template+: { spec+: { securityContext+: { sysctls: if std.isArray(v=sysctls) then sysctls else [sysctls] } } } } }, - '#withSysctlsMixin':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='sysctls', type=d.T.array)]), + '#withSysctlsMixin':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch. Note that this field cannot be set when spec.os.name is windows."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='sysctls', type=d.T.array)]), withSysctlsMixin(sysctls): { spec+: { template+: { spec+: { securityContext+: { sysctls+: if std.isArray(v=sysctls) then sysctls else [sysctls] } } } } }, }, '#withActiveDeadlineSeconds':: d.fn(help='"Optional duration in seconds the pod may be active on the node relative to StartTime before the system will actively try to mark it failed and kill associated containers. Value must be a positive integer."', args=[d.arg(name='activeDeadlineSeconds', type=d.T.integer)]), @@ -227,9 +237,9 @@ withDnsPolicy(dnsPolicy): { spec+: { template+: { spec+: { dnsPolicy: dnsPolicy } } } }, '#withEnableServiceLinks':: d.fn(help="\"EnableServiceLinks indicates whether information about services should be injected into pod's environment variables, matching the syntax of Docker links. Optional: Defaults to true.\"", args=[d.arg(name='enableServiceLinks', type=d.T.boolean)]), withEnableServiceLinks(enableServiceLinks): { spec+: { template+: { spec+: { enableServiceLinks: enableServiceLinks } } } }, - '#withEphemeralContainers':: d.fn(help="\"List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. This field is alpha-level and is only honored by servers that enable the EphemeralContainers feature.\"", args=[d.arg(name='ephemeralContainers', type=d.T.array)]), + '#withEphemeralContainers':: d.fn(help="\"List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. 
This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource.\"", args=[d.arg(name='ephemeralContainers', type=d.T.array)]), withEphemeralContainers(ephemeralContainers): { spec+: { template+: { spec+: { ephemeralContainers: if std.isArray(v=ephemeralContainers) then ephemeralContainers else [ephemeralContainers] } } } }, - '#withEphemeralContainersMixin':: d.fn(help="\"List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. This field is alpha-level and is only honored by servers that enable the EphemeralContainers feature.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='ephemeralContainers', type=d.T.array)]), + '#withEphemeralContainersMixin':: d.fn(help="\"List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='ephemeralContainers', type=d.T.array)]), withEphemeralContainersMixin(ephemeralContainers): { spec+: { template+: { spec+: { ephemeralContainers+: if std.isArray(v=ephemeralContainers) then ephemeralContainers else [ephemeralContainers] } } } }, '#withHostAliases':: d.fn(help="\"HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts file if specified. This is only valid for non-hostNetwork pods.\"", args=[d.arg(name='hostAliases', type=d.T.array)]), withHostAliases(hostAliases): { spec+: { template+: { spec+: { hostAliases: if std.isArray(v=hostAliases) then hostAliases else [hostAliases] } } } }, @@ -241,11 +251,13 @@ withHostNetwork(hostNetwork): { spec+: { template+: { spec+: { hostNetwork: hostNetwork } } } }, '#withHostPID':: d.fn(help="\"Use the host's pid namespace. Optional: Default to false.\"", args=[d.arg(name='hostPID', type=d.T.boolean)]), withHostPID(hostPID): { spec+: { template+: { spec+: { hostPID: hostPID } } } }, + '#withHostUsers':: d.fn(help="\"Use the host's user namespace. Optional: Default to true. If set to true or not present, the pod will be run in the host user namespace, useful for when the pod needs a feature only available to the host user namespace, such as loading a kernel module with CAP_SYS_MODULE. When set to false, a new userns is created for the pod. Setting false is useful for mitigating container breakout vulnerabilities even allowing users to run their containers as root without actually having root privileges on the host. 
This field is alpha-level and is only honored by servers that enable the UserNamespacesSupport feature.\"", args=[d.arg(name='hostUsers', type=d.T.boolean)]), + withHostUsers(hostUsers): { spec+: { template+: { spec+: { hostUsers: hostUsers } } } }, '#withHostname':: d.fn(help="\"Specifies the hostname of the Pod If not specified, the pod's hostname will be set to a system-defined value.\"", args=[d.arg(name='hostname', type=d.T.string)]), withHostname(hostname): { spec+: { template+: { spec+: { hostname: hostname } } } }, - '#withImagePullSecrets':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), + '#withImagePullSecrets':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), withImagePullSecrets(imagePullSecrets): { spec+: { template+: { spec+: { imagePullSecrets: if std.isArray(v=imagePullSecrets) then imagePullSecrets else [imagePullSecrets] } } } }, - '#withImagePullSecretsMixin':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), + '#withImagePullSecretsMixin':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), withImagePullSecretsMixin(imagePullSecrets): { spec+: { template+: { spec+: { imagePullSecrets+: if std.isArray(v=imagePullSecrets) then imagePullSecrets else [imagePullSecrets] } } } }, '#withInitContainers':: d.fn(help='"List of initialization containers belonging to the pod. Init containers are executed in order prior to containers being started. If any init container fails, the pod is considered to have failed and is handled according to its restartPolicy. The name for an init container or normal container must be unique among all containers. Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes. 
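For orientation, a minimal usage sketch (illustrative only, not part of the vendored files), assuming the regenerated bundle is imported from its vendored 1.29 path and that these setters sit on the batch/v1 Job builder, as the spec.template.spec paths above suggest:

    local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';
    local job = k.batch.v1.job;

    // Each generated with* function returns a partial object; compose them with `+`.
    job.new('example')
    + job.spec.template.spec.withHostUsers(false)  // new in 1.29: opt out of the host user namespace
    + job.spec.template.spec.securityContext.withRunAsNonRoot(true)
    + job.spec.template.spec.securityContext.withFsGroup(10001)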
The resourceRequirements of an init container are taken into account during scheduling by finding the highest request/limit for each resource type, and then using the max of of that value or the sum of the normal containers. Limits are applied to init containers in a similar fashion. Init containers cannot currently be added or removed. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/"', args=[d.arg(name='initContainers', type=d.T.array)]), withInitContainers(initContainers): { spec+: { template+: { spec+: { initContainers: if std.isArray(v=initContainers) then initContainers else [initContainers] } } } }, @@ -257,26 +269,34 @@ withNodeSelector(nodeSelector): { spec+: { template+: { spec+: { nodeSelector: nodeSelector } } } }, '#withNodeSelectorMixin':: d.fn(help="\"NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='nodeSelector', type=d.T.object)]), withNodeSelectorMixin(nodeSelector): { spec+: { template+: { spec+: { nodeSelector+: nodeSelector } } } }, - '#withOverhead':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md This field is alpha-level as of Kubernetes v1.16, and is only honored by servers that enable the PodOverhead feature."', args=[d.arg(name='overhead', type=d.T.object)]), + '#withOverhead':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md"', args=[d.arg(name='overhead', type=d.T.object)]), withOverhead(overhead): { spec+: { template+: { spec+: { overhead: overhead } } } }, - '#withOverheadMixin':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. 
If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md This field is alpha-level as of Kubernetes v1.16, and is only honored by servers that enable the PodOverhead feature."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='overhead', type=d.T.object)]), + '#withOverheadMixin':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='overhead', type=d.T.object)]), withOverheadMixin(overhead): { spec+: { template+: { spec+: { overhead+: overhead } } } }, - '#withPreemptionPolicy':: d.fn(help='"PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset. This field is beta-level, gated by the NonPreemptingPriority feature-gate."', args=[d.arg(name='preemptionPolicy', type=d.T.string)]), + '#withPreemptionPolicy':: d.fn(help='"PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset."', args=[d.arg(name='preemptionPolicy', type=d.T.string)]), withPreemptionPolicy(preemptionPolicy): { spec+: { template+: { spec+: { preemptionPolicy: preemptionPolicy } } } }, '#withPriority':: d.fn(help='"The priority value. Various system components use this field to find the priority of the pod. When Priority Admission Controller is enabled, it prevents users from setting this field. The admission controller populates this field from PriorityClassName. The higher the value, the higher the priority."', args=[d.arg(name='priority', type=d.T.integer)]), withPriority(priority): { spec+: { template+: { spec+: { priority: priority } } } }, '#withPriorityClassName':: d.fn(help="\"If specified, indicates the pod's priority. \\\"system-node-critical\\\" and \\\"system-cluster-critical\\\" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default.\"", args=[d.arg(name='priorityClassName', type=d.T.string)]), withPriorityClassName(priorityClassName): { spec+: { template+: { spec+: { priorityClassName: priorityClassName } } } }, - '#withReadinessGates':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. 
A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/0007-pod-ready%2B%2B.md"', args=[d.arg(name='readinessGates', type=d.T.array)]), + '#withReadinessGates':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates"', args=[d.arg(name='readinessGates', type=d.T.array)]), withReadinessGates(readinessGates): { spec+: { template+: { spec+: { readinessGates: if std.isArray(v=readinessGates) then readinessGates else [readinessGates] } } } }, - '#withReadinessGatesMixin':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/0007-pod-ready%2B%2B.md"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='readinessGates', type=d.T.array)]), + '#withReadinessGatesMixin':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='readinessGates', type=d.T.array)]), withReadinessGatesMixin(readinessGates): { spec+: { template+: { spec+: { readinessGates+: if std.isArray(v=readinessGates) then readinessGates else [readinessGates] } } } }, - '#withRestartPolicy':: d.fn(help='"Restart policy for all containers within the pod. One of Always, OnFailure, Never. Default to Always. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy"', args=[d.arg(name='restartPolicy', type=d.T.string)]), + '#withResourceClaims':: d.fn(help='"ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. The resources will be made available to those containers which consume them by name.\\n\\nThis is an alpha field and requires enabling the DynamicResourceAllocation feature gate.\\n\\nThis field is immutable."', args=[d.arg(name='resourceClaims', type=d.T.array)]), + withResourceClaims(resourceClaims): { spec+: { template+: { spec+: { resourceClaims: if std.isArray(v=resourceClaims) then resourceClaims else [resourceClaims] } } } }, + '#withResourceClaimsMixin':: d.fn(help='"ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. The resources will be made available to those containers which consume them by name.\\n\\nThis is an alpha field and requires enabling the DynamicResourceAllocation feature gate.\\n\\nThis field is immutable."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='resourceClaims', type=d.T.array)]), + withResourceClaimsMixin(resourceClaims): { spec+: { template+: { spec+: { resourceClaims+: if std.isArray(v=resourceClaims) then resourceClaims else [resourceClaims] } } } }, + '#withRestartPolicy':: d.fn(help='"Restart policy for all containers within the pod. 
One of Always, OnFailure, Never. In some contexts, only a subset of those values may be permitted. Default to Always. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy"', args=[d.arg(name='restartPolicy', type=d.T.string)]), withRestartPolicy(restartPolicy): { spec+: { template+: { spec+: { restartPolicy: restartPolicy } } } }, - '#withRuntimeClassName':: d.fn(help='"RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \\"legacy\\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md This is a beta feature as of Kubernetes v1.14."', args=[d.arg(name='runtimeClassName', type=d.T.string)]), + '#withRuntimeClassName':: d.fn(help='"RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \\"legacy\\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class"', args=[d.arg(name='runtimeClassName', type=d.T.string)]), withRuntimeClassName(runtimeClassName): { spec+: { template+: { spec+: { runtimeClassName: runtimeClassName } } } }, '#withSchedulerName':: d.fn(help='"If specified, the pod will be dispatched by specified scheduler. If not specified, the pod will be dispatched by default scheduler."', args=[d.arg(name='schedulerName', type=d.T.string)]), withSchedulerName(schedulerName): { spec+: { template+: { spec+: { schedulerName: schedulerName } } } }, + '#withSchedulingGates':: d.fn(help='"SchedulingGates is an opaque list of values that if specified will block scheduling the pod. If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the scheduler will not attempt to schedule the pod.\\n\\nSchedulingGates can only be set at pod creation time, and be removed only afterwards.\\n\\nThis is a beta feature enabled by the PodSchedulingReadiness feature gate."', args=[d.arg(name='schedulingGates', type=d.T.array)]), + withSchedulingGates(schedulingGates): { spec+: { template+: { spec+: { schedulingGates: if std.isArray(v=schedulingGates) then schedulingGates else [schedulingGates] } } } }, + '#withSchedulingGatesMixin':: d.fn(help='"SchedulingGates is an opaque list of values that if specified will block scheduling the pod. If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the scheduler will not attempt to schedule the pod.\\n\\nSchedulingGates can only be set at pod creation time, and be removed only afterwards.\\n\\nThis is a beta feature enabled by the PodSchedulingReadiness feature gate."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='schedulingGates', type=d.T.array)]), + withSchedulingGatesMixin(schedulingGates): { spec+: { template+: { spec+: { schedulingGates+: if std.isArray(v=schedulingGates) then schedulingGates else [schedulingGates] } } } }, '#withServiceAccount':: d.fn(help='"DeprecatedServiceAccount is a depreciated alias for ServiceAccountName. 
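The scheduling-gate setters added above compose the same way; a hedged sketch under the same import assumptions (the gate name is a placeholder):

    local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';
    local job = k.batch.v1.job;

    // Keep the pod in the SchedulingGated state until a controller removes the gate.
    job.new('gated')
    + job.spec.template.spec.withSchedulingGates([{ name: 'example.com/provisioning' }])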
Deprecated: Use serviceAccountName instead."', args=[d.arg(name='serviceAccount', type=d.T.string)]), withServiceAccount(serviceAccount): { spec+: { template+: { spec+: { serviceAccount: serviceAccount } } } }, '#withServiceAccountName':: d.fn(help='"ServiceAccountName is the name of the ServiceAccount to use to run this pod. More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/"', args=[d.arg(name='serviceAccountName', type=d.T.string)]), @@ -307,17 +327,23 @@ withActiveDeadlineSeconds(activeDeadlineSeconds): { spec+: { activeDeadlineSeconds: activeDeadlineSeconds } }, '#withBackoffLimit':: d.fn(help='"Specifies the number of retries before marking this job failed. Defaults to 6"', args=[d.arg(name='backoffLimit', type=d.T.integer)]), withBackoffLimit(backoffLimit): { spec+: { backoffLimit: backoffLimit } }, - '#withCompletionMode':: d.fn(help="\"CompletionMode specifies how Pod completions are tracked. It can be `NonIndexed` (default) or `Indexed`.\\n\\n`NonIndexed` means that the Job is considered complete when there have been .spec.completions successfully completed Pods. Each Pod completion is homologous to each other.\\n\\n`Indexed` means that the Pods of a Job get an associated completion index from 0 to (.spec.completions - 1), available in the annotation batch.kubernetes.io/job-completion-index. The Job is considered complete when there is one successfully completed Pod for each index. When value is `Indexed`, .spec.completions must be specified and `.spec.parallelism` must be less than or equal to 10^5.\\n\\nThis field is alpha-level and is only honored by servers that enable the IndexedJob feature gate. More completion modes can be added in the future. If the Job controller observes a mode that it doesn't recognize, the controller skips updates for the Job.\"", args=[d.arg(name='completionMode', type=d.T.string)]), + '#withBackoffLimitPerIndex':: d.fn(help="\"Specifies the limit for the number of retries within an index before marking this index as failed. When enabled the number of failures per index is kept in the pod's batch.kubernetes.io/job-index-failure-count annotation. It can only be set when Job's completionMode=Indexed, and the Pod's restart policy is Never. The field is immutable. This field is beta-level. It can be used when the `JobBackoffLimitPerIndex` feature gate is enabled (enabled by default).\"", args=[d.arg(name='backoffLimitPerIndex', type=d.T.integer)]), + withBackoffLimitPerIndex(backoffLimitPerIndex): { spec+: { backoffLimitPerIndex: backoffLimitPerIndex } }, + '#withCompletionMode':: d.fn(help="\"completionMode specifies how Pod completions are tracked. It can be `NonIndexed` (default) or `Indexed`.\\n\\n`NonIndexed` means that the Job is considered complete when there have been .spec.completions successfully completed Pods. Each Pod completion is homologous to each other.\\n\\n`Indexed` means that the Pods of a Job get an associated completion index from 0 to (.spec.completions - 1), available in the annotation batch.kubernetes.io/job-completion-index. The Job is considered complete when there is one successfully completed Pod for each index. When value is `Indexed`, .spec.completions must be specified and `.spec.parallelism` must be less than or equal to 10^5. In addition, The Pod name takes the form `$(job-name)-$(index)-$(random-string)`, the Pod hostname takes the form `$(job-name)-$(index)`.\\n\\nMore completion modes can be added in the future. 
If the Job controller observes a mode that it doesn't recognize, which is possible during upgrades due to version skew, the controller skips updates for the Job.\"", args=[d.arg(name='completionMode', type=d.T.string)]), withCompletionMode(completionMode): { spec+: { completionMode: completionMode } }, - '#withCompletions':: d.fn(help='"Specifies the desired number of successfully finished pods the job should be run with. Setting to nil means that the success of any pod signals the success of all pods, and allows parallelism to have any positive value. Setting to 1 means that parallelism is limited to 1 and the success of that pod signals the success of the job. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/"', args=[d.arg(name='completions', type=d.T.integer)]), + '#withCompletions':: d.fn(help='"Specifies the desired number of successfully finished pods the job should be run with. Setting to null means that the success of any pod signals the success of all pods, and allows parallelism to have any positive value. Setting to 1 means that parallelism is limited to 1 and the success of that pod signals the success of the job. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/"', args=[d.arg(name='completions', type=d.T.integer)]), withCompletions(completions): { spec+: { completions: completions } }, '#withManualSelector':: d.fn(help='"manualSelector controls generation of pod labels and pod selectors. Leave `manualSelector` unset unless you are certain what you are doing. When false or unset, the system pick labels unique to this job and appends those labels to the pod template. When true, the user is responsible for picking unique labels and specifying the selector. Failure to pick a unique label may cause this and other jobs to not function correctly. However, You may see `manualSelector=true` in jobs that were created with the old `extensions/v1beta1` API. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/#specifying-your-own-pod-selector"', args=[d.arg(name='manualSelector', type=d.T.boolean)]), withManualSelector(manualSelector): { spec+: { manualSelector: manualSelector } }, + '#withMaxFailedIndexes':: d.fn(help='"Specifies the maximal number of failed indexes before marking the Job as failed, when backoffLimitPerIndex is set. Once the number of failed indexes exceeds this number the entire Job is marked as Failed and its execution is terminated. When left as null the job continues execution of all of its indexes and is marked with the `Complete` Job condition. It can only be specified when backoffLimitPerIndex is set. It can be null or up to completions. It is required and must be less than or equal to 10^4 when is completions greater than 10^5. This field is beta-level. It can be used when the `JobBackoffLimitPerIndex` feature gate is enabled (enabled by default)."', args=[d.arg(name='maxFailedIndexes', type=d.T.integer)]), + withMaxFailedIndexes(maxFailedIndexes): { spec+: { maxFailedIndexes: maxFailedIndexes } }, '#withParallelism':: d.fn(help='"Specifies the maximum desired number of pods the job should run at any given time. The actual number of pods running in steady state will be less than this number when ((.spec.completions - .status.successful) < .spec.parallelism), i.e. when the work left to do is less than max parallelism. 
More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/"', args=[d.arg(name='parallelism', type=d.T.integer)]), withParallelism(parallelism): { spec+: { parallelism: parallelism } }, - '#withSuspend':: d.fn(help='"Suspend specifies whether the Job controller should create Pods or not. If a Job is created with suspend set to true, no Pods are created by the Job controller. If a Job is suspended after creation (i.e. the flag goes from false to true), the Job controller will delete all active Pods associated with this Job. Users must design their workload to gracefully handle this. Suspending a Job will reset the StartTime field of the Job, effectively resetting the ActiveDeadlineSeconds timer too. This is an alpha field and requires the SuspendJob feature gate to be enabled; otherwise this field may not be set to true. Defaults to false."', args=[d.arg(name='suspend', type=d.T.boolean)]), + '#withPodReplacementPolicy':: d.fn(help='"podReplacementPolicy specifies when to create replacement Pods. Possible values are: - TerminatingOrFailed means that we recreate pods\\n when they are terminating (has a metadata.deletionTimestamp) or failed.\\n- Failed means to wait until a previously created Pod is fully terminated (has phase\\n Failed or Succeeded) before creating a replacement Pod.\\n\\nWhen using podFailurePolicy, Failed is the the only allowed value. TerminatingOrFailed and Failed are allowed values when podFailurePolicy is not in use. This is an beta field. To use this, enable the JobPodReplacementPolicy feature toggle. This is on by default."', args=[d.arg(name='podReplacementPolicy', type=d.T.string)]), + withPodReplacementPolicy(podReplacementPolicy): { spec+: { podReplacementPolicy: podReplacementPolicy } }, + '#withSuspend':: d.fn(help='"suspend specifies whether the Job controller should create Pods or not. If a Job is created with suspend set to true, no Pods are created by the Job controller. If a Job is suspended after creation (i.e. the flag goes from false to true), the Job controller will delete all active Pods associated with this Job. Users must design their workload to gracefully handle this. Suspending a Job will reset the StartTime field of the Job, effectively resetting the ActiveDeadlineSeconds timer too. Defaults to false."', args=[d.arg(name='suspend', type=d.T.boolean)]), withSuspend(suspend): { spec+: { suspend: suspend } }, - '#withTtlSecondsAfterFinished':: d.fn(help="\"ttlSecondsAfterFinished limits the lifetime of a Job that has finished execution (either Complete or Failed). If this field is set, ttlSecondsAfterFinished after the Job finishes, it is eligible to be automatically deleted. When the Job is being deleted, its lifecycle guarantees (e.g. finalizers) will be honored. If this field is unset, the Job won't be automatically deleted. If this field is set to zero, the Job becomes eligible to be deleted immediately after it finishes. This field is alpha-level and is only honored by servers that enable the TTLAfterFinished feature.\"", args=[d.arg(name='ttlSecondsAfterFinished', type=d.T.integer)]), + '#withTtlSecondsAfterFinished':: d.fn(help="\"ttlSecondsAfterFinished limits the lifetime of a Job that has finished execution (either Complete or Failed). If this field is set, ttlSecondsAfterFinished after the Job finishes, it is eligible to be automatically deleted. When the Job is being deleted, its lifecycle guarantees (e.g. finalizers) will be honored. If this field is unset, the Job won't be automatically deleted. 
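Per the help text above, the new per-index retry and pod replacement knobs only apply with Indexed completion mode and a Never restart policy; a sketch under the same import assumptions:

    local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';
    local job = k.batch.v1.job;

    job.new('indexed')
    + job.spec.withCompletionMode('Indexed')
    + job.spec.withCompletions(10)
    + job.spec.withParallelism(2)
    + job.spec.template.spec.withRestartPolicy('Never')
    + job.spec.withBackoffLimitPerIndex(2)         // retries are tracked per index
    + job.spec.withMaxFailedIndexes(1)             // fail the Job once more than one index has failed
    + job.spec.withPodReplacementPolicy('Failed')  // wait for full termination before replacing a pod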
If this field is set to zero, the Job becomes eligible to be deleted immediately after it finishes.\"", args=[d.arg(name='ttlSecondsAfterFinished', type=d.T.integer)]), withTtlSecondsAfterFinished(ttlSecondsAfterFinished): { spec+: { ttlSecondsAfterFinished: ttlSecondsAfterFinished } }, }, '#mixin': 'ignore', diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/batch/v1/jobCondition.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/batch/v1/jobCondition.libsonnet similarity index 100% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/batch/v1/jobCondition.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/batch/v1/jobCondition.libsonnet diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/batch/v1/jobSpec.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/batch/v1/jobSpec.libsonnet similarity index 80% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/batch/v1/jobSpec.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/batch/v1/jobSpec.libsonnet index dee8948c13c..3f02a1c564a 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/batch/v1/jobSpec.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/batch/v1/jobSpec.libsonnet @@ -1,6 +1,13 @@ { local d = (import 'doc-util/main.libsonnet'), '#':: d.pkg(name='jobSpec', url='', help='"JobSpec describes how the job execution will look like."'), + '#podFailurePolicy':: d.obj(help='"PodFailurePolicy describes how failed pods influence the backoffLimit."'), + podFailurePolicy: { + '#withRules':: d.fn(help='"A list of pod failure policy rules. The rules are evaluated in order. Once a rule matches a Pod failure, the remaining of the rules are ignored. When no rule matches the Pod failure, the default handling applies - the counter of pod failures is incremented and it is checked against the backoffLimit. At most 20 elements are allowed."', args=[d.arg(name='rules', type=d.T.array)]), + withRules(rules): { podFailurePolicy+: { rules: if std.isArray(v=rules) then rules else [rules] } }, + '#withRulesMixin':: d.fn(help='"A list of pod failure policy rules. The rules are evaluated in order. Once a rule matches a Pod failure, the remaining of the rules are ignored. When no rule matches the Pod failure, the default handling applies - the counter of pod failures is incremented and it is checked against the backoffLimit. At most 20 elements are allowed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='rules', type=d.T.array)]), + withRulesMixin(rules): { podFailurePolicy+: { rules+: if std.isArray(v=rules) then rules else [rules] } }, + }, '#selector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), selector: { '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. 
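The jobSpec builder gains a podFailurePolicy group in the hunk above; a hedged sketch of using it (the rule literal follows the upstream PodFailurePolicyRule shape and is only an example, since the rule type itself is not generated in this hunk):

    local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';
    local jobSpec = k.batch.v1.jobSpec;

    // Don't count pod failures caused by node disruption against backoffLimit.
    jobSpec.podFailurePolicy.withRules([
      { action: 'Ignore', onPodConditions: [{ type: 'DisruptionTarget', status: 'True' }] },
    ])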
The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), @@ -16,12 +23,10 @@ template: { '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { template+: { metadata+: { annotations: annotations } } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { template+: { metadata+: { annotations+: annotations } } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { template+: { metadata+: { clusterName: clusterName } } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { template+: { metadata+: { creationTimestamp: creationTimestamp } } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -32,21 +37,21 @@ withFinalizers(finalizers): { template+: { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. 
Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { template+: { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { template+: { metadata+: { generateName: generateName } } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { template+: { metadata+: { generation: generation } } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. 
More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { template+: { metadata+: { labels: labels } } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { template+: { metadata+: { labels+: labels } } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { template+: { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { template+: { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { template+: { metadata+: { name: name } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { template+: { metadata+: { namespace: namespace } } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { template+: { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } } }, @@ -54,9 +59,9 @@ withOwnerReferencesMixin(ownerReferences): { template+: { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { template+: { metadata+: { resourceVersion: resourceVersion } } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { template+: { metadata+: { selfLink: selfLink } } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. 
It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { template+: { metadata+: { uid: uid } } }, }, '#spec':: d.obj(help='"PodSpec is a description of a pod."'), @@ -115,6 +120,11 @@ '#withSearchesMixin':: d.fn(help='"A list of DNS search domains for host-name lookup. This will be appended to the base search paths generated from DNSPolicy. Duplicated search paths will be removed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='searches', type=d.T.array)]), withSearchesMixin(searches): { template+: { spec+: { dnsConfig+: { searches+: if std.isArray(v=searches) then searches else [searches] } } } }, }, + '#os':: d.obj(help='"PodOS defines the OS parameters of a pod."'), + os: { + '#withName':: d.fn(help='"Name is the name of the operating system. The currently supported values are linux and windows. Additional value may be defined in future and can be one of: https://github.com/opencontainers/runtime-spec/blob/master/config.md#platform-specific-configuration Clients should expect to handle additional values and treat unrecognized values in this field as os: null"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { template+: { spec+: { os+: { name: name } } } }, + }, '#securityContext':: d.obj(help='"PodSecurityContext holds pod-level security attributes and common container settings. Some fields are also present in container.securityContext. Field values of container.securityContext take precedence over field values of PodSecurityContext."'), securityContext: { '#seLinuxOptions':: d.obj(help='"SELinuxOptions are the labels to be applied to the container"'), @@ -130,7 +140,7 @@ }, '#seccompProfile':: d.obj(help="\"SeccompProfile defines a pod/container's seccomp profile settings. Only one profile source may be set.\""), seccompProfile: { - '#withLocalhostProfile':: d.fn(help="\"localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must only be set if type is \\\"Localhost\\\".\"", args=[d.arg(name='localhostProfile', type=d.T.string)]), + '#withLocalhostProfile':: d.fn(help="\"localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must be set if type is \\\"Localhost\\\". Must NOT be set for any other type.\"", args=[d.arg(name='localhostProfile', type=d.T.string)]), withLocalhostProfile(localhostProfile): { template+: { spec+: { securityContext+: { seccompProfile+: { localhostProfile: localhostProfile } } } } }, '#withType':: d.fn(help='"type indicates which kind of seccomp profile will be applied. 
Valid options are:\\n\\nLocalhost - a profile defined in a file on the node should be used. RuntimeDefault - the container runtime default profile should be used. Unconfined - no profile should be applied."', args=[d.arg(name='type', type=d.T.string)]), withType(type): { template+: { spec+: { securityContext+: { seccompProfile+: { type: type } } } } }, @@ -141,26 +151,28 @@ withGmsaCredentialSpec(gmsaCredentialSpec): { template+: { spec+: { securityContext+: { windowsOptions+: { gmsaCredentialSpec: gmsaCredentialSpec } } } } }, '#withGmsaCredentialSpecName':: d.fn(help='"GMSACredentialSpecName is the name of the GMSA credential spec to use."', args=[d.arg(name='gmsaCredentialSpecName', type=d.T.string)]), withGmsaCredentialSpecName(gmsaCredentialSpecName): { template+: { spec+: { securityContext+: { windowsOptions+: { gmsaCredentialSpecName: gmsaCredentialSpecName } } } } }, + '#withHostProcess':: d.fn(help="\"HostProcess determines if a container should be run as a 'Host Process' container. All of a Pod's containers must have the same effective HostProcess value (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). In addition, if HostProcess is true then HostNetwork must also be set to true.\"", args=[d.arg(name='hostProcess', type=d.T.boolean)]), + withHostProcess(hostProcess): { template+: { spec+: { securityContext+: { windowsOptions+: { hostProcess: hostProcess } } } } }, '#withRunAsUserName':: d.fn(help='"The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence."', args=[d.arg(name='runAsUserName', type=d.T.string)]), withRunAsUserName(runAsUserName): { template+: { spec+: { securityContext+: { windowsOptions+: { runAsUserName: runAsUserName } } } } }, }, - '#withFsGroup':: d.fn(help="\"A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod:\\n\\n1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw----\\n\\nIf unset, the Kubelet will not modify the ownership and permissions of any volume.\"", args=[d.arg(name='fsGroup', type=d.T.integer)]), + '#withFsGroup':: d.fn(help="\"A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod:\\n\\n1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw----\\n\\nIf unset, the Kubelet will not modify the ownership and permissions of any volume. Note that this field cannot be set when spec.os.name is windows.\"", args=[d.arg(name='fsGroup', type=d.T.integer)]), withFsGroup(fsGroup): { template+: { spec+: { securityContext+: { fsGroup: fsGroup } } } }, - '#withFsGroupChangePolicy':: d.fn(help='"fsGroupChangePolicy defines behavior of changing ownership and permission of the volume before being exposed inside Pod. This field will only apply to volume types which support fsGroup based ownership(and permissions). It will have no effect on ephemeral volume types such as: secret, configmaps and emptydir. 
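The pod-level seccomp setters in this hunk follow the same pattern on the jobSpec builder; a small sketch (import and builder names assumed as before):

    local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';
    local jobSpec = k.batch.v1.jobSpec;

    // RuntimeDefault needs no localhostProfile; per the help text above,
    // localhostProfile may only be set when type is "Localhost".
    jobSpec.template.spec.securityContext.seccompProfile.withType('RuntimeDefault')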
Valid values are \\"OnRootMismatch\\" and \\"Always\\". If not specified, \\"Always\\" is used."', args=[d.arg(name='fsGroupChangePolicy', type=d.T.string)]), + '#withFsGroupChangePolicy':: d.fn(help='"fsGroupChangePolicy defines behavior of changing ownership and permission of the volume before being exposed inside Pod. This field will only apply to volume types which support fsGroup based ownership(and permissions). It will have no effect on ephemeral volume types such as: secret, configmaps and emptydir. Valid values are \\"OnRootMismatch\\" and \\"Always\\". If not specified, \\"Always\\" is used. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='fsGroupChangePolicy', type=d.T.string)]), withFsGroupChangePolicy(fsGroupChangePolicy): { template+: { spec+: { securityContext+: { fsGroupChangePolicy: fsGroupChangePolicy } } } }, - '#withRunAsGroup':: d.fn(help='"The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container."', args=[d.arg(name='runAsGroup', type=d.T.integer)]), + '#withRunAsGroup':: d.fn(help='"The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='runAsGroup', type=d.T.integer)]), withRunAsGroup(runAsGroup): { template+: { spec+: { securityContext+: { runAsGroup: runAsGroup } } } }, '#withRunAsNonRoot':: d.fn(help='"Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence."', args=[d.arg(name='runAsNonRoot', type=d.T.boolean)]), withRunAsNonRoot(runAsNonRoot): { template+: { spec+: { securityContext+: { runAsNonRoot: runAsNonRoot } } } }, - '#withRunAsUser':: d.fn(help='"The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container."', args=[d.arg(name='runAsUser', type=d.T.integer)]), + '#withRunAsUser':: d.fn(help='"The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='runAsUser', type=d.T.integer)]), withRunAsUser(runAsUser): { template+: { spec+: { securityContext+: { runAsUser: runAsUser } } } }, - '#withSupplementalGroups':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID. 
If unspecified, no groups will be added to any container.\"", args=[d.arg(name='supplementalGroups', type=d.T.array)]), + '#withSupplementalGroups':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID, the fsGroup (if specified), and group memberships defined in the container image for the uid of the container process. If unspecified, no additional groups are added to any container. Note that group memberships defined in the container image for the uid of the container process are still effective, even if they are not included in this list. Note that this field cannot be set when spec.os.name is windows.\"", args=[d.arg(name='supplementalGroups', type=d.T.array)]), withSupplementalGroups(supplementalGroups): { template+: { spec+: { securityContext+: { supplementalGroups: if std.isArray(v=supplementalGroups) then supplementalGroups else [supplementalGroups] } } } }, - '#withSupplementalGroupsMixin':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID. If unspecified, no groups will be added to any container.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='supplementalGroups', type=d.T.array)]), + '#withSupplementalGroupsMixin':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID, the fsGroup (if specified), and group memberships defined in the container image for the uid of the container process. If unspecified, no additional groups are added to any container. Note that group memberships defined in the container image for the uid of the container process are still effective, even if they are not included in this list. Note that this field cannot be set when spec.os.name is windows.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='supplementalGroups', type=d.T.array)]), withSupplementalGroupsMixin(supplementalGroups): { template+: { spec+: { securityContext+: { supplementalGroups+: if std.isArray(v=supplementalGroups) then supplementalGroups else [supplementalGroups] } } } }, - '#withSysctls':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch."', args=[d.arg(name='sysctls', type=d.T.array)]), + '#withSysctls':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='sysctls', type=d.T.array)]), withSysctls(sysctls): { template+: { spec+: { securityContext+: { sysctls: if std.isArray(v=sysctls) then sysctls else [sysctls] } } } }, - '#withSysctlsMixin':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='sysctls', type=d.T.array)]), + '#withSysctlsMixin':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch. 
Note that this field cannot be set when spec.os.name is windows."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='sysctls', type=d.T.array)]), withSysctlsMixin(sysctls): { template+: { spec+: { securityContext+: { sysctls+: if std.isArray(v=sysctls) then sysctls else [sysctls] } } } }, }, '#withActiveDeadlineSeconds':: d.fn(help='"Optional duration in seconds the pod may be active on the node relative to StartTime before the system will actively try to mark it failed and kill associated containers. Value must be a positive integer."', args=[d.arg(name='activeDeadlineSeconds', type=d.T.integer)]), @@ -175,9 +187,9 @@ withDnsPolicy(dnsPolicy): { template+: { spec+: { dnsPolicy: dnsPolicy } } }, '#withEnableServiceLinks':: d.fn(help="\"EnableServiceLinks indicates whether information about services should be injected into pod's environment variables, matching the syntax of Docker links. Optional: Defaults to true.\"", args=[d.arg(name='enableServiceLinks', type=d.T.boolean)]), withEnableServiceLinks(enableServiceLinks): { template+: { spec+: { enableServiceLinks: enableServiceLinks } } }, - '#withEphemeralContainers':: d.fn(help="\"List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. This field is alpha-level and is only honored by servers that enable the EphemeralContainers feature.\"", args=[d.arg(name='ephemeralContainers', type=d.T.array)]), + '#withEphemeralContainers':: d.fn(help="\"List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource.\"", args=[d.arg(name='ephemeralContainers', type=d.T.array)]), withEphemeralContainers(ephemeralContainers): { template+: { spec+: { ephemeralContainers: if std.isArray(v=ephemeralContainers) then ephemeralContainers else [ephemeralContainers] } } }, - '#withEphemeralContainersMixin':: d.fn(help="\"List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. This field is alpha-level and is only honored by servers that enable the EphemeralContainers feature.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='ephemeralContainers', type=d.T.array)]), + '#withEphemeralContainersMixin':: d.fn(help="\"List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. 
In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='ephemeralContainers', type=d.T.array)]), withEphemeralContainersMixin(ephemeralContainers): { template+: { spec+: { ephemeralContainers+: if std.isArray(v=ephemeralContainers) then ephemeralContainers else [ephemeralContainers] } } }, '#withHostAliases':: d.fn(help="\"HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts file if specified. This is only valid for non-hostNetwork pods.\"", args=[d.arg(name='hostAliases', type=d.T.array)]), withHostAliases(hostAliases): { template+: { spec+: { hostAliases: if std.isArray(v=hostAliases) then hostAliases else [hostAliases] } } }, @@ -189,11 +201,13 @@ withHostNetwork(hostNetwork): { template+: { spec+: { hostNetwork: hostNetwork } } }, '#withHostPID':: d.fn(help="\"Use the host's pid namespace. Optional: Default to false.\"", args=[d.arg(name='hostPID', type=d.T.boolean)]), withHostPID(hostPID): { template+: { spec+: { hostPID: hostPID } } }, + '#withHostUsers':: d.fn(help="\"Use the host's user namespace. Optional: Default to true. If set to true or not present, the pod will be run in the host user namespace, useful for when the pod needs a feature only available to the host user namespace, such as loading a kernel module with CAP_SYS_MODULE. When set to false, a new userns is created for the pod. Setting false is useful for mitigating container breakout vulnerabilities even allowing users to run their containers as root without actually having root privileges on the host. This field is alpha-level and is only honored by servers that enable the UserNamespacesSupport feature.\"", args=[d.arg(name='hostUsers', type=d.T.boolean)]), + withHostUsers(hostUsers): { template+: { spec+: { hostUsers: hostUsers } } }, '#withHostname':: d.fn(help="\"Specifies the hostname of the Pod If not specified, the pod's hostname will be set to a system-defined value.\"", args=[d.arg(name='hostname', type=d.T.string)]), withHostname(hostname): { template+: { spec+: { hostname: hostname } } }, - '#withImagePullSecrets':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), + '#withImagePullSecrets':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), withImagePullSecrets(imagePullSecrets): { template+: { spec+: { imagePullSecrets: if std.isArray(v=imagePullSecrets) then imagePullSecrets else [imagePullSecrets] } } }, - '#withImagePullSecretsMixin':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. 
If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), + '#withImagePullSecretsMixin':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), withImagePullSecretsMixin(imagePullSecrets): { template+: { spec+: { imagePullSecrets+: if std.isArray(v=imagePullSecrets) then imagePullSecrets else [imagePullSecrets] } } }, '#withInitContainers':: d.fn(help='"List of initialization containers belonging to the pod. Init containers are executed in order prior to containers being started. If any init container fails, the pod is considered to have failed and is handled according to its restartPolicy. The name for an init container or normal container must be unique among all containers. Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes. The resourceRequirements of an init container are taken into account during scheduling by finding the highest request/limit for each resource type, and then using the max of of that value or the sum of the normal containers. Limits are applied to init containers in a similar fashion. Init containers cannot currently be added or removed. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/"', args=[d.arg(name='initContainers', type=d.T.array)]), withInitContainers(initContainers): { template+: { spec+: { initContainers: if std.isArray(v=initContainers) then initContainers else [initContainers] } } }, @@ -205,26 +219,34 @@ withNodeSelector(nodeSelector): { template+: { spec+: { nodeSelector: nodeSelector } } }, '#withNodeSelectorMixin':: d.fn(help="\"NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='nodeSelector', type=d.T.object)]), withNodeSelectorMixin(nodeSelector): { template+: { spec+: { nodeSelector+: nodeSelector } } }, - '#withOverhead':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. 
More info: https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md This field is alpha-level as of Kubernetes v1.16, and is only honored by servers that enable the PodOverhead feature."', args=[d.arg(name='overhead', type=d.T.object)]), + '#withOverhead':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md"', args=[d.arg(name='overhead', type=d.T.object)]), withOverhead(overhead): { template+: { spec+: { overhead: overhead } } }, - '#withOverheadMixin':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md This field is alpha-level as of Kubernetes v1.16, and is only honored by servers that enable the PodOverhead feature."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='overhead', type=d.T.object)]), + '#withOverheadMixin':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='overhead', type=d.T.object)]), withOverheadMixin(overhead): { template+: { spec+: { overhead+: overhead } } }, - '#withPreemptionPolicy':: d.fn(help='"PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset. This field is beta-level, gated by the NonPreemptingPriority feature-gate."', args=[d.arg(name='preemptionPolicy', type=d.T.string)]), + '#withPreemptionPolicy':: d.fn(help='"PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. 
Defaults to PreemptLowerPriority if unset."', args=[d.arg(name='preemptionPolicy', type=d.T.string)]), withPreemptionPolicy(preemptionPolicy): { template+: { spec+: { preemptionPolicy: preemptionPolicy } } }, '#withPriority':: d.fn(help='"The priority value. Various system components use this field to find the priority of the pod. When Priority Admission Controller is enabled, it prevents users from setting this field. The admission controller populates this field from PriorityClassName. The higher the value, the higher the priority."', args=[d.arg(name='priority', type=d.T.integer)]), withPriority(priority): { template+: { spec+: { priority: priority } } }, '#withPriorityClassName':: d.fn(help="\"If specified, indicates the pod's priority. \\\"system-node-critical\\\" and \\\"system-cluster-critical\\\" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default.\"", args=[d.arg(name='priorityClassName', type=d.T.string)]), withPriorityClassName(priorityClassName): { template+: { spec+: { priorityClassName: priorityClassName } } }, - '#withReadinessGates':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/0007-pod-ready%2B%2B.md"', args=[d.arg(name='readinessGates', type=d.T.array)]), + '#withReadinessGates':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates"', args=[d.arg(name='readinessGates', type=d.T.array)]), withReadinessGates(readinessGates): { template+: { spec+: { readinessGates: if std.isArray(v=readinessGates) then readinessGates else [readinessGates] } } }, - '#withReadinessGatesMixin':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/0007-pod-ready%2B%2B.md"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='readinessGates', type=d.T.array)]), + '#withReadinessGatesMixin':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='readinessGates', type=d.T.array)]), withReadinessGatesMixin(readinessGates): { template+: { spec+: { readinessGates+: if std.isArray(v=readinessGates) then readinessGates else [readinessGates] } } }, - '#withRestartPolicy':: d.fn(help='"Restart policy for all containers within the pod. One of Always, OnFailure, Never. Default to Always. 
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy"', args=[d.arg(name='restartPolicy', type=d.T.string)]), + '#withResourceClaims':: d.fn(help='"ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. The resources will be made available to those containers which consume them by name.\\n\\nThis is an alpha field and requires enabling the DynamicResourceAllocation feature gate.\\n\\nThis field is immutable."', args=[d.arg(name='resourceClaims', type=d.T.array)]), + withResourceClaims(resourceClaims): { template+: { spec+: { resourceClaims: if std.isArray(v=resourceClaims) then resourceClaims else [resourceClaims] } } }, + '#withResourceClaimsMixin':: d.fn(help='"ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. The resources will be made available to those containers which consume them by name.\\n\\nThis is an alpha field and requires enabling the DynamicResourceAllocation feature gate.\\n\\nThis field is immutable."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='resourceClaims', type=d.T.array)]), + withResourceClaimsMixin(resourceClaims): { template+: { spec+: { resourceClaims+: if std.isArray(v=resourceClaims) then resourceClaims else [resourceClaims] } } }, + '#withRestartPolicy':: d.fn(help='"Restart policy for all containers within the pod. One of Always, OnFailure, Never. In some contexts, only a subset of those values may be permitted. Default to Always. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy"', args=[d.arg(name='restartPolicy', type=d.T.string)]), withRestartPolicy(restartPolicy): { template+: { spec+: { restartPolicy: restartPolicy } } }, - '#withRuntimeClassName':: d.fn(help='"RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \\"legacy\\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md This is a beta feature as of Kubernetes v1.14."', args=[d.arg(name='runtimeClassName', type=d.T.string)]), + '#withRuntimeClassName':: d.fn(help='"RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \\"legacy\\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class"', args=[d.arg(name='runtimeClassName', type=d.T.string)]), withRuntimeClassName(runtimeClassName): { template+: { spec+: { runtimeClassName: runtimeClassName } } }, '#withSchedulerName':: d.fn(help='"If specified, the pod will be dispatched by specified scheduler. If not specified, the pod will be dispatched by default scheduler."', args=[d.arg(name='schedulerName', type=d.T.string)]), withSchedulerName(schedulerName): { template+: { spec+: { schedulerName: schedulerName } } }, + '#withSchedulingGates':: d.fn(help='"SchedulingGates is an opaque list of values that if specified will block scheduling the pod. 
If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the scheduler will not attempt to schedule the pod.\\n\\nSchedulingGates can only be set at pod creation time, and be removed only afterwards.\\n\\nThis is a beta feature enabled by the PodSchedulingReadiness feature gate."', args=[d.arg(name='schedulingGates', type=d.T.array)]), + withSchedulingGates(schedulingGates): { template+: { spec+: { schedulingGates: if std.isArray(v=schedulingGates) then schedulingGates else [schedulingGates] } } }, + '#withSchedulingGatesMixin':: d.fn(help='"SchedulingGates is an opaque list of values that if specified will block scheduling the pod. If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the scheduler will not attempt to schedule the pod.\\n\\nSchedulingGates can only be set at pod creation time, and be removed only afterwards.\\n\\nThis is a beta feature enabled by the PodSchedulingReadiness feature gate."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='schedulingGates', type=d.T.array)]), + withSchedulingGatesMixin(schedulingGates): { template+: { spec+: { schedulingGates+: if std.isArray(v=schedulingGates) then schedulingGates else [schedulingGates] } } }, '#withServiceAccount':: d.fn(help='"DeprecatedServiceAccount is a depreciated alias for ServiceAccountName. Deprecated: Use serviceAccountName instead."', args=[d.arg(name='serviceAccount', type=d.T.string)]), withServiceAccount(serviceAccount): { template+: { spec+: { serviceAccount: serviceAccount } } }, '#withServiceAccountName':: d.fn(help='"ServiceAccountName is the name of the ServiceAccount to use to run this pod. More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/"', args=[d.arg(name='serviceAccountName', type=d.T.string)]), @@ -255,17 +277,23 @@ withActiveDeadlineSeconds(activeDeadlineSeconds): { activeDeadlineSeconds: activeDeadlineSeconds }, '#withBackoffLimit':: d.fn(help='"Specifies the number of retries before marking this job failed. Defaults to 6"', args=[d.arg(name='backoffLimit', type=d.T.integer)]), withBackoffLimit(backoffLimit): { backoffLimit: backoffLimit }, - '#withCompletionMode':: d.fn(help="\"CompletionMode specifies how Pod completions are tracked. It can be `NonIndexed` (default) or `Indexed`.\\n\\n`NonIndexed` means that the Job is considered complete when there have been .spec.completions successfully completed Pods. Each Pod completion is homologous to each other.\\n\\n`Indexed` means that the Pods of a Job get an associated completion index from 0 to (.spec.completions - 1), available in the annotation batch.kubernetes.io/job-completion-index. The Job is considered complete when there is one successfully completed Pod for each index. When value is `Indexed`, .spec.completions must be specified and `.spec.parallelism` must be less than or equal to 10^5.\\n\\nThis field is alpha-level and is only honored by servers that enable the IndexedJob feature gate. More completion modes can be added in the future. If the Job controller observes a mode that it doesn't recognize, the controller skips updates for the Job.\"", args=[d.arg(name='completionMode', type=d.T.string)]), + '#withBackoffLimitPerIndex':: d.fn(help="\"Specifies the limit for the number of retries within an index before marking this index as failed. When enabled the number of failures per index is kept in the pod's batch.kubernetes.io/job-index-failure-count annotation. 
It can only be set when Job's completionMode=Indexed, and the Pod's restart policy is Never. The field is immutable. This field is beta-level. It can be used when the `JobBackoffLimitPerIndex` feature gate is enabled (enabled by default).\"", args=[d.arg(name='backoffLimitPerIndex', type=d.T.integer)]), + withBackoffLimitPerIndex(backoffLimitPerIndex): { backoffLimitPerIndex: backoffLimitPerIndex }, + '#withCompletionMode':: d.fn(help="\"completionMode specifies how Pod completions are tracked. It can be `NonIndexed` (default) or `Indexed`.\\n\\n`NonIndexed` means that the Job is considered complete when there have been .spec.completions successfully completed Pods. Each Pod completion is homologous to each other.\\n\\n`Indexed` means that the Pods of a Job get an associated completion index from 0 to (.spec.completions - 1), available in the annotation batch.kubernetes.io/job-completion-index. The Job is considered complete when there is one successfully completed Pod for each index. When value is `Indexed`, .spec.completions must be specified and `.spec.parallelism` must be less than or equal to 10^5. In addition, The Pod name takes the form `$(job-name)-$(index)-$(random-string)`, the Pod hostname takes the form `$(job-name)-$(index)`.\\n\\nMore completion modes can be added in the future. If the Job controller observes a mode that it doesn't recognize, which is possible during upgrades due to version skew, the controller skips updates for the Job.\"", args=[d.arg(name='completionMode', type=d.T.string)]), withCompletionMode(completionMode): { completionMode: completionMode }, - '#withCompletions':: d.fn(help='"Specifies the desired number of successfully finished pods the job should be run with. Setting to nil means that the success of any pod signals the success of all pods, and allows parallelism to have any positive value. Setting to 1 means that parallelism is limited to 1 and the success of that pod signals the success of the job. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/"', args=[d.arg(name='completions', type=d.T.integer)]), + '#withCompletions':: d.fn(help='"Specifies the desired number of successfully finished pods the job should be run with. Setting to null means that the success of any pod signals the success of all pods, and allows parallelism to have any positive value. Setting to 1 means that parallelism is limited to 1 and the success of that pod signals the success of the job. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/"', args=[d.arg(name='completions', type=d.T.integer)]), withCompletions(completions): { completions: completions }, '#withManualSelector':: d.fn(help='"manualSelector controls generation of pod labels and pod selectors. Leave `manualSelector` unset unless you are certain what you are doing. When false or unset, the system pick labels unique to this job and appends those labels to the pod template. When true, the user is responsible for picking unique labels and specifying the selector. Failure to pick a unique label may cause this and other jobs to not function correctly. However, You may see `manualSelector=true` in jobs that were created with the old `extensions/v1beta1` API. 
More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/#specifying-your-own-pod-selector"', args=[d.arg(name='manualSelector', type=d.T.boolean)]), withManualSelector(manualSelector): { manualSelector: manualSelector }, + '#withMaxFailedIndexes':: d.fn(help='"Specifies the maximal number of failed indexes before marking the Job as failed, when backoffLimitPerIndex is set. Once the number of failed indexes exceeds this number the entire Job is marked as Failed and its execution is terminated. When left as null the job continues execution of all of its indexes and is marked with the `Complete` Job condition. It can only be specified when backoffLimitPerIndex is set. It can be null or up to completions. It is required and must be less than or equal to 10^4 when is completions greater than 10^5. This field is beta-level. It can be used when the `JobBackoffLimitPerIndex` feature gate is enabled (enabled by default)."', args=[d.arg(name='maxFailedIndexes', type=d.T.integer)]), + withMaxFailedIndexes(maxFailedIndexes): { maxFailedIndexes: maxFailedIndexes }, '#withParallelism':: d.fn(help='"Specifies the maximum desired number of pods the job should run at any given time. The actual number of pods running in steady state will be less than this number when ((.spec.completions - .status.successful) < .spec.parallelism), i.e. when the work left to do is less than max parallelism. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/"', args=[d.arg(name='parallelism', type=d.T.integer)]), withParallelism(parallelism): { parallelism: parallelism }, - '#withSuspend':: d.fn(help='"Suspend specifies whether the Job controller should create Pods or not. If a Job is created with suspend set to true, no Pods are created by the Job controller. If a Job is suspended after creation (i.e. the flag goes from false to true), the Job controller will delete all active Pods associated with this Job. Users must design their workload to gracefully handle this. Suspending a Job will reset the StartTime field of the Job, effectively resetting the ActiveDeadlineSeconds timer too. This is an alpha field and requires the SuspendJob feature gate to be enabled; otherwise this field may not be set to true. Defaults to false."', args=[d.arg(name='suspend', type=d.T.boolean)]), + '#withPodReplacementPolicy':: d.fn(help='"podReplacementPolicy specifies when to create replacement Pods. Possible values are: - TerminatingOrFailed means that we recreate pods\\n when they are terminating (has a metadata.deletionTimestamp) or failed.\\n- Failed means to wait until a previously created Pod is fully terminated (has phase\\n Failed or Succeeded) before creating a replacement Pod.\\n\\nWhen using podFailurePolicy, Failed is the the only allowed value. TerminatingOrFailed and Failed are allowed values when podFailurePolicy is not in use. This is an beta field. To use this, enable the JobPodReplacementPolicy feature toggle. This is on by default."', args=[d.arg(name='podReplacementPolicy', type=d.T.string)]), + withPodReplacementPolicy(podReplacementPolicy): { podReplacementPolicy: podReplacementPolicy }, + '#withSuspend':: d.fn(help='"suspend specifies whether the Job controller should create Pods or not. If a Job is created with suspend set to true, no Pods are created by the Job controller. If a Job is suspended after creation (i.e. the flag goes from false to true), the Job controller will delete all active Pods associated with this Job. 
Users must design their workload to gracefully handle this. Suspending a Job will reset the StartTime field of the Job, effectively resetting the ActiveDeadlineSeconds timer too. Defaults to false."', args=[d.arg(name='suspend', type=d.T.boolean)]), withSuspend(suspend): { suspend: suspend }, - '#withTtlSecondsAfterFinished':: d.fn(help="\"ttlSecondsAfterFinished limits the lifetime of a Job that has finished execution (either Complete or Failed). If this field is set, ttlSecondsAfterFinished after the Job finishes, it is eligible to be automatically deleted. When the Job is being deleted, its lifecycle guarantees (e.g. finalizers) will be honored. If this field is unset, the Job won't be automatically deleted. If this field is set to zero, the Job becomes eligible to be deleted immediately after it finishes. This field is alpha-level and is only honored by servers that enable the TTLAfterFinished feature.\"", args=[d.arg(name='ttlSecondsAfterFinished', type=d.T.integer)]), + '#withTtlSecondsAfterFinished':: d.fn(help="\"ttlSecondsAfterFinished limits the lifetime of a Job that has finished execution (either Complete or Failed). If this field is set, ttlSecondsAfterFinished after the Job finishes, it is eligible to be automatically deleted. When the Job is being deleted, its lifecycle guarantees (e.g. finalizers) will be honored. If this field is unset, the Job won't be automatically deleted. If this field is set to zero, the Job becomes eligible to be deleted immediately after it finishes.\"", args=[d.arg(name='ttlSecondsAfterFinished', type=d.T.integer)]), withTtlSecondsAfterFinished(ttlSecondsAfterFinished): { ttlSecondsAfterFinished: ttlSecondsAfterFinished }, '#mixin': 'ignore', mixin: self, diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/batch/v1/jobStatus.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/batch/v1/jobStatus.libsonnet similarity index 53% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/batch/v1/jobStatus.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/batch/v1/jobStatus.libsonnet index 1bb4ea9d808..f05416634de 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/batch/v1/jobStatus.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/batch/v1/jobStatus.libsonnet @@ -1,9 +1,20 @@ { local d = (import 'doc-util/main.libsonnet'), '#':: d.pkg(name='jobStatus', url='', help='"JobStatus represents the current state of a Job."'), - '#withActive':: d.fn(help='"The number of actively running pods."', args=[d.arg(name='active', type=d.T.integer)]), + '#uncountedTerminatedPods':: d.obj(help="\"UncountedTerminatedPods holds UIDs of Pods that have terminated but haven't been accounted in Job status counters.\""), + uncountedTerminatedPods: { + '#withFailed':: d.fn(help='"failed holds UIDs of failed Pods."', args=[d.arg(name='failed', type=d.T.array)]), + withFailed(failed): { uncountedTerminatedPods+: { failed: if std.isArray(v=failed) then failed else [failed] } }, + '#withFailedMixin':: d.fn(help='"failed holds UIDs of failed Pods."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='failed', type=d.T.array)]), + withFailedMixin(failed): { uncountedTerminatedPods+: { failed+: if std.isArray(v=failed) then failed else [failed] } }, + '#withSucceeded':: d.fn(help='"succeeded holds UIDs of succeeded 
Pods."', args=[d.arg(name='succeeded', type=d.T.array)]), + withSucceeded(succeeded): { uncountedTerminatedPods+: { succeeded: if std.isArray(v=succeeded) then succeeded else [succeeded] } }, + '#withSucceededMixin':: d.fn(help='"succeeded holds UIDs of succeeded Pods."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='succeeded', type=d.T.array)]), + withSucceededMixin(succeeded): { uncountedTerminatedPods+: { succeeded+: if std.isArray(v=succeeded) then succeeded else [succeeded] } }, + }, + '#withActive':: d.fn(help='"The number of pending and running pods."', args=[d.arg(name='active', type=d.T.integer)]), withActive(active): { active: active }, - '#withCompletedIndexes':: d.fn(help='"CompletedIndexes holds the completed indexes when .spec.completionMode = \\"Indexed\\" in a text format. The indexes are represented as decimal integers separated by commas. The numbers are listed in increasing order. Three or more consecutive numbers are compressed and represented by the first and last element of the series, separated by a hyphen. For example, if the completed indexes are 1, 3, 4, 5 and 7, they are represented as \\"1,3-5,7\\"."', args=[d.arg(name='completedIndexes', type=d.T.string)]), + '#withCompletedIndexes':: d.fn(help='"completedIndexes holds the completed indexes when .spec.completionMode = \\"Indexed\\" in a text format. The indexes are represented as decimal integers separated by commas. The numbers are listed in increasing order. Three or more consecutive numbers are compressed and represented by the first and last element of the series, separated by a hyphen. For example, if the completed indexes are 1, 3, 4, 5 and 7, they are represented as \\"1,3-5,7\\"."', args=[d.arg(name='completedIndexes', type=d.T.string)]), withCompletedIndexes(completedIndexes): { completedIndexes: completedIndexes }, '#withCompletionTime':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='completionTime', type=d.T.string)]), withCompletionTime(completionTime): { completionTime: completionTime }, @@ -13,10 +24,16 @@ withConditionsMixin(conditions): { conditions+: if std.isArray(v=conditions) then conditions else [conditions] }, '#withFailed':: d.fn(help='"The number of pods which reached phase Failed."', args=[d.arg(name='failed', type=d.T.integer)]), withFailed(failed): { failed: failed }, + '#withFailedIndexes':: d.fn(help='"FailedIndexes holds the failed indexes when backoffLimitPerIndex=true. The indexes are represented in the text format analogous as for the `completedIndexes` field, ie. they are kept as decimal integers separated by commas. The numbers are listed in increasing order. Three or more consecutive numbers are compressed and represented by the first and last element of the series, separated by a hyphen. For example, if the failed indexes are 1, 3, 4, 5 and 7, they are represented as \\"1,3-5,7\\". This field is beta-level. 
It can be used when the `JobBackoffLimitPerIndex` feature gate is enabled (enabled by default)."', args=[d.arg(name='failedIndexes', type=d.T.string)]), + withFailedIndexes(failedIndexes): { failedIndexes: failedIndexes }, + '#withReady':: d.fn(help='"The number of pods which have a Ready condition."', args=[d.arg(name='ready', type=d.T.integer)]), + withReady(ready): { ready: ready }, '#withStartTime':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='startTime', type=d.T.string)]), withStartTime(startTime): { startTime: startTime }, '#withSucceeded':: d.fn(help='"The number of pods which reached phase Succeeded."', args=[d.arg(name='succeeded', type=d.T.integer)]), withSucceeded(succeeded): { succeeded: succeeded }, + '#withTerminating':: d.fn(help='"The number of pods which are terminating (in phase Pending or Running and have a deletionTimestamp).\\n\\nThis field is beta-level. The job controller populates the field when the feature gate JobPodReplacementPolicy is enabled (enabled by default)."', args=[d.arg(name='terminating', type=d.T.integer)]), + withTerminating(terminating): { terminating: terminating }, '#mixin': 'ignore', mixin: self, } diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/batch/v1/jobTemplateSpec.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/batch/v1/jobTemplateSpec.libsonnet similarity index 81% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/batch/v1/jobTemplateSpec.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/batch/v1/jobTemplateSpec.libsonnet index 5b4af07f25b..18b9fded040 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/batch/v1/jobTemplateSpec.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/batch/v1/jobTemplateSpec.libsonnet @@ -3,12 +3,10 @@ '#':: d.pkg(name='jobTemplateSpec', url='', help='"JobTemplateSpec describes the data a Job should have when created from a template"'), '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. 
They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -19,21 +17,21 @@ withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. 
The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { metadata+: { generateName: generateName } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { metadata+: { labels+: labels } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. 
This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { metadata+: { namespace: namespace } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. 
There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, @@ -41,13 +39,20 @@ withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { metadata+: { uid: uid } }, }, '#spec':: d.obj(help='"JobSpec describes how the job execution will look like."'), spec: { + '#podFailurePolicy':: d.obj(help='"PodFailurePolicy describes how failed pods influence the backoffLimit."'), + podFailurePolicy: { + '#withRules':: d.fn(help='"A list of pod failure policy rules. The rules are evaluated in order. Once a rule matches a Pod failure, the remaining of the rules are ignored. When no rule matches the Pod failure, the default handling applies - the counter of pod failures is incremented and it is checked against the backoffLimit. At most 20 elements are allowed."', args=[d.arg(name='rules', type=d.T.array)]), + withRules(rules): { spec+: { podFailurePolicy+: { rules: if std.isArray(v=rules) then rules else [rules] } } }, + '#withRulesMixin':: d.fn(help='"A list of pod failure policy rules. The rules are evaluated in order. Once a rule matches a Pod failure, the remaining of the rules are ignored. 
When no rule matches the Pod failure, the default handling applies - the counter of pod failures is incremented and it is checked against the backoffLimit. At most 20 elements are allowed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='rules', type=d.T.array)]), + withRulesMixin(rules): { spec+: { podFailurePolicy+: { rules+: if std.isArray(v=rules) then rules else [rules] } } }, + }, '#selector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), selector: { '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), @@ -63,12 +68,10 @@ template: { '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { spec+: { template+: { metadata+: { annotations: annotations } } } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { spec+: { template+: { metadata+: { annotations+: annotations } } } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { spec+: { template+: { metadata+: { clusterName: clusterName } } } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. 
Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { spec+: { template+: { metadata+: { creationTimestamp: creationTimestamp } } } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -79,21 +82,21 @@ withFinalizers(finalizers): { spec+: { template+: { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } } } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { spec+: { template+: { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } } } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. 
The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { spec+: { template+: { metadata+: { generateName: generateName } } } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { spec+: { template+: { metadata+: { generation: generation } } } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { spec+: { template+: { metadata+: { labels: labels } } } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { spec+: { template+: { metadata+: { labels+: labels } } } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { spec+: { template+: { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } } } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". 
The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { spec+: { template+: { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } } } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { spec+: { template+: { metadata+: { name: name } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { spec+: { template+: { metadata+: { namespace: namespace } } } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { spec+: { template+: { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } } } }, @@ -101,9 +104,9 @@ withOwnerReferencesMixin(ownerReferences): { spec+: { template+: { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } } } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. 
Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { spec+: { template+: { metadata+: { resourceVersion: resourceVersion } } } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { spec+: { template+: { metadata+: { selfLink: selfLink } } } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { spec+: { template+: { metadata+: { uid: uid } } } }, }, '#spec':: d.obj(help='"PodSpec is a description of a pod."'), @@ -162,6 +165,11 @@ '#withSearchesMixin':: d.fn(help='"A list of DNS search domains for host-name lookup. This will be appended to the base search paths generated from DNSPolicy. Duplicated search paths will be removed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='searches', type=d.T.array)]), withSearchesMixin(searches): { spec+: { template+: { spec+: { dnsConfig+: { searches+: if std.isArray(v=searches) then searches else [searches] } } } } }, }, + '#os':: d.obj(help='"PodOS defines the OS parameters of a pod."'), + os: { + '#withName':: d.fn(help='"Name is the name of the operating system. The currently supported values are linux and windows. Additional value may be defined in future and can be one of: https://github.com/opencontainers/runtime-spec/blob/master/config.md#platform-specific-configuration Clients should expect to handle additional values and treat unrecognized values in this field as os: null"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { spec+: { template+: { spec+: { os+: { name: name } } } } }, + }, '#securityContext':: d.obj(help='"PodSecurityContext holds pod-level security attributes and common container settings. Some fields are also present in container.securityContext. 
Field values of container.securityContext take precedence over field values of PodSecurityContext."'), securityContext: { '#seLinuxOptions':: d.obj(help='"SELinuxOptions are the labels to be applied to the container"'), @@ -177,7 +185,7 @@ }, '#seccompProfile':: d.obj(help="\"SeccompProfile defines a pod/container's seccomp profile settings. Only one profile source may be set.\""), seccompProfile: { - '#withLocalhostProfile':: d.fn(help="\"localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must only be set if type is \\\"Localhost\\\".\"", args=[d.arg(name='localhostProfile', type=d.T.string)]), + '#withLocalhostProfile':: d.fn(help="\"localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must be set if type is \\\"Localhost\\\". Must NOT be set for any other type.\"", args=[d.arg(name='localhostProfile', type=d.T.string)]), withLocalhostProfile(localhostProfile): { spec+: { template+: { spec+: { securityContext+: { seccompProfile+: { localhostProfile: localhostProfile } } } } } }, '#withType':: d.fn(help='"type indicates which kind of seccomp profile will be applied. Valid options are:\\n\\nLocalhost - a profile defined in a file on the node should be used. RuntimeDefault - the container runtime default profile should be used. Unconfined - no profile should be applied."', args=[d.arg(name='type', type=d.T.string)]), withType(type): { spec+: { template+: { spec+: { securityContext+: { seccompProfile+: { type: type } } } } } }, @@ -188,26 +196,28 @@ withGmsaCredentialSpec(gmsaCredentialSpec): { spec+: { template+: { spec+: { securityContext+: { windowsOptions+: { gmsaCredentialSpec: gmsaCredentialSpec } } } } } }, '#withGmsaCredentialSpecName':: d.fn(help='"GMSACredentialSpecName is the name of the GMSA credential spec to use."', args=[d.arg(name='gmsaCredentialSpecName', type=d.T.string)]), withGmsaCredentialSpecName(gmsaCredentialSpecName): { spec+: { template+: { spec+: { securityContext+: { windowsOptions+: { gmsaCredentialSpecName: gmsaCredentialSpecName } } } } } }, + '#withHostProcess':: d.fn(help="\"HostProcess determines if a container should be run as a 'Host Process' container. All of a Pod's containers must have the same effective HostProcess value (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). In addition, if HostProcess is true then HostNetwork must also be set to true.\"", args=[d.arg(name='hostProcess', type=d.T.boolean)]), + withHostProcess(hostProcess): { spec+: { template+: { spec+: { securityContext+: { windowsOptions+: { hostProcess: hostProcess } } } } } }, '#withRunAsUserName':: d.fn(help='"The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence."', args=[d.arg(name='runAsUserName', type=d.T.string)]), withRunAsUserName(runAsUserName): { spec+: { template+: { spec+: { securityContext+: { windowsOptions+: { runAsUserName: runAsUserName } } } } } }, }, - '#withFsGroup':: d.fn(help="\"A special supplemental group that applies to all containers in a pod. 
Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod:\\n\\n1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw----\\n\\nIf unset, the Kubelet will not modify the ownership and permissions of any volume.\"", args=[d.arg(name='fsGroup', type=d.T.integer)]), + '#withFsGroup':: d.fn(help="\"A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod:\\n\\n1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw----\\n\\nIf unset, the Kubelet will not modify the ownership and permissions of any volume. Note that this field cannot be set when spec.os.name is windows.\"", args=[d.arg(name='fsGroup', type=d.T.integer)]), withFsGroup(fsGroup): { spec+: { template+: { spec+: { securityContext+: { fsGroup: fsGroup } } } } }, - '#withFsGroupChangePolicy':: d.fn(help='"fsGroupChangePolicy defines behavior of changing ownership and permission of the volume before being exposed inside Pod. This field will only apply to volume types which support fsGroup based ownership(and permissions). It will have no effect on ephemeral volume types such as: secret, configmaps and emptydir. Valid values are \\"OnRootMismatch\\" and \\"Always\\". If not specified, \\"Always\\" is used."', args=[d.arg(name='fsGroupChangePolicy', type=d.T.string)]), + '#withFsGroupChangePolicy':: d.fn(help='"fsGroupChangePolicy defines behavior of changing ownership and permission of the volume before being exposed inside Pod. This field will only apply to volume types which support fsGroup based ownership(and permissions). It will have no effect on ephemeral volume types such as: secret, configmaps and emptydir. Valid values are \\"OnRootMismatch\\" and \\"Always\\". If not specified, \\"Always\\" is used. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='fsGroupChangePolicy', type=d.T.string)]), withFsGroupChangePolicy(fsGroupChangePolicy): { spec+: { template+: { spec+: { securityContext+: { fsGroupChangePolicy: fsGroupChangePolicy } } } } }, - '#withRunAsGroup':: d.fn(help='"The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container."', args=[d.arg(name='runAsGroup', type=d.T.integer)]), + '#withRunAsGroup':: d.fn(help='"The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='runAsGroup', type=d.T.integer)]), withRunAsGroup(runAsGroup): { spec+: { template+: { spec+: { securityContext+: { runAsGroup: runAsGroup } } } } }, '#withRunAsNonRoot':: d.fn(help='"Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in SecurityContext. 
If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence."', args=[d.arg(name='runAsNonRoot', type=d.T.boolean)]), withRunAsNonRoot(runAsNonRoot): { spec+: { template+: { spec+: { securityContext+: { runAsNonRoot: runAsNonRoot } } } } }, - '#withRunAsUser':: d.fn(help='"The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container."', args=[d.arg(name='runAsUser', type=d.T.integer)]), + '#withRunAsUser':: d.fn(help='"The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='runAsUser', type=d.T.integer)]), withRunAsUser(runAsUser): { spec+: { template+: { spec+: { securityContext+: { runAsUser: runAsUser } } } } }, - '#withSupplementalGroups':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID. If unspecified, no groups will be added to any container.\"", args=[d.arg(name='supplementalGroups', type=d.T.array)]), + '#withSupplementalGroups':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID, the fsGroup (if specified), and group memberships defined in the container image for the uid of the container process. If unspecified, no additional groups are added to any container. Note that group memberships defined in the container image for the uid of the container process are still effective, even if they are not included in this list. Note that this field cannot be set when spec.os.name is windows.\"", args=[d.arg(name='supplementalGroups', type=d.T.array)]), withSupplementalGroups(supplementalGroups): { spec+: { template+: { spec+: { securityContext+: { supplementalGroups: if std.isArray(v=supplementalGroups) then supplementalGroups else [supplementalGroups] } } } } }, - '#withSupplementalGroupsMixin':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID. If unspecified, no groups will be added to any container.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='supplementalGroups', type=d.T.array)]), + '#withSupplementalGroupsMixin':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID, the fsGroup (if specified), and group memberships defined in the container image for the uid of the container process. If unspecified, no additional groups are added to any container. Note that group memberships defined in the container image for the uid of the container process are still effective, even if they are not included in this list. 
Note that this field cannot be set when spec.os.name is windows.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='supplementalGroups', type=d.T.array)]), withSupplementalGroupsMixin(supplementalGroups): { spec+: { template+: { spec+: { securityContext+: { supplementalGroups+: if std.isArray(v=supplementalGroups) then supplementalGroups else [supplementalGroups] } } } } }, - '#withSysctls':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch."', args=[d.arg(name='sysctls', type=d.T.array)]), + '#withSysctls':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='sysctls', type=d.T.array)]), withSysctls(sysctls): { spec+: { template+: { spec+: { securityContext+: { sysctls: if std.isArray(v=sysctls) then sysctls else [sysctls] } } } } }, - '#withSysctlsMixin':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='sysctls', type=d.T.array)]), + '#withSysctlsMixin':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch. Note that this field cannot be set when spec.os.name is windows."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='sysctls', type=d.T.array)]), withSysctlsMixin(sysctls): { spec+: { template+: { spec+: { securityContext+: { sysctls+: if std.isArray(v=sysctls) then sysctls else [sysctls] } } } } }, }, '#withActiveDeadlineSeconds':: d.fn(help='"Optional duration in seconds the pod may be active on the node relative to StartTime before the system will actively try to mark it failed and kill associated containers. Value must be a positive integer."', args=[d.arg(name='activeDeadlineSeconds', type=d.T.integer)]), @@ -222,9 +232,9 @@ withDnsPolicy(dnsPolicy): { spec+: { template+: { spec+: { dnsPolicy: dnsPolicy } } } }, '#withEnableServiceLinks':: d.fn(help="\"EnableServiceLinks indicates whether information about services should be injected into pod's environment variables, matching the syntax of Docker links. Optional: Defaults to true.\"", args=[d.arg(name='enableServiceLinks', type=d.T.boolean)]), withEnableServiceLinks(enableServiceLinks): { spec+: { template+: { spec+: { enableServiceLinks: enableServiceLinks } } } }, - '#withEphemeralContainers':: d.fn(help="\"List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. This field is alpha-level and is only honored by servers that enable the EphemeralContainers feature.\"", args=[d.arg(name='ephemeralContainers', type=d.T.array)]), + '#withEphemeralContainers':: d.fn(help="\"List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. 
This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource.\"", args=[d.arg(name='ephemeralContainers', type=d.T.array)]), withEphemeralContainers(ephemeralContainers): { spec+: { template+: { spec+: { ephemeralContainers: if std.isArray(v=ephemeralContainers) then ephemeralContainers else [ephemeralContainers] } } } }, - '#withEphemeralContainersMixin':: d.fn(help="\"List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. This field is alpha-level and is only honored by servers that enable the EphemeralContainers feature.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='ephemeralContainers', type=d.T.array)]), + '#withEphemeralContainersMixin':: d.fn(help="\"List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='ephemeralContainers', type=d.T.array)]), withEphemeralContainersMixin(ephemeralContainers): { spec+: { template+: { spec+: { ephemeralContainers+: if std.isArray(v=ephemeralContainers) then ephemeralContainers else [ephemeralContainers] } } } }, '#withHostAliases':: d.fn(help="\"HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts file if specified. This is only valid for non-hostNetwork pods.\"", args=[d.arg(name='hostAliases', type=d.T.array)]), withHostAliases(hostAliases): { spec+: { template+: { spec+: { hostAliases: if std.isArray(v=hostAliases) then hostAliases else [hostAliases] } } } }, @@ -236,11 +246,13 @@ withHostNetwork(hostNetwork): { spec+: { template+: { spec+: { hostNetwork: hostNetwork } } } }, '#withHostPID':: d.fn(help="\"Use the host's pid namespace. Optional: Default to false.\"", args=[d.arg(name='hostPID', type=d.T.boolean)]), withHostPID(hostPID): { spec+: { template+: { spec+: { hostPID: hostPID } } } }, + '#withHostUsers':: d.fn(help="\"Use the host's user namespace. Optional: Default to true. If set to true or not present, the pod will be run in the host user namespace, useful for when the pod needs a feature only available to the host user namespace, such as loading a kernel module with CAP_SYS_MODULE. When set to false, a new userns is created for the pod. Setting false is useful for mitigating container breakout vulnerabilities even allowing users to run their containers as root without actually having root privileges on the host. 
This field is alpha-level and is only honored by servers that enable the UserNamespacesSupport feature.\"", args=[d.arg(name='hostUsers', type=d.T.boolean)]), + withHostUsers(hostUsers): { spec+: { template+: { spec+: { hostUsers: hostUsers } } } }, '#withHostname':: d.fn(help="\"Specifies the hostname of the Pod If not specified, the pod's hostname will be set to a system-defined value.\"", args=[d.arg(name='hostname', type=d.T.string)]), withHostname(hostname): { spec+: { template+: { spec+: { hostname: hostname } } } }, - '#withImagePullSecrets':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), + '#withImagePullSecrets':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), withImagePullSecrets(imagePullSecrets): { spec+: { template+: { spec+: { imagePullSecrets: if std.isArray(v=imagePullSecrets) then imagePullSecrets else [imagePullSecrets] } } } }, - '#withImagePullSecretsMixin':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), + '#withImagePullSecretsMixin':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), withImagePullSecretsMixin(imagePullSecrets): { spec+: { template+: { spec+: { imagePullSecrets+: if std.isArray(v=imagePullSecrets) then imagePullSecrets else [imagePullSecrets] } } } }, '#withInitContainers':: d.fn(help='"List of initialization containers belonging to the pod. Init containers are executed in order prior to containers being started. If any init container fails, the pod is considered to have failed and is handled according to its restartPolicy. The name for an init container or normal container must be unique among all containers. Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes. 
The resourceRequirements of an init container are taken into account during scheduling by finding the highest request/limit for each resource type, and then using the max of that value or the sum of the normal containers. Limits are applied to init containers in a similar fashion. Init containers cannot currently be added or removed. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/"', args=[d.arg(name='initContainers', type=d.T.array)]), withInitContainers(initContainers): { spec+: { template+: { spec+: { initContainers: if std.isArray(v=initContainers) then initContainers else [initContainers] } } } }, @@ -252,26 +264,34 @@ withNodeSelector(nodeSelector): { spec+: { template+: { spec+: { nodeSelector: nodeSelector } } } }, '#withNodeSelectorMixin':: d.fn(help="\"NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='nodeSelector', type=d.T.object)]), withNodeSelectorMixin(nodeSelector): { spec+: { template+: { spec+: { nodeSelector+: nodeSelector } } } }, - '#withOverhead':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md This field is alpha-level as of Kubernetes v1.16, and is only honored by servers that enable the PodOverhead feature."', args=[d.arg(name='overhead', type=d.T.object)]), + '#withOverhead':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md"', args=[d.arg(name='overhead', type=d.T.object)]), withOverhead(overhead): { spec+: { template+: { spec+: { overhead: overhead } } } }, - '#withOverheadMixin':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set.
If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md This field is alpha-level as of Kubernetes v1.16, and is only honored by servers that enable the PodOverhead feature."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='overhead', type=d.T.object)]), + '#withOverheadMixin':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='overhead', type=d.T.object)]), withOverheadMixin(overhead): { spec+: { template+: { spec+: { overhead+: overhead } } } }, - '#withPreemptionPolicy':: d.fn(help='"PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset. This field is beta-level, gated by the NonPreemptingPriority feature-gate."', args=[d.arg(name='preemptionPolicy', type=d.T.string)]), + '#withPreemptionPolicy':: d.fn(help='"PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset."', args=[d.arg(name='preemptionPolicy', type=d.T.string)]), withPreemptionPolicy(preemptionPolicy): { spec+: { template+: { spec+: { preemptionPolicy: preemptionPolicy } } } }, '#withPriority':: d.fn(help='"The priority value. Various system components use this field to find the priority of the pod. When Priority Admission Controller is enabled, it prevents users from setting this field. The admission controller populates this field from PriorityClassName. The higher the value, the higher the priority."', args=[d.arg(name='priority', type=d.T.integer)]), withPriority(priority): { spec+: { template+: { spec+: { priority: priority } } } }, '#withPriorityClassName':: d.fn(help="\"If specified, indicates the pod's priority. \\\"system-node-critical\\\" and \\\"system-cluster-critical\\\" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default.\"", args=[d.arg(name='priorityClassName', type=d.T.string)]), withPriorityClassName(priorityClassName): { spec+: { template+: { spec+: { priorityClassName: priorityClassName } } } }, - '#withReadinessGates':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. 
A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/0007-pod-ready%2B%2B.md"', args=[d.arg(name='readinessGates', type=d.T.array)]), + '#withReadinessGates':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates"', args=[d.arg(name='readinessGates', type=d.T.array)]), withReadinessGates(readinessGates): { spec+: { template+: { spec+: { readinessGates: if std.isArray(v=readinessGates) then readinessGates else [readinessGates] } } } }, - '#withReadinessGatesMixin':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/0007-pod-ready%2B%2B.md"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='readinessGates', type=d.T.array)]), + '#withReadinessGatesMixin':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='readinessGates', type=d.T.array)]), withReadinessGatesMixin(readinessGates): { spec+: { template+: { spec+: { readinessGates+: if std.isArray(v=readinessGates) then readinessGates else [readinessGates] } } } }, - '#withRestartPolicy':: d.fn(help='"Restart policy for all containers within the pod. One of Always, OnFailure, Never. Default to Always. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy"', args=[d.arg(name='restartPolicy', type=d.T.string)]), + '#withResourceClaims':: d.fn(help='"ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. The resources will be made available to those containers which consume them by name.\\n\\nThis is an alpha field and requires enabling the DynamicResourceAllocation feature gate.\\n\\nThis field is immutable."', args=[d.arg(name='resourceClaims', type=d.T.array)]), + withResourceClaims(resourceClaims): { spec+: { template+: { spec+: { resourceClaims: if std.isArray(v=resourceClaims) then resourceClaims else [resourceClaims] } } } }, + '#withResourceClaimsMixin':: d.fn(help='"ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. The resources will be made available to those containers which consume them by name.\\n\\nThis is an alpha field and requires enabling the DynamicResourceAllocation feature gate.\\n\\nThis field is immutable."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='resourceClaims', type=d.T.array)]), + withResourceClaimsMixin(resourceClaims): { spec+: { template+: { spec+: { resourceClaims+: if std.isArray(v=resourceClaims) then resourceClaims else [resourceClaims] } } } }, + '#withRestartPolicy':: d.fn(help='"Restart policy for all containers within the pod. 
One of Always, OnFailure, Never. In some contexts, only a subset of those values may be permitted. Default to Always. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy"', args=[d.arg(name='restartPolicy', type=d.T.string)]), withRestartPolicy(restartPolicy): { spec+: { template+: { spec+: { restartPolicy: restartPolicy } } } }, - '#withRuntimeClassName':: d.fn(help='"RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \\"legacy\\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md This is a beta feature as of Kubernetes v1.14."', args=[d.arg(name='runtimeClassName', type=d.T.string)]), + '#withRuntimeClassName':: d.fn(help='"RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \\"legacy\\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class"', args=[d.arg(name='runtimeClassName', type=d.T.string)]), withRuntimeClassName(runtimeClassName): { spec+: { template+: { spec+: { runtimeClassName: runtimeClassName } } } }, '#withSchedulerName':: d.fn(help='"If specified, the pod will be dispatched by specified scheduler. If not specified, the pod will be dispatched by default scheduler."', args=[d.arg(name='schedulerName', type=d.T.string)]), withSchedulerName(schedulerName): { spec+: { template+: { spec+: { schedulerName: schedulerName } } } }, + '#withSchedulingGates':: d.fn(help='"SchedulingGates is an opaque list of values that if specified will block scheduling the pod. If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the scheduler will not attempt to schedule the pod.\\n\\nSchedulingGates can only be set at pod creation time, and be removed only afterwards.\\n\\nThis is a beta feature enabled by the PodSchedulingReadiness feature gate."', args=[d.arg(name='schedulingGates', type=d.T.array)]), + withSchedulingGates(schedulingGates): { spec+: { template+: { spec+: { schedulingGates: if std.isArray(v=schedulingGates) then schedulingGates else [schedulingGates] } } } }, + '#withSchedulingGatesMixin':: d.fn(help='"SchedulingGates is an opaque list of values that if specified will block scheduling the pod. If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the scheduler will not attempt to schedule the pod.\\n\\nSchedulingGates can only be set at pod creation time, and be removed only afterwards.\\n\\nThis is a beta feature enabled by the PodSchedulingReadiness feature gate."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='schedulingGates', type=d.T.array)]), + withSchedulingGatesMixin(schedulingGates): { spec+: { template+: { spec+: { schedulingGates+: if std.isArray(v=schedulingGates) then schedulingGates else [schedulingGates] } } } }, '#withServiceAccount':: d.fn(help='"DeprecatedServiceAccount is a deprecated alias for ServiceAccountName.
Deprecated: Use serviceAccountName instead."', args=[d.arg(name='serviceAccount', type=d.T.string)]), withServiceAccount(serviceAccount): { spec+: { template+: { spec+: { serviceAccount: serviceAccount } } } }, '#withServiceAccountName':: d.fn(help='"ServiceAccountName is the name of the ServiceAccount to use to run this pod. More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/"', args=[d.arg(name='serviceAccountName', type=d.T.string)]), @@ -302,17 +322,23 @@ withActiveDeadlineSeconds(activeDeadlineSeconds): { spec+: { activeDeadlineSeconds: activeDeadlineSeconds } }, '#withBackoffLimit':: d.fn(help='"Specifies the number of retries before marking this job failed. Defaults to 6"', args=[d.arg(name='backoffLimit', type=d.T.integer)]), withBackoffLimit(backoffLimit): { spec+: { backoffLimit: backoffLimit } }, - '#withCompletionMode':: d.fn(help="\"CompletionMode specifies how Pod completions are tracked. It can be `NonIndexed` (default) or `Indexed`.\\n\\n`NonIndexed` means that the Job is considered complete when there have been .spec.completions successfully completed Pods. Each Pod completion is homologous to each other.\\n\\n`Indexed` means that the Pods of a Job get an associated completion index from 0 to (.spec.completions - 1), available in the annotation batch.kubernetes.io/job-completion-index. The Job is considered complete when there is one successfully completed Pod for each index. When value is `Indexed`, .spec.completions must be specified and `.spec.parallelism` must be less than or equal to 10^5.\\n\\nThis field is alpha-level and is only honored by servers that enable the IndexedJob feature gate. More completion modes can be added in the future. If the Job controller observes a mode that it doesn't recognize, the controller skips updates for the Job.\"", args=[d.arg(name='completionMode', type=d.T.string)]), + '#withBackoffLimitPerIndex':: d.fn(help="\"Specifies the limit for the number of retries within an index before marking this index as failed. When enabled the number of failures per index is kept in the pod's batch.kubernetes.io/job-index-failure-count annotation. It can only be set when Job's completionMode=Indexed, and the Pod's restart policy is Never. The field is immutable. This field is beta-level. It can be used when the `JobBackoffLimitPerIndex` feature gate is enabled (enabled by default).\"", args=[d.arg(name='backoffLimitPerIndex', type=d.T.integer)]), + withBackoffLimitPerIndex(backoffLimitPerIndex): { spec+: { backoffLimitPerIndex: backoffLimitPerIndex } }, + '#withCompletionMode':: d.fn(help="\"completionMode specifies how Pod completions are tracked. It can be `NonIndexed` (default) or `Indexed`.\\n\\n`NonIndexed` means that the Job is considered complete when there have been .spec.completions successfully completed Pods. Each Pod completion is homologous to each other.\\n\\n`Indexed` means that the Pods of a Job get an associated completion index from 0 to (.spec.completions - 1), available in the annotation batch.kubernetes.io/job-completion-index. The Job is considered complete when there is one successfully completed Pod for each index. When value is `Indexed`, .spec.completions must be specified and `.spec.parallelism` must be less than or equal to 10^5. In addition, The Pod name takes the form `$(job-name)-$(index)-$(random-string)`, the Pod hostname takes the form `$(job-name)-$(index)`.\\n\\nMore completion modes can be added in the future. 
If the Job controller observes a mode that it doesn't recognize, which is possible during upgrades due to version skew, the controller skips updates for the Job.\"", args=[d.arg(name='completionMode', type=d.T.string)]), withCompletionMode(completionMode): { spec+: { completionMode: completionMode } }, - '#withCompletions':: d.fn(help='"Specifies the desired number of successfully finished pods the job should be run with. Setting to nil means that the success of any pod signals the success of all pods, and allows parallelism to have any positive value. Setting to 1 means that parallelism is limited to 1 and the success of that pod signals the success of the job. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/"', args=[d.arg(name='completions', type=d.T.integer)]), + '#withCompletions':: d.fn(help='"Specifies the desired number of successfully finished pods the job should be run with. Setting to null means that the success of any pod signals the success of all pods, and allows parallelism to have any positive value. Setting to 1 means that parallelism is limited to 1 and the success of that pod signals the success of the job. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/"', args=[d.arg(name='completions', type=d.T.integer)]), withCompletions(completions): { spec+: { completions: completions } }, '#withManualSelector':: d.fn(help='"manualSelector controls generation of pod labels and pod selectors. Leave `manualSelector` unset unless you are certain what you are doing. When false or unset, the system pick labels unique to this job and appends those labels to the pod template. When true, the user is responsible for picking unique labels and specifying the selector. Failure to pick a unique label may cause this and other jobs to not function correctly. However, You may see `manualSelector=true` in jobs that were created with the old `extensions/v1beta1` API. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/#specifying-your-own-pod-selector"', args=[d.arg(name='manualSelector', type=d.T.boolean)]), withManualSelector(manualSelector): { spec+: { manualSelector: manualSelector } }, + '#withMaxFailedIndexes':: d.fn(help='"Specifies the maximal number of failed indexes before marking the Job as failed, when backoffLimitPerIndex is set. Once the number of failed indexes exceeds this number the entire Job is marked as Failed and its execution is terminated. When left as null the job continues execution of all of its indexes and is marked with the `Complete` Job condition. It can only be specified when backoffLimitPerIndex is set. It can be null or up to completions. It is required and must be less than or equal to 10^4 when is completions greater than 10^5. This field is beta-level. It can be used when the `JobBackoffLimitPerIndex` feature gate is enabled (enabled by default)."', args=[d.arg(name='maxFailedIndexes', type=d.T.integer)]), + withMaxFailedIndexes(maxFailedIndexes): { spec+: { maxFailedIndexes: maxFailedIndexes } }, '#withParallelism':: d.fn(help='"Specifies the maximum desired number of pods the job should run at any given time. The actual number of pods running in steady state will be less than this number when ((.spec.completions - .status.successful) < .spec.parallelism), i.e. when the work left to do is less than max parallelism. 
More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/"', args=[d.arg(name='parallelism', type=d.T.integer)]), withParallelism(parallelism): { spec+: { parallelism: parallelism } }, - '#withSuspend':: d.fn(help='"Suspend specifies whether the Job controller should create Pods or not. If a Job is created with suspend set to true, no Pods are created by the Job controller. If a Job is suspended after creation (i.e. the flag goes from false to true), the Job controller will delete all active Pods associated with this Job. Users must design their workload to gracefully handle this. Suspending a Job will reset the StartTime field of the Job, effectively resetting the ActiveDeadlineSeconds timer too. This is an alpha field and requires the SuspendJob feature gate to be enabled; otherwise this field may not be set to true. Defaults to false."', args=[d.arg(name='suspend', type=d.T.boolean)]), + '#withPodReplacementPolicy':: d.fn(help='"podReplacementPolicy specifies when to create replacement Pods. Possible values are: - TerminatingOrFailed means that we recreate pods\\n when they are terminating (has a metadata.deletionTimestamp) or failed.\\n- Failed means to wait until a previously created Pod is fully terminated (has phase\\n Failed or Succeeded) before creating a replacement Pod.\\n\\nWhen using podFailurePolicy, Failed is the the only allowed value. TerminatingOrFailed and Failed are allowed values when podFailurePolicy is not in use. This is an beta field. To use this, enable the JobPodReplacementPolicy feature toggle. This is on by default."', args=[d.arg(name='podReplacementPolicy', type=d.T.string)]), + withPodReplacementPolicy(podReplacementPolicy): { spec+: { podReplacementPolicy: podReplacementPolicy } }, + '#withSuspend':: d.fn(help='"suspend specifies whether the Job controller should create Pods or not. If a Job is created with suspend set to true, no Pods are created by the Job controller. If a Job is suspended after creation (i.e. the flag goes from false to true), the Job controller will delete all active Pods associated with this Job. Users must design their workload to gracefully handle this. Suspending a Job will reset the StartTime field of the Job, effectively resetting the ActiveDeadlineSeconds timer too. Defaults to false."', args=[d.arg(name='suspend', type=d.T.boolean)]), withSuspend(suspend): { spec+: { suspend: suspend } }, - '#withTtlSecondsAfterFinished':: d.fn(help="\"ttlSecondsAfterFinished limits the lifetime of a Job that has finished execution (either Complete or Failed). If this field is set, ttlSecondsAfterFinished after the Job finishes, it is eligible to be automatically deleted. When the Job is being deleted, its lifecycle guarantees (e.g. finalizers) will be honored. If this field is unset, the Job won't be automatically deleted. If this field is set to zero, the Job becomes eligible to be deleted immediately after it finishes. This field is alpha-level and is only honored by servers that enable the TTLAfterFinished feature.\"", args=[d.arg(name='ttlSecondsAfterFinished', type=d.T.integer)]), + '#withTtlSecondsAfterFinished':: d.fn(help="\"ttlSecondsAfterFinished limits the lifetime of a Job that has finished execution (either Complete or Failed). If this field is set, ttlSecondsAfterFinished after the Job finishes, it is eligible to be automatically deleted. When the Job is being deleted, its lifecycle guarantees (e.g. finalizers) will be honored. If this field is unset, the Job won't be automatically deleted. 
If this field is set to zero, the Job becomes eligible to be deleted immediately after it finishes.\"", args=[d.arg(name='ttlSecondsAfterFinished', type=d.T.integer)]), withTtlSecondsAfterFinished(ttlSecondsAfterFinished): { spec+: { ttlSecondsAfterFinished: ttlSecondsAfterFinished } }, }, '#mixin': 'ignore', diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/batch/v1/main.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/batch/v1/main.libsonnet similarity index 53% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/batch/v1/main.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/batch/v1/main.libsonnet index 1d5e7ebdf2e..d3df3a55591 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/batch/v1/main.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/batch/v1/main.libsonnet @@ -9,4 +9,9 @@ jobSpec: (import 'jobSpec.libsonnet'), jobStatus: (import 'jobStatus.libsonnet'), jobTemplateSpec: (import 'jobTemplateSpec.libsonnet'), + podFailurePolicy: (import 'podFailurePolicy.libsonnet'), + podFailurePolicyOnExitCodesRequirement: (import 'podFailurePolicyOnExitCodesRequirement.libsonnet'), + podFailurePolicyOnPodConditionsPattern: (import 'podFailurePolicyOnPodConditionsPattern.libsonnet'), + podFailurePolicyRule: (import 'podFailurePolicyRule.libsonnet'), + uncountedTerminatedPods: (import 'uncountedTerminatedPods.libsonnet'), } diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/batch/v1/podFailurePolicy.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/batch/v1/podFailurePolicy.libsonnet new file mode 100644 index 00000000000..29a48411984 --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/batch/v1/podFailurePolicy.libsonnet @@ -0,0 +1,10 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='podFailurePolicy', url='', help='"PodFailurePolicy describes how failed pods influence the backoffLimit."'), + '#withRules':: d.fn(help='"A list of pod failure policy rules. The rules are evaluated in order. Once a rule matches a Pod failure, the remaining of the rules are ignored. When no rule matches the Pod failure, the default handling applies - the counter of pod failures is incremented and it is checked against the backoffLimit. At most 20 elements are allowed."', args=[d.arg(name='rules', type=d.T.array)]), + withRules(rules): { rules: if std.isArray(v=rules) then rules else [rules] }, + '#withRulesMixin':: d.fn(help='"A list of pod failure policy rules. The rules are evaluated in order. Once a rule matches a Pod failure, the remaining of the rules are ignored. When no rule matches the Pod failure, the default handling applies - the counter of pod failures is incremented and it is checked against the backoffLimit. 
At most 20 elements are allowed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='rules', type=d.T.array)]), + withRulesMixin(rules): { rules+: if std.isArray(v=rules) then rules else [rules] }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/batch/v1/podFailurePolicyOnExitCodesRequirement.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/batch/v1/podFailurePolicyOnExitCodesRequirement.libsonnet new file mode 100644 index 00000000000..0a81d286d36 --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/batch/v1/podFailurePolicyOnExitCodesRequirement.libsonnet @@ -0,0 +1,14 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='podFailurePolicyOnExitCodesRequirement', url='', help='"PodFailurePolicyOnExitCodesRequirement describes the requirement for handling a failed pod based on its container exit codes. In particular, it lookups the .state.terminated.exitCode for each app container and init container status, represented by the .status.containerStatuses and .status.initContainerStatuses fields in the Pod status, respectively. Containers completed with success (exit code 0) are excluded from the requirement check."'), + '#withContainerName':: d.fn(help='"Restricts the check for exit codes to the container with the specified name. When null, the rule applies to all containers. When specified, it should match one the container or initContainer names in the pod template."', args=[d.arg(name='containerName', type=d.T.string)]), + withContainerName(containerName): { containerName: containerName }, + '#withOperator':: d.fn(help="\"Represents the relationship between the container exit code(s) and the specified values. Containers completed with success (exit code 0) are excluded from the requirement check. Possible values are:\\n\\n- In: the requirement is satisfied if at least one container exit code\\n (might be multiple if there are multiple containers not restricted\\n by the 'containerName' field) is in the set of specified values.\\n- NotIn: the requirement is satisfied if at least one container exit code\\n (might be multiple if there are multiple containers not restricted\\n by the 'containerName' field) is not in the set of specified values.\\nAdditional values are considered to be added in the future. Clients should react to an unknown operator by assuming the requirement is not satisfied.\"", args=[d.arg(name='operator', type=d.T.string)]), + withOperator(operator): { operator: operator }, + '#withValues':: d.fn(help="\"Specifies the set of values. Each returned container exit code (might be multiple in case of multiple containers) is checked against this set of values with respect to the operator. The list of values must be ordered and must not contain duplicates. Value '0' cannot be used for the In operator. At least one element is required. At most 255 elements are allowed.\"", args=[d.arg(name='values', type=d.T.array)]), + withValues(values): { values: if std.isArray(v=values) then values else [values] }, + '#withValuesMixin':: d.fn(help="\"Specifies the set of values. Each returned container exit code (might be multiple in case of multiple containers) is checked against this set of values with respect to the operator. The list of values must be ordered and must not contain duplicates. Value '0' cannot be used for the In operator. 
At least one element is required. At most 255 elements are allowed.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='values', type=d.T.array)]), + withValuesMixin(values): { values+: if std.isArray(v=values) then values else [values] }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/batch/v1/podFailurePolicyOnPodConditionsPattern.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/batch/v1/podFailurePolicyOnPodConditionsPattern.libsonnet new file mode 100644 index 00000000000..6037e13c8ae --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/batch/v1/podFailurePolicyOnPodConditionsPattern.libsonnet @@ -0,0 +1,8 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='podFailurePolicyOnPodConditionsPattern', url='', help='"PodFailurePolicyOnPodConditionsPattern describes a pattern for matching an actual pod condition type."'), + '#withType':: d.fn(help='"Specifies the required Pod condition type. To match a pod condition it is required that specified type equals the pod condition type."', args=[d.arg(name='type', type=d.T.string)]), + withType(type): { type: type }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/batch/v1/podFailurePolicyRule.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/batch/v1/podFailurePolicyRule.libsonnet new file mode 100644 index 00000000000..9134be64ec4 --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/batch/v1/podFailurePolicyRule.libsonnet @@ -0,0 +1,23 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='podFailurePolicyRule', url='', help='"PodFailurePolicyRule describes how a pod failure is handled when the requirements are met. One of onExitCodes and onPodConditions, but not both, can be used in each rule."'), + '#onExitCodes':: d.obj(help='"PodFailurePolicyOnExitCodesRequirement describes the requirement for handling a failed pod based on its container exit codes. In particular, it lookups the .state.terminated.exitCode for each app container and init container status, represented by the .status.containerStatuses and .status.initContainerStatuses fields in the Pod status, respectively. Containers completed with success (exit code 0) are excluded from the requirement check."'), + onExitCodes: { + '#withContainerName':: d.fn(help='"Restricts the check for exit codes to the container with the specified name. When null, the rule applies to all containers. When specified, it should match one the container or initContainer names in the pod template."', args=[d.arg(name='containerName', type=d.T.string)]), + withContainerName(containerName): { onExitCodes+: { containerName: containerName } }, + '#withOperator':: d.fn(help="\"Represents the relationship between the container exit code(s) and the specified values. Containers completed with success (exit code 0) are excluded from the requirement check. 
Possible values are:\\n\\n- In: the requirement is satisfied if at least one container exit code\\n (might be multiple if there are multiple containers not restricted\\n by the 'containerName' field) is in the set of specified values.\\n- NotIn: the requirement is satisfied if at least one container exit code\\n (might be multiple if there are multiple containers not restricted\\n by the 'containerName' field) is not in the set of specified values.\\nAdditional values are considered to be added in the future. Clients should react to an unknown operator by assuming the requirement is not satisfied.\"", args=[d.arg(name='operator', type=d.T.string)]), + withOperator(operator): { onExitCodes+: { operator: operator } }, + '#withValues':: d.fn(help="\"Specifies the set of values. Each returned container exit code (might be multiple in case of multiple containers) is checked against this set of values with respect to the operator. The list of values must be ordered and must not contain duplicates. Value '0' cannot be used for the In operator. At least one element is required. At most 255 elements are allowed.\"", args=[d.arg(name='values', type=d.T.array)]), + withValues(values): { onExitCodes+: { values: if std.isArray(v=values) then values else [values] } }, + '#withValuesMixin':: d.fn(help="\"Specifies the set of values. Each returned container exit code (might be multiple in case of multiple containers) is checked against this set of values with respect to the operator. The list of values must be ordered and must not contain duplicates. Value '0' cannot be used for the In operator. At least one element is required. At most 255 elements are allowed.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='values', type=d.T.array)]), + withValuesMixin(values): { onExitCodes+: { values+: if std.isArray(v=values) then values else [values] } }, + }, + '#withAction':: d.fn(help="\"Specifies the action taken on a pod failure when the requirements are satisfied. Possible values are:\\n\\n- FailJob: indicates that the pod's job is marked as Failed and all\\n running pods are terminated.\\n- FailIndex: indicates that the pod's index is marked as Failed and will\\n not be restarted.\\n This value is beta-level. It can be used when the\\n `JobBackoffLimitPerIndex` feature gate is enabled (enabled by default).\\n- Ignore: indicates that the counter towards the .backoffLimit is not\\n incremented and a replacement pod is created.\\n- Count: indicates that the pod is handled in the default way - the\\n counter towards the .backoffLimit is incremented.\\nAdditional values are considered to be added in the future. Clients should react to an unknown action by skipping the rule.\"", args=[d.arg(name='action', type=d.T.string)]), + withAction(action): { action: action }, + '#withOnPodConditions':: d.fn(help='"Represents the requirement on the pod conditions. The requirement is represented as a list of pod condition patterns. The requirement is satisfied if at least one pattern matches an actual pod condition. At most 20 elements are allowed."', args=[d.arg(name='onPodConditions', type=d.T.array)]), + withOnPodConditions(onPodConditions): { onPodConditions: if std.isArray(v=onPodConditions) then onPodConditions else [onPodConditions] }, + '#withOnPodConditionsMixin':: d.fn(help='"Represents the requirement on the pod conditions. The requirement is represented as a list of pod condition patterns. The requirement is satisfied if at least one pattern matches an actual pod condition. 
At most 20 elements are allowed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='onPodConditions', type=d.T.array)]), + withOnPodConditionsMixin(onPodConditions): { onPodConditions+: if std.isArray(v=onPodConditions) then onPodConditions else [onPodConditions] }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/batch/v1/uncountedTerminatedPods.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/batch/v1/uncountedTerminatedPods.libsonnet new file mode 100644 index 00000000000..482a57a3b19 --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/batch/v1/uncountedTerminatedPods.libsonnet @@ -0,0 +1,14 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='uncountedTerminatedPods', url='', help="\"UncountedTerminatedPods holds UIDs of Pods that have terminated but haven't been accounted in Job status counters.\""), + '#withFailed':: d.fn(help='"failed holds UIDs of failed Pods."', args=[d.arg(name='failed', type=d.T.array)]), + withFailed(failed): { failed: if std.isArray(v=failed) then failed else [failed] }, + '#withFailedMixin':: d.fn(help='"failed holds UIDs of failed Pods."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='failed', type=d.T.array)]), + withFailedMixin(failed): { failed+: if std.isArray(v=failed) then failed else [failed] }, + '#withSucceeded':: d.fn(help='"succeeded holds UIDs of succeeded Pods."', args=[d.arg(name='succeeded', type=d.T.array)]), + withSucceeded(succeeded): { succeeded: if std.isArray(v=succeeded) then succeeded else [succeeded] }, + '#withSucceededMixin':: d.fn(help='"succeeded holds UIDs of succeeded Pods."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='succeeded', type=d.T.array)]), + withSucceededMixin(succeeded): { succeeded+: if std.isArray(v=succeeded) then succeeded else [succeeded] }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/certificates/main.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/certificates/main.libsonnet similarity index 74% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/certificates/main.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/certificates/main.libsonnet index 2fc91e17e6c..a1350a0bee7 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/certificates/main.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/certificates/main.libsonnet @@ -2,5 +2,5 @@ local d = (import 'doc-util/main.libsonnet'), '#':: d.pkg(name='certificates', url='', help=''), v1: (import 'v1/main.libsonnet'), - v1beta1: (import 'v1beta1/main.libsonnet'), + v1alpha1: (import 'v1alpha1/main.libsonnet'), } diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/certificates/v1/certificateSigningRequest.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/certificates/v1/certificateSigningRequest.libsonnet similarity index 86% rename from 
operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/certificates/v1/certificateSigningRequest.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/certificates/v1/certificateSigningRequest.libsonnet index 45845724d08..ae3c70e38ca 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/certificates/v1/certificateSigningRequest.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/certificates/v1/certificateSigningRequest.libsonnet @@ -3,12 +3,10 @@ '#':: d.pkg(name='certificateSigningRequest', url='', help='"CertificateSigningRequest objects provide a mechanism to obtain x509 certificates by submitting a certificate signing request, and having it asynchronously approved and issued.\\n\\nKubelets use this API to obtain:\\n 1. client certificates to authenticate to kube-apiserver (with the \\"kubernetes.io/kube-apiserver-client-kubelet\\" signerName).\\n 2. serving certificates for TLS endpoints kube-apiserver can connect to securely (with the \\"kubernetes.io/kubelet-serving\\" signerName).\\n\\nThis API can be used to request client certificates to authenticate to kube-apiserver (with the \\"kubernetes.io/kube-apiserver-client\\" signerName), or to obtain certificates from custom non-Kubernetes signers."'), '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. 
This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -19,21 +17,21 @@ withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. 
If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { metadata+: { generateName: generateName } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { metadata+: { labels+: labels } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". 
The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { metadata+: { namespace: namespace } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, @@ -41,9 +39,9 @@ withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. 
They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { metadata+: { uid: uid } }, }, '#new':: d.fn(help='new returns an instance of CertificateSigningRequest', args=[d.arg(name='name', type=d.T.string)]), @@ -53,6 +51,8 @@ } + self.metadata.withName(name=name), '#spec':: d.obj(help='"CertificateSigningRequestSpec contains the certificate request."'), spec: { + '#withExpirationSeconds':: d.fn(help='"expirationSeconds is the requested duration of validity of the issued certificate. The certificate signer may issue a certificate with a different validity duration so a client must check the delta between the notBefore and and notAfter fields in the issued certificate to determine the actual duration.\\n\\nThe v1.22+ in-tree implementations of the well-known Kubernetes signers will honor this field as long as the requested duration is not greater than the maximum duration they will honor per the --cluster-signing-duration CLI flag to the Kubernetes controller manager.\\n\\nCertificate signers may not honor this field for various reasons:\\n\\n 1. Old signer that is unaware of the field (such as the in-tree\\n implementations prior to v1.22)\\n 2. Signer whose configured maximum is shorter than the requested duration\\n 3. Signer whose configured minimum is longer than the requested duration\\n\\nThe minimum valid value for expirationSeconds is 600, i.e. 10 minutes."', args=[d.arg(name='expirationSeconds', type=d.T.integer)]), + withExpirationSeconds(expirationSeconds): { spec+: { expirationSeconds: expirationSeconds } }, '#withExtra':: d.fn(help='"extra contains extra attributes of the user that created the CertificateSigningRequest. 
Populated by the API server on creation and immutable."', args=[d.arg(name='extra', type=d.T.object)]), withExtra(extra): { spec+: { extra: extra } }, '#withExtraMixin':: d.fn(help='"extra contains extra attributes of the user that created the CertificateSigningRequest. Populated by the API server on creation and immutable."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='extra', type=d.T.object)]), diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/certificates/v1/certificateSigningRequestCondition.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/certificates/v1/certificateSigningRequestCondition.libsonnet similarity index 100% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/certificates/v1/certificateSigningRequestCondition.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/certificates/v1/certificateSigningRequestCondition.libsonnet diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/certificates/v1/certificateSigningRequestSpec.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/certificates/v1/certificateSigningRequestSpec.libsonnet similarity index 84% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/certificates/v1/certificateSigningRequestSpec.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/certificates/v1/certificateSigningRequestSpec.libsonnet index 49712d490d6..34fd3daa912 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/certificates/v1/certificateSigningRequestSpec.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/certificates/v1/certificateSigningRequestSpec.libsonnet @@ -1,6 +1,8 @@ { local d = (import 'doc-util/main.libsonnet'), '#':: d.pkg(name='certificateSigningRequestSpec', url='', help='"CertificateSigningRequestSpec contains the certificate request."'), + '#withExpirationSeconds':: d.fn(help='"expirationSeconds is the requested duration of validity of the issued certificate. The certificate signer may issue a certificate with a different validity duration so a client must check the delta between the notBefore and and notAfter fields in the issued certificate to determine the actual duration.\\n\\nThe v1.22+ in-tree implementations of the well-known Kubernetes signers will honor this field as long as the requested duration is not greater than the maximum duration they will honor per the --cluster-signing-duration CLI flag to the Kubernetes controller manager.\\n\\nCertificate signers may not honor this field for various reasons:\\n\\n 1. Old signer that is unaware of the field (such as the in-tree\\n implementations prior to v1.22)\\n 2. Signer whose configured maximum is shorter than the requested duration\\n 3. Signer whose configured minimum is longer than the requested duration\\n\\nThe minimum valid value for expirationSeconds is 600, i.e. 10 minutes."', args=[d.arg(name='expirationSeconds', type=d.T.integer)]), + withExpirationSeconds(expirationSeconds): { expirationSeconds: expirationSeconds }, '#withExtra':: d.fn(help='"extra contains extra attributes of the user that created the CertificateSigningRequest. 
Populated by the API server on creation and immutable."', args=[d.arg(name='extra', type=d.T.object)]), withExtra(extra): { extra: extra }, '#withExtraMixin':: d.fn(help='"extra contains extra attributes of the user that created the CertificateSigningRequest. Populated by the API server on creation and immutable."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='extra', type=d.T.object)]), diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/certificates/v1/certificateSigningRequestStatus.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/certificates/v1/certificateSigningRequestStatus.libsonnet similarity index 100% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/certificates/v1/certificateSigningRequestStatus.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/certificates/v1/certificateSigningRequestStatus.libsonnet diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/certificates/v1/main.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/certificates/v1/main.libsonnet similarity index 100% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/certificates/v1/main.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/certificates/v1/main.libsonnet diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/certificates/v1alpha1/clusterTrustBundle.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/certificates/v1alpha1/clusterTrustBundle.libsonnet new file mode 100644 index 00000000000..939b7162a99 --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/certificates/v1alpha1/clusterTrustBundle.libsonnet @@ -0,0 +1,61 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='clusterTrustBundle', url='', help='"ClusterTrustBundle is a cluster-scoped container for X.509 trust anchors (root certificates).\\n\\nClusterTrustBundle objects are considered to be readable by any authenticated user in the cluster, because they can be mounted by pods using the `clusterTrustBundle` projection. All service accounts have read access to ClusterTrustBundles by default. Users who only have namespace-level access to a cluster can read ClusterTrustBundles by impersonating a serviceaccount that they have access to.\\n\\nIt can be optionally associated with a particular assigner, in which case it contains one valid set of trust anchors for that signer. Signers may have multiple associated ClusterTrustBundles; each is an independent set of trust anchors for that signer. Admission control is used to enforce that only users with permissions on the signer can create or modify the corresponding bundle."'), + '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), + metadata: { + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + withAnnotations(annotations): { metadata+: { annotations: annotations } }, + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, + '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), + withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, + '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), + withDeletionGracePeriodSeconds(deletionGracePeriodSeconds): { metadata+: { deletionGracePeriodSeconds: deletionGracePeriodSeconds } }, + '#withDeletionTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='deletionTimestamp', type=d.T.string)]), + withDeletionTimestamp(deletionTimestamp): { metadata+: { deletionTimestamp: deletionTimestamp } }, + '#withFinalizers':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."', args=[d.arg(name='finalizers', type=d.T.array)]), + withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, + '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. 
finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), + withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + withGenerateName(generateName): { metadata+: { generateName: generateName } }, + '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), + withGeneration(generation): { metadata+: { generation: generation } }, + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), + withLabels(labels): { metadata+: { labels: labels } }, + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + withLabelsMixin(labels): { metadata+: { labels+: labels } }, + '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), + withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, + '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. 
This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), + withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { metadata+: { name: name } }, + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + withNamespace(namespace): { metadata+: { namespace: namespace } }, + '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), + withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, + '#withOwnerReferencesMixin':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ownerReferences', type=d.T.array)]), + withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, + '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), + withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), + withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), + withUid(uid): { metadata+: { uid: uid } }, + }, + '#new':: d.fn(help='new returns an instance of ClusterTrustBundle', args=[d.arg(name='name', type=d.T.string)]), + new(name): { + apiVersion: 'certificates.k8s.io/v1alpha1', + kind: 'ClusterTrustBundle', + } + self.metadata.withName(name=name), + '#spec':: d.obj(help='"ClusterTrustBundleSpec contains the signer and trust anchors."'), + spec: { + '#withSignerName':: d.fn(help="\"signerName indicates the associated signer, if any.\\n\\nIn order to create or update a ClusterTrustBundle that sets signerName, you must have the following cluster-scoped permission: group=certificates.k8s.io resource=signers resourceName=\u003cthe signer name\u003e verb=attest.\\n\\nIf signerName is not empty, then the ClusterTrustBundle object must be named with the signer name as a prefix (translating slashes to colons). For example, for the signer name `example.com/foo`, valid ClusterTrustBundle object names include `example.com:foo:abc` and `example.com:foo:v1`.\\n\\nIf signerName is empty, then the ClusterTrustBundle object's name must not have such a prefix.\\n\\nList/watch requests for ClusterTrustBundles can filter on this field using a `spec.signerName=NAME` field selector.\"", args=[d.arg(name='signerName', type=d.T.string)]), + withSignerName(signerName): { spec+: { signerName: signerName } }, + '#withTrustBundle':: d.fn(help='"trustBundle contains the individual X.509 trust anchors for this bundle, as PEM bundle of PEM-wrapped, DER-formatted X.509 certificates.\\n\\nThe data must consist only of PEM certificate blocks that parse as valid X.509 certificates. Each certificate must include a basic constraints extension with the CA bit set. 
The API server will reject objects that contain duplicate certificates, or that use PEM block headers.\\n\\nUsers of ClusterTrustBundles, including Kubelet, are free to reorder and deduplicate certificate blocks in this file according to their own logic, as well as to drop PEM block headers and inter-block data."', args=[d.arg(name='trustBundle', type=d.T.string)]), + withTrustBundle(trustBundle): { spec+: { trustBundle: trustBundle } }, + }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/certificates/v1alpha1/clusterTrustBundleSpec.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/certificates/v1alpha1/clusterTrustBundleSpec.libsonnet new file mode 100644 index 00000000000..1c3be20f16d --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/certificates/v1alpha1/clusterTrustBundleSpec.libsonnet @@ -0,0 +1,10 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='clusterTrustBundleSpec', url='', help='"ClusterTrustBundleSpec contains the signer and trust anchors."'), + '#withSignerName':: d.fn(help="\"signerName indicates the associated signer, if any.\\n\\nIn order to create or update a ClusterTrustBundle that sets signerName, you must have the following cluster-scoped permission: group=certificates.k8s.io resource=signers resourceName=\u003cthe signer name\u003e verb=attest.\\n\\nIf signerName is not empty, then the ClusterTrustBundle object must be named with the signer name as a prefix (translating slashes to colons). For example, for the signer name `example.com/foo`, valid ClusterTrustBundle object names include `example.com:foo:abc` and `example.com:foo:v1`.\\n\\nIf signerName is empty, then the ClusterTrustBundle object's name must not have such a prefix.\\n\\nList/watch requests for ClusterTrustBundles can filter on this field using a `spec.signerName=NAME` field selector.\"", args=[d.arg(name='signerName', type=d.T.string)]), + withSignerName(signerName): { signerName: signerName }, + '#withTrustBundle':: d.fn(help='"trustBundle contains the individual X.509 trust anchors for this bundle, as PEM bundle of PEM-wrapped, DER-formatted X.509 certificates.\\n\\nThe data must consist only of PEM certificate blocks that parse as valid X.509 certificates. Each certificate must include a basic constraints extension with the CA bit set. 
The API server will reject objects that contain duplicate certificates, or that use PEM block headers.\\n\\nUsers of ClusterTrustBundles, including Kubelet, are free to reorder and deduplicate certificate blocks in this file according to their own logic, as well as to drop PEM block headers and inter-block data."', args=[d.arg(name='trustBundle', type=d.T.string)]), + withTrustBundle(trustBundle): { trustBundle: trustBundle }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/certificates/v1alpha1/main.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/certificates/v1alpha1/main.libsonnet new file mode 100644 index 00000000000..697fa66fc89 --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/certificates/v1alpha1/main.libsonnet @@ -0,0 +1,6 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='v1alpha1', url='', help=''), + clusterTrustBundle: (import 'clusterTrustBundle.libsonnet'), + clusterTrustBundleSpec: (import 'clusterTrustBundleSpec.libsonnet'), +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/coordination/main.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/coordination/main.libsonnet similarity index 75% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/coordination/main.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/coordination/main.libsonnet index 9d8b2649c6c..cad91a8c74b 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/coordination/main.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/coordination/main.libsonnet @@ -2,5 +2,4 @@ local d = (import 'doc-util/main.libsonnet'), '#':: d.pkg(name='coordination', url='', help=''), v1: (import 'v1/main.libsonnet'), - v1beta1: (import 'v1beta1/main.libsonnet'), } diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/coordination/v1/lease.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/coordination/v1/lease.libsonnet similarity index 84% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/coordination/v1/lease.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/coordination/v1/lease.libsonnet index 1baaa20ae64..4e89e2b3ac4 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/coordination/v1/lease.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/coordination/v1/lease.libsonnet @@ -3,12 +3,10 @@ '#':: d.pkg(name='lease', url='', help='"Lease defines a lease concept."'), '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. 
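// Illustrative sketch only (assumed import path, placeholder values; not part of the
// generated library): composing the certificates/v1alpha1 ClusterTrustBundle helpers
// added above. new() sets apiVersion, kind and metadata.name; the spec setters merge
// into spec+ so they can be chained with `+`.
local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';
local ctb = k.certificates.v1alpha1.clusterTrustBundle;

ctb.new('example.com:foo:v1')
+ ctb.spec.withSignerName('example.com/foo')  // object name must carry the signer-name prefix
+ ctb.spec.withTrustBundle(|||
  -----BEGIN CERTIFICATE-----
  (placeholder PEM-encoded CA certificate)
  -----END CERTIFICATE-----
|||)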
More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -19,21 +17,21 @@ withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. 
If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { metadata+: { generateName: generateName } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { metadata+: { labels+: labels } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. 
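// Illustrative sketch (not part of the generated code): the plain with* setters replace a
// field, while the with*Mixin variants merge into it, which is what the repeated
// "**Note:** This function appends passed data to existing values" help text refers to.
// Assuming the coordination/v1 lease package shown here is reachable as k.coordination.v1.lease:
local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';
local lease = k.coordination.v1.lease;

lease.metadata.withLabels({ app: 'demo' })           // metadata.labels == { app: 'demo' }
+ lease.metadata.withLabelsMixin({ tier: 'infra' })  // merges to { app: 'demo', tier: 'infra' }
// Calling withLabels() a second time instead would overwrite the whole map rather than merge.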
Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { metadata+: { namespace: namespace } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, @@ -41,9 +39,9 @@ withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. 
Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { metadata+: { uid: uid } }, }, '#new':: d.fn(help='new returns an instance of Lease', args=[d.arg(name='name', type=d.T.string)]), @@ -57,7 +55,7 @@ withAcquireTime(acquireTime): { spec+: { acquireTime: acquireTime } }, '#withHolderIdentity':: d.fn(help='"holderIdentity contains the identity of the holder of a current lease."', args=[d.arg(name='holderIdentity', type=d.T.string)]), withHolderIdentity(holderIdentity): { spec+: { holderIdentity: holderIdentity } }, - '#withLeaseDurationSeconds':: d.fn(help='"leaseDurationSeconds is a duration that candidates for a lease need to wait to force acquire it. This is measure against time of last observed RenewTime."', args=[d.arg(name='leaseDurationSeconds', type=d.T.integer)]), + '#withLeaseDurationSeconds':: d.fn(help='"leaseDurationSeconds is a duration that candidates for a lease need to wait to force acquire it. This is measure against time of last observed renewTime."', args=[d.arg(name='leaseDurationSeconds', type=d.T.integer)]), withLeaseDurationSeconds(leaseDurationSeconds): { spec+: { leaseDurationSeconds: leaseDurationSeconds } }, '#withLeaseTransitions':: d.fn(help='"leaseTransitions is the number of transitions of a lease between holders."', args=[d.arg(name='leaseTransitions', type=d.T.integer)]), withLeaseTransitions(leaseTransitions): { spec+: { leaseTransitions: leaseTransitions } }, diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/coordination/v1beta1/leaseSpec.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/coordination/v1/leaseSpec.libsonnet similarity index 95% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/coordination/v1beta1/leaseSpec.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/coordination/v1/leaseSpec.libsonnet index 3f501306ad4..af47be3597c 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/coordination/v1beta1/leaseSpec.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/coordination/v1/leaseSpec.libsonnet @@ -5,7 +5,7 @@ withAcquireTime(acquireTime): { acquireTime: acquireTime }, '#withHolderIdentity':: d.fn(help='"holderIdentity contains the identity of the holder of a current lease."', args=[d.arg(name='holderIdentity', type=d.T.string)]), withHolderIdentity(holderIdentity): { holderIdentity: holderIdentity }, - '#withLeaseDurationSeconds':: d.fn(help='"leaseDurationSeconds is a duration that candidates for a lease need to wait to force acquire it. This is measure against time of last observed RenewTime."', args=[d.arg(name='leaseDurationSeconds', type=d.T.integer)]), + '#withLeaseDurationSeconds':: d.fn(help='"leaseDurationSeconds is a duration that candidates for a lease need to wait to force acquire it. 
This is measure against time of last observed renewTime."', args=[d.arg(name='leaseDurationSeconds', type=d.T.integer)]), withLeaseDurationSeconds(leaseDurationSeconds): { leaseDurationSeconds: leaseDurationSeconds }, '#withLeaseTransitions':: d.fn(help='"leaseTransitions is the number of transitions of a lease between holders."', args=[d.arg(name='leaseTransitions', type=d.T.integer)]), withLeaseTransitions(leaseTransitions): { leaseTransitions: leaseTransitions }, diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/coordination/v1/main.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/coordination/v1/main.libsonnet similarity index 100% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/coordination/v1/main.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/coordination/v1/main.libsonnet diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/main.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/main.libsonnet similarity index 100% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/main.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/main.libsonnet diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/affinity.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/affinity.libsonnet similarity index 100% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/affinity.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/affinity.libsonnet diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/attachedVolume.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/attachedVolume.libsonnet similarity index 100% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/attachedVolume.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/attachedVolume.libsonnet diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/awsElasticBlockStoreVolumeSource.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/awsElasticBlockStoreVolumeSource.libsonnet new file mode 100644 index 00000000000..61e425f4bc0 --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/awsElasticBlockStoreVolumeSource.libsonnet @@ -0,0 +1,14 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='awsElasticBlockStoreVolumeSource', url='', help='"Represents a Persistent Disk resource in AWS.\\n\\nAn AWS EBS disk must exist before mounting to a container. The disk must also be in the same AWS zone as the kubelet. An AWS EBS disk can only be mounted as read/write once. 
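// Illustrative sketch (assumed import path; not part of the generated code): building a
// coordination/v1 Lease with the metadata and spec helpers shown above.
local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';
local lease = k.coordination.v1.lease;

lease.new('scheduler-leader')                    // sets apiVersion, kind and metadata.name
+ lease.metadata.withNamespace('kube-system')
+ lease.spec.withHolderIdentity('scheduler-0')
+ lease.spec.withLeaseDurationSeconds(15)
+ lease.spec.withLeaseTransitions(0)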
AWS EBS volumes support ownership management and SELinux relabeling."'), + '#withFsType':: d.fn(help='"fsType is the filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore"', args=[d.arg(name='fsType', type=d.T.string)]), + withFsType(fsType): { fsType: fsType }, + '#withPartition':: d.fn(help='"partition is the partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \\"1\\". Similarly, the volume partition for /dev/sda is \\"0\\" (or you can leave the property empty)."', args=[d.arg(name='partition', type=d.T.integer)]), + withPartition(partition): { partition: partition }, + '#withReadOnly':: d.fn(help='"readOnly value true will force the readOnly setting in VolumeMounts. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore"', args=[d.arg(name='readOnly', type=d.T.boolean)]), + withReadOnly(readOnly): { readOnly: readOnly }, + '#withVolumeID':: d.fn(help='"volumeID is unique ID of the persistent disk resource in AWS (Amazon EBS volume). More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore"', args=[d.arg(name='volumeID', type=d.T.string)]), + withVolumeID(volumeID): { volumeID: volumeID }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/azureDiskVolumeSource.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/azureDiskVolumeSource.libsonnet new file mode 100644 index 00000000000..195149ef3ba --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/azureDiskVolumeSource.libsonnet @@ -0,0 +1,18 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='azureDiskVolumeSource', url='', help='"AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod."'), + '#withCachingMode':: d.fn(help='"cachingMode is the Host Caching mode: None, Read Only, Read Write."', args=[d.arg(name='cachingMode', type=d.T.string)]), + withCachingMode(cachingMode): { cachingMode: cachingMode }, + '#withDiskName':: d.fn(help='"diskName is the Name of the data disk in the blob storage"', args=[d.arg(name='diskName', type=d.T.string)]), + withDiskName(diskName): { diskName: diskName }, + '#withDiskURI':: d.fn(help='"diskURI is the URI of data disk in the blob storage"', args=[d.arg(name='diskURI', type=d.T.string)]), + withDiskURI(diskURI): { diskURI: diskURI }, + '#withFsType':: d.fn(help='"fsType is Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), + withFsType(fsType): { fsType: fsType }, + '#withKind':: d.fn(help='"kind expected values are Shared: multiple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). 
defaults to shared"', args=[d.arg(name='kind', type=d.T.string)]), + withKind(kind): { kind: kind }, + '#withReadOnly':: d.fn(help='"readOnly Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + withReadOnly(readOnly): { readOnly: readOnly }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/azureFilePersistentVolumeSource.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/azureFilePersistentVolumeSource.libsonnet new file mode 100644 index 00000000000..38119e4afd5 --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/azureFilePersistentVolumeSource.libsonnet @@ -0,0 +1,14 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='azureFilePersistentVolumeSource', url='', help='"AzureFile represents an Azure File Service mount on the host and bind mount to the pod."'), + '#withReadOnly':: d.fn(help='"readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + withReadOnly(readOnly): { readOnly: readOnly }, + '#withSecretName':: d.fn(help='"secretName is the name of secret that contains Azure Storage Account Name and Key"', args=[d.arg(name='secretName', type=d.T.string)]), + withSecretName(secretName): { secretName: secretName }, + '#withSecretNamespace':: d.fn(help='"secretNamespace is the namespace of the secret that contains Azure Storage Account Name and Key default is the same as the Pod"', args=[d.arg(name='secretNamespace', type=d.T.string)]), + withSecretNamespace(secretNamespace): { secretNamespace: secretNamespace }, + '#withShareName':: d.fn(help='"shareName is the azure Share Name"', args=[d.arg(name='shareName', type=d.T.string)]), + withShareName(shareName): { shareName: shareName }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/azureFileVolumeSource.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/azureFileVolumeSource.libsonnet new file mode 100644 index 00000000000..04663cea5fc --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/azureFileVolumeSource.libsonnet @@ -0,0 +1,12 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='azureFileVolumeSource', url='', help='"AzureFile represents an Azure File Service mount on the host and bind mount to the pod."'), + '#withReadOnly':: d.fn(help='"readOnly defaults to false (read/write). 
ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + withReadOnly(readOnly): { readOnly: readOnly }, + '#withSecretName':: d.fn(help='"secretName is the name of secret that contains Azure Storage Account Name and Key"', args=[d.arg(name='secretName', type=d.T.string)]), + withSecretName(secretName): { secretName: secretName }, + '#withShareName':: d.fn(help='"shareName is the azure share Name"', args=[d.arg(name='shareName', type=d.T.string)]), + withShareName(shareName): { shareName: shareName }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/binding.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/binding.libsonnet similarity index 86% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/binding.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/binding.libsonnet index 43d95c1b961..3746613fcad 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/binding.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/binding.libsonnet @@ -3,12 +3,10 @@ '#':: d.pkg(name='binding', url='', help='"Binding ties one object to another; for example, a pod is bound to a node by a scheduler. Deprecated in 1.7, please use the bindings subresource of pods instead."'), '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. 
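// Illustrative sketch (not part of the generated code): the volume-source packages above
// emit flat fragments that are normally nested under a pod volume definition; here the
// awsElasticBlockStoreVolumeSource helpers are composed on their own.
local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';
local ebs = k.core.v1.awsElasticBlockStoreVolumeSource;

// Evaluates to { volumeID: 'vol-0abc123', fsType: 'ext4', readOnly: true }.
ebs.withVolumeID('vol-0abc123')
+ ebs.withFsType('ext4')
+ ebs.withReadOnly(true)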
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -19,21 +17,21 @@ withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. 
The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { metadata+: { generateName: generateName } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { metadata+: { labels+: labels } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. 
This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { metadata+: { namespace: namespace } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. 
There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, @@ -41,9 +39,9 @@ withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { metadata+: { uid: uid } }, }, '#new':: d.fn(help='new returns an instance of Binding', args=[d.arg(name='name', type=d.T.string)]), diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/capabilities.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/capabilities.libsonnet similarity index 100% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/capabilities.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/capabilities.libsonnet diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/cephFSPersistentVolumeSource.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/cephFSPersistentVolumeSource.libsonnet new file mode 100644 index 00000000000..1215a07c221 --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/cephFSPersistentVolumeSource.libsonnet @@ -0,0 +1,25 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='cephFSPersistentVolumeSource', url='', help='"Represents a Ceph Filesystem mount that lasts the lifetime of a pod Cephfs volumes do not support ownership management or SELinux relabeling."'), + '#secretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), + secretRef: { + '#withName':: d.fn(help='"name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { secretRef+: { name: name } }, + '#withNamespace':: d.fn(help='"namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), + withNamespace(namespace): { secretRef+: { namespace: namespace } }, + }, + '#withMonitors':: d.fn(help='"monitors is Required: Monitors is a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='monitors', type=d.T.array)]), + withMonitors(monitors): { monitors: if std.isArray(v=monitors) then monitors else [monitors] }, + '#withMonitorsMixin':: d.fn(help='"monitors is Required: Monitors is a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='monitors', type=d.T.array)]), + withMonitorsMixin(monitors): { monitors+: if std.isArray(v=monitors) then monitors else [monitors] }, + '#withPath':: d.fn(help='"path is Optional: Used as the mounted root, rather than the full Ceph tree, default is /"', args=[d.arg(name='path', type=d.T.string)]), + withPath(path): { path: path }, + '#withReadOnly':: d.fn(help='"readOnly is Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. 
More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='readOnly', type=d.T.boolean)]), + withReadOnly(readOnly): { readOnly: readOnly }, + '#withSecretFile':: d.fn(help='"secretFile is Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='secretFile', type=d.T.string)]), + withSecretFile(secretFile): { secretFile: secretFile }, + '#withUser':: d.fn(help='"user is Optional: User is the rados user name, default is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='user', type=d.T.string)]), + withUser(user): { user: user }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/cephFSVolumeSource.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/cephFSVolumeSource.libsonnet new file mode 100644 index 00000000000..ff61be5fc51 --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/cephFSVolumeSource.libsonnet @@ -0,0 +1,23 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='cephFSVolumeSource', url='', help='"Represents a Ceph Filesystem mount that lasts the lifetime of a pod Cephfs volumes do not support ownership management or SELinux relabeling."'), + '#secretRef':: d.obj(help='"LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace."'), + secretRef: { + '#withName':: d.fn(help='"Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { secretRef+: { name: name } }, + }, + '#withMonitors':: d.fn(help='"monitors is Required: Monitors is a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='monitors', type=d.T.array)]), + withMonitors(monitors): { monitors: if std.isArray(v=monitors) then monitors else [monitors] }, + '#withMonitorsMixin':: d.fn(help='"monitors is Required: Monitors is a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='monitors', type=d.T.array)]), + withMonitorsMixin(monitors): { monitors+: if std.isArray(v=monitors) then monitors else [monitors] }, + '#withPath':: d.fn(help='"path is Optional: Used as the mounted root, rather than the full Ceph tree, default is /"', args=[d.arg(name='path', type=d.T.string)]), + withPath(path): { path: path }, + '#withReadOnly':: d.fn(help='"readOnly is Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. 
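// Illustrative sketch (assumed import path, placeholder monitor addresses; not part of the
// generated code): composing the cephFSVolumeSource helpers defined above. secretRef.withName
// merges into secretRef+, and withMonitors wraps a single value in an array if needed.
local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';
local cephfs = k.core.v1.cephFSVolumeSource;

cephfs.withMonitors(['10.16.154.78:6789', '10.16.154.82:6789'])
+ cephfs.withPath('/')
+ cephfs.withUser('admin')
+ cephfs.secretRef.withName('ceph-secret')
+ cephfs.withReadOnly(true)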
More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='readOnly', type=d.T.boolean)]), + withReadOnly(readOnly): { readOnly: readOnly }, + '#withSecretFile':: d.fn(help='"secretFile is Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='secretFile', type=d.T.string)]), + withSecretFile(secretFile): { secretFile: secretFile }, + '#withUser':: d.fn(help='"user is optional: User is the rados user name, default is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='user', type=d.T.string)]), + withUser(user): { user: user }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/cinderPersistentVolumeSource.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/cinderPersistentVolumeSource.libsonnet similarity index 52% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/cinderPersistentVolumeSource.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/cinderPersistentVolumeSource.libsonnet index 8d2af89ee9f..ca00d729ebb 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/cinderPersistentVolumeSource.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/cinderPersistentVolumeSource.libsonnet @@ -3,16 +3,16 @@ '#':: d.pkg(name='cinderPersistentVolumeSource', url='', help='"Represents a cinder volume resource in Openstack. A Cinder volume must exist before mounting to a container. The volume must also be in the same region as the kubelet. Cinder volumes support ownership management and SELinux relabeling."'), '#secretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), secretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), withName(name): { secretRef+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { secretRef+: { namespace: namespace } }, }, - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://examples.k8s.io/mysql-cinder-pd/README.md"', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType Filesystem type to mount. Must be a filesystem type supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. 
More info: https://examples.k8s.io/mysql-cinder-pd/README.md"', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { fsType: fsType }, - '#withReadOnly':: d.fn(help='"Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/mysql-cinder-pd/README.md"', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly is Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/mysql-cinder-pd/README.md"', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { readOnly: readOnly }, - '#withVolumeID':: d.fn(help='"volume id used to identify the volume in cinder. More info: https://examples.k8s.io/mysql-cinder-pd/README.md"', args=[d.arg(name='volumeID', type=d.T.string)]), + '#withVolumeID':: d.fn(help='"volumeID used to identify the volume in cinder. More info: https://examples.k8s.io/mysql-cinder-pd/README.md"', args=[d.arg(name='volumeID', type=d.T.string)]), withVolumeID(volumeID): { volumeID: volumeID }, '#mixin': 'ignore', mixin: self, diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/cinderVolumeSource.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/cinderVolumeSource.libsonnet similarity index 54% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/cinderVolumeSource.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/cinderVolumeSource.libsonnet index 1479ec43262..68b81ae4361 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/cinderVolumeSource.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/cinderVolumeSource.libsonnet @@ -6,11 +6,11 @@ '#withName':: d.fn(help='"Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { secretRef+: { name: name } }, }, - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://examples.k8s.io/mysql-cinder-pd/README.md"', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://examples.k8s.io/mysql-cinder-pd/README.md"', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { fsType: fsType }, - '#withReadOnly':: d.fn(help='"Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/mysql-cinder-pd/README.md"', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. 
More info: https://examples.k8s.io/mysql-cinder-pd/README.md"', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { readOnly: readOnly }, - '#withVolumeID':: d.fn(help='"volume id used to identify the volume in cinder. More info: https://examples.k8s.io/mysql-cinder-pd/README.md"', args=[d.arg(name='volumeID', type=d.T.string)]), + '#withVolumeID':: d.fn(help='"volumeID used to identify the volume in cinder. More info: https://examples.k8s.io/mysql-cinder-pd/README.md"', args=[d.arg(name='volumeID', type=d.T.string)]), withVolumeID(volumeID): { volumeID: volumeID }, '#mixin': 'ignore', mixin: self, diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/claimSource.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/claimSource.libsonnet new file mode 100644 index 00000000000..8753fafe3fc --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/claimSource.libsonnet @@ -0,0 +1,10 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='claimSource', url='', help='"ClaimSource describes a reference to a ResourceClaim.\\n\\nExactly one of these fields should be set. Consumers of this type must treat an empty object as if it has an unknown value."'), + '#withResourceClaimName':: d.fn(help='"ResourceClaimName is the name of a ResourceClaim object in the same namespace as this pod."', args=[d.arg(name='resourceClaimName', type=d.T.string)]), + withResourceClaimName(resourceClaimName): { resourceClaimName: resourceClaimName }, + '#withResourceClaimTemplateName':: d.fn(help='"ResourceClaimTemplateName is the name of a ResourceClaimTemplate object in the same namespace as this pod.\\n\\nThe template will be used to create a new ResourceClaim, which will be bound to this pod. When this pod is deleted, the ResourceClaim will also be deleted. 
The pod name and resource name, along with a generated component, will be used to form a unique name for the ResourceClaim, which will be recorded in pod.status.resourceClaimStatuses.\\n\\nThis field is immutable and no changes will be made to the corresponding ResourceClaim by the control plane after creating the ResourceClaim."', args=[d.arg(name='resourceClaimTemplateName', type=d.T.string)]), + withResourceClaimTemplateName(resourceClaimTemplateName): { resourceClaimTemplateName: resourceClaimTemplateName }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/clientIPConfig.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/clientIPConfig.libsonnet similarity index 100% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/clientIPConfig.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/clientIPConfig.libsonnet diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/clusterTrustBundleProjection.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/clusterTrustBundleProjection.libsonnet new file mode 100644 index 00000000000..0aa0424b5cd --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/clusterTrustBundleProjection.libsonnet @@ -0,0 +1,25 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='clusterTrustBundleProjection', url='', help='"ClusterTrustBundleProjection describes how to select a set of ClusterTrustBundle objects and project their contents into the pod filesystem."'), + '#labelSelector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), + labelSelector: { + '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressions(matchExpressions): { labelSelector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } }, + '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressionsMixin(matchExpressions): { labelSelector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } }, + '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabels(matchLabels): { labelSelector+: { matchLabels: matchLabels } }, + '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabelsMixin(matchLabels): { labelSelector+: { matchLabels+: matchLabels } }, + }, + '#withName':: d.fn(help='"Select a single ClusterTrustBundle by object name. Mutually-exclusive with signerName and labelSelector."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { name: name }, + '#withOptional':: d.fn(help="\"If true, don't block pod startup if the referenced ClusterTrustBundle(s) aren't available. If using name, then the named ClusterTrustBundle is allowed not to exist. If using signerName, then the combination of signerName and labelSelector is allowed to match zero ClusterTrustBundles.\"", args=[d.arg(name='optional', type=d.T.boolean)]), + withOptional(optional): { optional: optional }, + '#withPath':: d.fn(help='"Relative path from the volume root to write the bundle."', args=[d.arg(name='path', type=d.T.string)]), + withPath(path): { path: path }, + '#withSignerName':: d.fn(help='"Select all ClusterTrustBundles that match this signer name. Mutually-exclusive with name. The contents of all selected ClusterTrustBundles will be unified and deduplicated."', args=[d.arg(name='signerName', type=d.T.string)]), + withSignerName(signerName): { signerName: signerName }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/componentCondition.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/componentCondition.libsonnet similarity index 100% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/componentCondition.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/componentCondition.libsonnet diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/componentStatus.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/componentStatus.libsonnet similarity index 84% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/componentStatus.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/componentStatus.libsonnet index 87b93c0128b..3fb5e6e5ec2 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/componentStatus.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/componentStatus.libsonnet @@ -3,12 +3,10 @@ '#':: d.pkg(name='componentStatus', url='', help='"ComponentStatus (and ComponentStatusList) holds the cluster validation info. Deprecated: This API is deprecated in v1.19+"'), '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. 
More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -19,21 +17,21 @@ withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. 
If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { metadata+: { generateName: generateName } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { metadata+: { labels+: labels } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. 
Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { metadata+: { namespace: namespace } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, @@ -41,9 +39,9 @@ withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. 
Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { metadata+: { uid: uid } }, }, '#new':: d.fn(help='new returns an instance of ComponentStatus', args=[d.arg(name='name', type=d.T.string)]), diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/configMap.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/configMap.libsonnet similarity index 85% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/configMap.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/configMap.libsonnet index 09013c5f2cd..126d23c34ea 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/configMap.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/configMap.libsonnet @@ -3,12 +3,10 @@ '#':: d.pkg(name='configMap', url='', help='"ConfigMap holds configuration data for pods to consume."'), '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. 
This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -19,21 +17,21 @@ withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. 
This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { metadata+: { generateName: generateName } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { metadata+: { labels+: labels } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". 
The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { metadata+: { namespace: namespace } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, @@ -41,9 +39,9 @@ withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. 
They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { metadata+: { uid: uid } }, }, '#new':: d.fn(help='new returns an instance of ConfigMap', args=[d.arg(name='name', type=d.T.string)]), diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/configMapEnvSource.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/configMapEnvSource.libsonnet similarity index 100% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/configMapEnvSource.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/configMapEnvSource.libsonnet diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/configMapKeySelector.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/configMapKeySelector.libsonnet similarity index 100% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/configMapKeySelector.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/configMapKeySelector.libsonnet diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/configMapNodeConfigSource.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/configMapNodeConfigSource.libsonnet similarity index 90% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/configMapNodeConfigSource.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/configMapNodeConfigSource.libsonnet index 
251e5e51933..f3271d8907c 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/configMapNodeConfigSource.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/configMapNodeConfigSource.libsonnet @@ -1,6 +1,6 @@ { local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='configMapNodeConfigSource', url='', help='"ConfigMapNodeConfigSource contains the information to reference a ConfigMap as a config source for the Node."'), + '#':: d.pkg(name='configMapNodeConfigSource', url='', help='"ConfigMapNodeConfigSource contains the information to reference a ConfigMap as a config source for the Node. This API is deprecated since 1.22: https://git.k8s.io/enhancements/keps/sig-node/281-dynamic-kubelet-configuration"'), '#withKubeletConfigKey':: d.fn(help='"KubeletConfigKey declares which key of the referenced ConfigMap corresponds to the KubeletConfiguration structure This field is required in all cases."', args=[d.arg(name='kubeletConfigKey', type=d.T.string)]), withKubeletConfigKey(kubeletConfigKey): { kubeletConfigKey: kubeletConfigKey }, '#withName':: d.fn(help='"Name is the metadata.name of the referenced ConfigMap. This field is required in all cases."', args=[d.arg(name='name', type=d.T.string)]), diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/configMapProjection.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/configMapProjection.libsonnet new file mode 100644 index 00000000000..3059d353b00 --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/configMapProjection.libsonnet @@ -0,0 +1,14 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='configMapProjection', url='', help="\"Adapts a ConfigMap into a projected volume.\\n\\nThe contents of the target ConfigMap's Data field will be presented in a projected volume as files using the keys in the Data field as the file names, unless the items element is populated with specific mappings of keys to paths. Note that this is identical to a configmap volume source without the default mode.\""), + '#withItems':: d.fn(help="\"items if unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.\"", args=[d.arg(name='items', type=d.T.array)]), + withItems(items): { items: if std.isArray(v=items) then items else [items] }, + '#withItemsMixin':: d.fn(help="\"items if unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' 
path or start with '..'.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='items', type=d.T.array)]), + withItemsMixin(items): { items+: if std.isArray(v=items) then items else [items] }, + '#withName':: d.fn(help='"Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { name: name }, + '#withOptional':: d.fn(help='"optional specify whether the ConfigMap or its keys must be defined"', args=[d.arg(name='optional', type=d.T.boolean)]), + withOptional(optional): { optional: optional }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/configMapVolumeSource.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/configMapVolumeSource.libsonnet new file mode 100644 index 00000000000..ec7e2d315a3 --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/configMapVolumeSource.libsonnet @@ -0,0 +1,16 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='configMapVolumeSource', url='', help="\"Adapts a ConfigMap into a volume.\\n\\nThe contents of the target ConfigMap's Data field will be presented in a volume as files using the keys in the Data field as the file names, unless the items element is populated with specific mappings of keys to paths. ConfigMap volumes support ownership management and SELinux relabeling.\""), + '#withDefaultMode':: d.fn(help='"defaultMode is optional: mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set."', args=[d.arg(name='defaultMode', type=d.T.integer)]), + withDefaultMode(defaultMode): { defaultMode: defaultMode }, + '#withItems':: d.fn(help="\"items if unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.\"", args=[d.arg(name='items', type=d.T.array)]), + withItems(items): { items: if std.isArray(v=items) then items else [items] }, + '#withItemsMixin':: d.fn(help="\"items if unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' 
path or start with '..'.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='items', type=d.T.array)]), + withItemsMixin(items): { items+: if std.isArray(v=items) then items else [items] }, + '#withName':: d.fn(help='"Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { name: name }, + '#withOptional':: d.fn(help='"optional specify whether the ConfigMap or its keys must be defined"', args=[d.arg(name='optional', type=d.T.boolean)]), + withOptional(optional): { optional: optional }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/container.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/container.libsonnet similarity index 78% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/container.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/container.libsonnet index 1828a2297cc..8a9e4ec56b9 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/container.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/container.libsonnet @@ -3,7 +3,7 @@ '#':: d.pkg(name='container', url='', help='"A single application container that you want to run within a pod."'), '#lifecycle':: d.obj(help='"Lifecycle describes actions that the management system should take in response to container lifecycle events. For the PostStart and PreStop lifecycle handlers, management of the container blocks until the action is complete, unless the container process fails, in which case the handler is aborted."'), lifecycle: { - '#postStart':: d.obj(help='"Handler defines a specific action that should be taken"'), + '#postStart':: d.obj(help='"LifecycleHandler defines a specific action that should be taken in a lifecycle hook. One and only one of the fields, except TCPSocket must be specified."'), postStart: { '#exec':: d.obj(help='"ExecAction describes a \\"run in container\\" action."'), exec: { @@ -27,6 +27,11 @@ '#withScheme':: d.fn(help='"Scheme to use for connecting to the host. Defaults to HTTP."', args=[d.arg(name='scheme', type=d.T.string)]), withScheme(scheme): { lifecycle+: { postStart+: { httpGet+: { scheme: scheme } } } }, }, + '#sleep':: d.obj(help='"SleepAction describes a \\"sleep\\" action."'), + sleep: { + '#withSeconds':: d.fn(help='"Seconds is the number of seconds to sleep."', args=[d.arg(name='seconds', type=d.T.integer)]), + withSeconds(seconds): { lifecycle+: { postStart+: { sleep+: { seconds: seconds } } } }, + }, '#tcpSocket':: d.obj(help='"TCPSocketAction describes an action based on opening a socket"'), tcpSocket: { '#withHost':: d.fn(help='"Optional: Host name to connect to, defaults to the pod IP."', args=[d.arg(name='host', type=d.T.string)]), @@ -35,7 +40,7 @@ withPort(port): { lifecycle+: { postStart+: { tcpSocket+: { port: port } } } }, }, }, - '#preStop':: d.obj(help='"Handler defines a specific action that should be taken"'), + '#preStop':: d.obj(help='"LifecycleHandler defines a specific action that should be taken in a lifecycle hook. 
One and only one of the fields, except TCPSocket must be specified."'), preStop: { '#exec':: d.obj(help='"ExecAction describes a \\"run in container\\" action."'), exec: { @@ -59,6 +64,11 @@ '#withScheme':: d.fn(help='"Scheme to use for connecting to the host. Defaults to HTTP."', args=[d.arg(name='scheme', type=d.T.string)]), withScheme(scheme): { lifecycle+: { preStop+: { httpGet+: { scheme: scheme } } } }, }, + '#sleep':: d.obj(help='"SleepAction describes a \\"sleep\\" action."'), + sleep: { + '#withSeconds':: d.fn(help='"Seconds is the number of seconds to sleep."', args=[d.arg(name='seconds', type=d.T.integer)]), + withSeconds(seconds): { lifecycle+: { preStop+: { sleep+: { seconds: seconds } } } }, + }, '#tcpSocket':: d.obj(help='"TCPSocketAction describes an action based on opening a socket"'), tcpSocket: { '#withHost':: d.fn(help='"Optional: Host name to connect to, defaults to the pod IP."', args=[d.arg(name='host', type=d.T.string)]), @@ -77,6 +87,13 @@ '#withCommandMixin':: d.fn(help="\"Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='command', type=d.T.array)]), withCommandMixin(command): { livenessProbe+: { exec+: { command+: if std.isArray(v=command) then command else [command] } } }, }, + '#grpc':: d.obj(help=''), + grpc: { + '#withPort':: d.fn(help='"Port number of the gRPC service. Number must be in the range 1 to 65535."', args=[d.arg(name='port', type=d.T.integer)]), + withPort(port): { livenessProbe+: { grpc+: { port: port } } }, + '#withService':: d.fn(help='"Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).\\n\\nIf this is not specified, the default behavior is defined by gRPC."', args=[d.arg(name='service', type=d.T.string)]), + withService(service): { livenessProbe+: { grpc+: { service: service } } }, + }, '#httpGet':: d.obj(help='"HTTPGetAction describes an action based on HTTP Get requests."'), httpGet: { '#withHost':: d.fn(help='"Host name to connect to, defaults to the pod IP. You probably want to set \\"Host\\" in httpHeaders instead."', args=[d.arg(name='host', type=d.T.string)]), @@ -107,7 +124,7 @@ withPeriodSeconds(periodSeconds): { livenessProbe+: { periodSeconds: periodSeconds } }, '#withSuccessThreshold':: d.fn(help='"Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1."', args=[d.arg(name='successThreshold', type=d.T.integer)]), withSuccessThreshold(successThreshold): { livenessProbe+: { successThreshold: successThreshold } }, - '#withTerminationGracePeriodSeconds':: d.fn(help="\"Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. 
Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is an alpha field and requires enabling ProbeTerminationGracePeriod feature gate.\"", args=[d.arg(name='terminationGracePeriodSeconds', type=d.T.integer)]), + '#withTerminationGracePeriodSeconds':: d.fn(help="\"Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset.\"", args=[d.arg(name='terminationGracePeriodSeconds', type=d.T.integer)]), withTerminationGracePeriodSeconds(terminationGracePeriodSeconds): { livenessProbe+: { terminationGracePeriodSeconds: terminationGracePeriodSeconds } }, '#withTimeoutSeconds':: d.fn(help='"Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes"', args=[d.arg(name='timeoutSeconds', type=d.T.integer)]), withTimeoutSeconds(timeoutSeconds): { livenessProbe+: { timeoutSeconds: timeoutSeconds } }, @@ -121,6 +138,13 @@ '#withCommandMixin':: d.fn(help="\"Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='command', type=d.T.array)]), withCommandMixin(command): { readinessProbe+: { exec+: { command+: if std.isArray(v=command) then command else [command] } } }, }, + '#grpc':: d.obj(help=''), + grpc: { + '#withPort':: d.fn(help='"Port number of the gRPC service. Number must be in the range 1 to 65535."', args=[d.arg(name='port', type=d.T.integer)]), + withPort(port): { readinessProbe+: { grpc+: { port: port } } }, + '#withService':: d.fn(help='"Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).\\n\\nIf this is not specified, the default behavior is defined by gRPC."', args=[d.arg(name='service', type=d.T.string)]), + withService(service): { readinessProbe+: { grpc+: { service: service } } }, + }, '#httpGet':: d.obj(help='"HTTPGetAction describes an action based on HTTP Get requests."'), httpGet: { '#withHost':: d.fn(help='"Host name to connect to, defaults to the pod IP. 
You probably want to set \\"Host\\" in httpHeaders instead."', args=[d.arg(name='host', type=d.T.string)]), @@ -151,20 +175,24 @@ withPeriodSeconds(periodSeconds): { readinessProbe+: { periodSeconds: periodSeconds } }, '#withSuccessThreshold':: d.fn(help='"Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1."', args=[d.arg(name='successThreshold', type=d.T.integer)]), withSuccessThreshold(successThreshold): { readinessProbe+: { successThreshold: successThreshold } }, - '#withTerminationGracePeriodSeconds':: d.fn(help="\"Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is an alpha field and requires enabling ProbeTerminationGracePeriod feature gate.\"", args=[d.arg(name='terminationGracePeriodSeconds', type=d.T.integer)]), + '#withTerminationGracePeriodSeconds':: d.fn(help="\"Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset.\"", args=[d.arg(name='terminationGracePeriodSeconds', type=d.T.integer)]), withTerminationGracePeriodSeconds(terminationGracePeriodSeconds): { readinessProbe+: { terminationGracePeriodSeconds: terminationGracePeriodSeconds } }, '#withTimeoutSeconds':: d.fn(help='"Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes"', args=[d.arg(name='timeoutSeconds', type=d.T.integer)]), withTimeoutSeconds(timeoutSeconds): { readinessProbe+: { timeoutSeconds: timeoutSeconds } }, }, '#resources':: d.obj(help='"ResourceRequirements describes the compute resource requirements."'), resources: { + '#withClaims':: d.fn(help='"Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container.\\n\\nThis is an alpha field and requires enabling the DynamicResourceAllocation feature gate.\\n\\nThis field is immutable. 
It can only be set for containers."', args=[d.arg(name='claims', type=d.T.array)]), + withClaims(claims): { resources+: { claims: if std.isArray(v=claims) then claims else [claims] } }, + '#withClaimsMixin':: d.fn(help='"Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container.\\n\\nThis is an alpha field and requires enabling the DynamicResourceAllocation feature gate.\\n\\nThis field is immutable. It can only be set for containers."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='claims', type=d.T.array)]), + withClaimsMixin(claims): { resources+: { claims+: if std.isArray(v=claims) then claims else [claims] } }, '#withLimits':: d.fn(help='"Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/"', args=[d.arg(name='limits', type=d.T.object)]), withLimits(limits): { resources+: { limits: limits } }, '#withLimitsMixin':: d.fn(help='"Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='limits', type=d.T.object)]), withLimitsMixin(limits): { resources+: { limits+: limits } }, - '#withRequests':: d.fn(help='"Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/"', args=[d.arg(name='requests', type=d.T.object)]), + '#withRequests':: d.fn(help='"Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/"', args=[d.arg(name='requests', type=d.T.object)]), withRequests(requests): { resources+: { requests: requests } }, - '#withRequestsMixin':: d.fn(help='"Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='requests', type=d.T.object)]), + '#withRequestsMixin':: d.fn(help='"Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='requests', type=d.T.object)]), withRequestsMixin(requests): { resources+: { requests+: requests } }, }, '#securityContext':: d.obj(help='"SecurityContext holds security configuration that will be applied to a container. Some fields are present in both SecurityContext and PodSecurityContext. 
When both are set, the values in SecurityContext take precedence."'), @@ -193,7 +221,7 @@ }, '#seccompProfile':: d.obj(help="\"SeccompProfile defines a pod/container's seccomp profile settings. Only one profile source may be set.\""), seccompProfile: { - '#withLocalhostProfile':: d.fn(help="\"localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must only be set if type is \\\"Localhost\\\".\"", args=[d.arg(name='localhostProfile', type=d.T.string)]), + '#withLocalhostProfile':: d.fn(help="\"localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must be set if type is \\\"Localhost\\\". Must NOT be set for any other type.\"", args=[d.arg(name='localhostProfile', type=d.T.string)]), withLocalhostProfile(localhostProfile): { securityContext+: { seccompProfile+: { localhostProfile: localhostProfile } } }, '#withType':: d.fn(help='"type indicates which kind of seccomp profile will be applied. Valid options are:\\n\\nLocalhost - a profile defined in a file on the node should be used. RuntimeDefault - the container runtime default profile should be used. Unconfined - no profile should be applied."', args=[d.arg(name='type', type=d.T.string)]), withType(type): { securityContext+: { seccompProfile+: { type: type } } }, @@ -204,22 +232,24 @@ withGmsaCredentialSpec(gmsaCredentialSpec): { securityContext+: { windowsOptions+: { gmsaCredentialSpec: gmsaCredentialSpec } } }, '#withGmsaCredentialSpecName':: d.fn(help='"GMSACredentialSpecName is the name of the GMSA credential spec to use."', args=[d.arg(name='gmsaCredentialSpecName', type=d.T.string)]), withGmsaCredentialSpecName(gmsaCredentialSpecName): { securityContext+: { windowsOptions+: { gmsaCredentialSpecName: gmsaCredentialSpecName } } }, + '#withHostProcess':: d.fn(help="\"HostProcess determines if a container should be run as a 'Host Process' container. All of a Pod's containers must have the same effective HostProcess value (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). In addition, if HostProcess is true then HostNetwork must also be set to true.\"", args=[d.arg(name='hostProcess', type=d.T.boolean)]), + withHostProcess(hostProcess): { securityContext+: { windowsOptions+: { hostProcess: hostProcess } } }, '#withRunAsUserName':: d.fn(help='"The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence."', args=[d.arg(name='runAsUserName', type=d.T.string)]), withRunAsUserName(runAsUserName): { securityContext+: { windowsOptions+: { runAsUserName: runAsUserName } } }, }, - '#withAllowPrivilegeEscalation':: d.fn(help='"AllowPrivilegeEscalation controls whether a process can gain more privileges than its parent process. This bool directly controls if the no_new_privs flag will be set on the container process. 
AllowPrivilegeEscalation is true always when the container is: 1) run as Privileged 2) has CAP_SYS_ADMIN"', args=[d.arg(name='allowPrivilegeEscalation', type=d.T.boolean)]), + '#withAllowPrivilegeEscalation':: d.fn(help='"AllowPrivilegeEscalation controls whether a process can gain more privileges than its parent process. This bool directly controls if the no_new_privs flag will be set on the container process. AllowPrivilegeEscalation is true always when the container is: 1) run as Privileged 2) has CAP_SYS_ADMIN Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='allowPrivilegeEscalation', type=d.T.boolean)]), withAllowPrivilegeEscalation(allowPrivilegeEscalation): { securityContext+: { allowPrivilegeEscalation: allowPrivilegeEscalation } }, - '#withPrivileged':: d.fn(help='"Run container in privileged mode. Processes in privileged containers are essentially equivalent to root on the host. Defaults to false."', args=[d.arg(name='privileged', type=d.T.boolean)]), + '#withPrivileged':: d.fn(help='"Run container in privileged mode. Processes in privileged containers are essentially equivalent to root on the host. Defaults to false. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='privileged', type=d.T.boolean)]), withPrivileged(privileged): { securityContext+: { privileged: privileged } }, - '#withProcMount':: d.fn(help='"procMount denotes the type of proc mount to use for the containers. The default is DefaultProcMount which uses the container runtime defaults for readonly paths and masked paths. This requires the ProcMountType feature flag to be enabled."', args=[d.arg(name='procMount', type=d.T.string)]), + '#withProcMount':: d.fn(help='"procMount denotes the type of proc mount to use for the containers. The default is DefaultProcMount which uses the container runtime defaults for readonly paths and masked paths. This requires the ProcMountType feature flag to be enabled. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='procMount', type=d.T.string)]), withProcMount(procMount): { securityContext+: { procMount: procMount } }, - '#withReadOnlyRootFilesystem':: d.fn(help='"Whether this container has a read-only root filesystem. Default is false."', args=[d.arg(name='readOnlyRootFilesystem', type=d.T.boolean)]), + '#withReadOnlyRootFilesystem':: d.fn(help='"Whether this container has a read-only root filesystem. Default is false. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='readOnlyRootFilesystem', type=d.T.boolean)]), withReadOnlyRootFilesystem(readOnlyRootFilesystem): { securityContext+: { readOnlyRootFilesystem: readOnlyRootFilesystem } }, - '#withRunAsGroup':: d.fn(help='"The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence."', args=[d.arg(name='runAsGroup', type=d.T.integer)]), + '#withRunAsGroup':: d.fn(help='"The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. 
Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='runAsGroup', type=d.T.integer)]), withRunAsGroup(runAsGroup): { securityContext+: { runAsGroup: runAsGroup } }, '#withRunAsNonRoot':: d.fn(help='"Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence."', args=[d.arg(name='runAsNonRoot', type=d.T.boolean)]), withRunAsNonRoot(runAsNonRoot): { securityContext+: { runAsNonRoot: runAsNonRoot } }, - '#withRunAsUser':: d.fn(help='"The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence."', args=[d.arg(name='runAsUser', type=d.T.integer)]), + '#withRunAsUser':: d.fn(help='"The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='runAsUser', type=d.T.integer)]), withRunAsUser(runAsUser): { securityContext+: { runAsUser: runAsUser } }, }, '#startupProbe':: d.obj(help='"Probe describes a health check to be performed against a container to determine whether it is alive or ready to receive traffic."'), @@ -231,6 +261,13 @@ '#withCommandMixin':: d.fn(help="\"Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='command', type=d.T.array)]), withCommandMixin(command): { startupProbe+: { exec+: { command+: if std.isArray(v=command) then command else [command] } } }, }, + '#grpc':: d.obj(help=''), + grpc: { + '#withPort':: d.fn(help='"Port number of the gRPC service. Number must be in the range 1 to 65535."', args=[d.arg(name='port', type=d.T.integer)]), + withPort(port): { startupProbe+: { grpc+: { port: port } } }, + '#withService':: d.fn(help='"Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).\\n\\nIf this is not specified, the default behavior is defined by gRPC."', args=[d.arg(name='service', type=d.T.string)]), + withService(service): { startupProbe+: { grpc+: { service: service } } }, + }, '#httpGet':: d.obj(help='"HTTPGetAction describes an action based on HTTP Get requests."'), httpGet: { '#withHost':: d.fn(help='"Host name to connect to, defaults to the pod IP. 
You probably want to set \\"Host\\" in httpHeaders instead."', args=[d.arg(name='host', type=d.T.string)]), @@ -261,18 +298,18 @@ withPeriodSeconds(periodSeconds): { startupProbe+: { periodSeconds: periodSeconds } }, '#withSuccessThreshold':: d.fn(help='"Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1."', args=[d.arg(name='successThreshold', type=d.T.integer)]), withSuccessThreshold(successThreshold): { startupProbe+: { successThreshold: successThreshold } }, - '#withTerminationGracePeriodSeconds':: d.fn(help="\"Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is an alpha field and requires enabling ProbeTerminationGracePeriod feature gate.\"", args=[d.arg(name='terminationGracePeriodSeconds', type=d.T.integer)]), + '#withTerminationGracePeriodSeconds':: d.fn(help="\"Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset.\"", args=[d.arg(name='terminationGracePeriodSeconds', type=d.T.integer)]), withTerminationGracePeriodSeconds(terminationGracePeriodSeconds): { startupProbe+: { terminationGracePeriodSeconds: terminationGracePeriodSeconds } }, '#withTimeoutSeconds':: d.fn(help='"Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes"', args=[d.arg(name='timeoutSeconds', type=d.T.integer)]), withTimeoutSeconds(timeoutSeconds): { startupProbe+: { timeoutSeconds: timeoutSeconds } }, }, - '#withArgs':: d.fn(help="\"Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell\"", args=[d.arg(name='args', type=d.T.array)]), + '#withArgs':: d.fn(help="\"Arguments to the entrypoint. 
The container image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \\\"$$(VAR_NAME)\\\" will produce the string literal \\\"$(VAR_NAME)\\\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell\"", args=[d.arg(name='args', type=d.T.array)]), withArgs(args): { args: if std.isArray(v=args) then args else [args] }, - '#withArgsMixin':: d.fn(help="\"Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='args', type=d.T.array)]), + '#withArgsMixin':: d.fn(help="\"Arguments to the entrypoint. The container image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \\\"$$(VAR_NAME)\\\" will produce the string literal \\\"$(VAR_NAME)\\\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='args', type=d.T.array)]), withArgsMixin(args): { args+: if std.isArray(v=args) then args else [args] }, - '#withCommand':: d.fn(help="\"Entrypoint array. Not executed within a shell. The docker image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell\"", args=[d.arg(name='command', type=d.T.array)]), + '#withCommand':: d.fn(help="\"Entrypoint array. Not executed within a shell. The container image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \\\"$$(VAR_NAME)\\\" will produce the string literal \\\"$(VAR_NAME)\\\". 
Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell\"", args=[d.arg(name='command', type=d.T.array)]), withCommand(command): { command: if std.isArray(v=command) then command else [command] }, - '#withCommandMixin':: d.fn(help="\"Entrypoint array. Not executed within a shell. The docker image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='command', type=d.T.array)]), + '#withCommandMixin':: d.fn(help="\"Entrypoint array. Not executed within a shell. The container image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \\\"$$(VAR_NAME)\\\" will produce the string literal \\\"$(VAR_NAME)\\\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='command', type=d.T.array)]), withCommandMixin(command): { command+: if std.isArray(v=command) then command else [command] }, '#withEnv':: d.fn(help='"List of environment variables to set in the container. Cannot be updated."', args=[d.arg(name='env', type=d.T.array)]), withEnv(env): { env: if std.isArray(v=env) then env else [env] }, @@ -282,16 +319,22 @@ withEnvFromMixin(envFrom): { envFrom+: if std.isArray(v=envFrom) then envFrom else [envFrom] }, '#withEnvMixin':: d.fn(help='"List of environment variables to set in the container. Cannot be updated."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='env', type=d.T.array)]), withEnvMixin(env): { env+: if std.isArray(v=env) then env else [env] }, - '#withImage':: d.fn(help='"Docker image name. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets."', args=[d.arg(name='image', type=d.T.string)]), + '#withImage':: d.fn(help='"Container image name. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets."', args=[d.arg(name='image', type=d.T.string)]), withImage(image): { image: image }, '#withImagePullPolicy':: d.fn(help='"Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. 
Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images"', args=[d.arg(name='imagePullPolicy', type=d.T.string)]), withImagePullPolicy(imagePullPolicy): { imagePullPolicy: imagePullPolicy }, '#withName':: d.fn(help='"Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated."', args=[d.arg(name='name', type=d.T.string)]), withName(name): { name: name }, - '#withPorts':: d.fn(help='"List of ports to expose from the container. Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default \\"0.0.0.0\\" address inside a container will be accessible from the network. Cannot be updated."', args=[d.arg(name='ports', type=d.T.array)]), + '#withPorts':: d.fn(help='"List of ports to expose from the container. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default \\"0.0.0.0\\" address inside a container will be accessible from the network. Modifying this array with strategic merge patch may corrupt the data. For more information See https://github.com/kubernetes/kubernetes/issues/108255. Cannot be updated."', args=[d.arg(name='ports', type=d.T.array)]), withPorts(ports): { ports: if std.isArray(v=ports) then ports else [ports] }, - '#withPortsMixin':: d.fn(help='"List of ports to expose from the container. Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default \\"0.0.0.0\\" address inside a container will be accessible from the network. Cannot be updated."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ports', type=d.T.array)]), + '#withPortsMixin':: d.fn(help='"List of ports to expose from the container. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default \\"0.0.0.0\\" address inside a container will be accessible from the network. Modifying this array with strategic merge patch may corrupt the data. For more information See https://github.com/kubernetes/kubernetes/issues/108255. Cannot be updated."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ports', type=d.T.array)]), withPortsMixin(ports): { ports+: if std.isArray(v=ports) then ports else [ports] }, + '#withResizePolicy':: d.fn(help='"Resources resize policy for the container."', args=[d.arg(name='resizePolicy', type=d.T.array)]), + withResizePolicy(resizePolicy): { resizePolicy: if std.isArray(v=resizePolicy) then resizePolicy else [resizePolicy] }, + '#withResizePolicyMixin':: d.fn(help='"Resources resize policy for the container."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='resizePolicy', type=d.T.array)]), + withResizePolicyMixin(resizePolicy): { resizePolicy+: if std.isArray(v=resizePolicy) then resizePolicy else [resizePolicy] }, + '#withRestartPolicy':: d.fn(help="\"RestartPolicy defines the restart behavior of individual containers in a pod. This field may only be set for init containers, and the only allowed value is \\\"Always\\\". 
For non-init containers or when this field is not specified, the restart behavior is defined by the Pod's restart policy and the container type. Setting the RestartPolicy as \\\"Always\\\" for the init container will have the following effect: this init container will be continually restarted on exit until all regular containers have terminated. Once all regular containers have completed, all init containers with restartPolicy \\\"Always\\\" will be shut down. This lifecycle differs from normal init containers and is often referred to as a \\\"sidecar\\\" container. Although this init container still starts in the init container sequence, it does not wait for the container to complete before proceeding to the next init container. Instead, the next init container starts immediately after this init container is started, or after any startupProbe has successfully completed.\"", args=[d.arg(name='restartPolicy', type=d.T.string)]), + withRestartPolicy(restartPolicy): { restartPolicy: restartPolicy }, '#withStdin':: d.fn(help='"Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false."', args=[d.arg(name='stdin', type=d.T.boolean)]), withStdin(stdin): { stdin: stdin }, '#withStdinOnce':: d.fn(help='"Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF. Default is false"', args=[d.arg(name='stdinOnce', type=d.T.boolean)]), diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/containerImage.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/containerImage.libsonnet similarity index 61% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/containerImage.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/containerImage.libsonnet index 01cd00d3867..d5dfb1d671e 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/containerImage.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/containerImage.libsonnet @@ -1,9 +1,9 @@ { local d = (import 'doc-util/main.libsonnet'), '#':: d.pkg(name='containerImage', url='', help='"Describe a container image"'), - '#withNames':: d.fn(help='"Names by which this image is known. e.g. [\\"k8s.gcr.io/hyperkube:v1.0.7\\", \\"dockerhub.io/google_containers/hyperkube:v1.0.7\\"]"', args=[d.arg(name='names', type=d.T.array)]), + '#withNames':: d.fn(help='"Names by which this image is known. e.g. [\\"kubernetes.example/hyperkube:v1.0.7\\", \\"cloud-vendor.registry.example/cloud-vendor/hyperkube:v1.0.7\\"]"', args=[d.arg(name='names', type=d.T.array)]), withNames(names): { names: if std.isArray(v=names) then names else [names] }, - '#withNamesMixin':: d.fn(help='"Names by which this image is known. e.g. 
[\\"k8s.gcr.io/hyperkube:v1.0.7\\", \\"dockerhub.io/google_containers/hyperkube:v1.0.7\\"]"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='names', type=d.T.array)]), + '#withNamesMixin':: d.fn(help='"Names by which this image is known. e.g. [\\"kubernetes.example/hyperkube:v1.0.7\\", \\"cloud-vendor.registry.example/cloud-vendor/hyperkube:v1.0.7\\"]"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='names', type=d.T.array)]), withNamesMixin(names): { names+: if std.isArray(v=names) then names else [names] }, '#withSizeBytes':: d.fn(help='"The size of the image in bytes."', args=[d.arg(name='sizeBytes', type=d.T.integer)]), withSizeBytes(sizeBytes): { sizeBytes: sizeBytes }, diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/containerPort.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/containerPort.libsonnet similarity index 100% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/containerPort.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/containerPort.libsonnet diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/containerResizePolicy.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/containerResizePolicy.libsonnet new file mode 100644 index 00000000000..430d614a83d --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/containerResizePolicy.libsonnet @@ -0,0 +1,10 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='containerResizePolicy', url='', help='"ContainerResizePolicy represents resource resize policy for the container."'), + '#withResourceName':: d.fn(help='"Name of the resource to which this resource resize policy applies. Supported values: cpu, memory."', args=[d.arg(name='resourceName', type=d.T.string)]), + withResourceName(resourceName): { resourceName: resourceName }, + '#withRestartPolicy':: d.fn(help='"Restart policy to apply when specified resource is resized. 
If not specified, it defaults to NotRequired."', args=[d.arg(name='restartPolicy', type=d.T.string)]), + withRestartPolicy(restartPolicy): { restartPolicy: restartPolicy }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/containerState.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/containerState.libsonnet similarity index 96% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/containerState.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/containerState.libsonnet index fb89d77e794..0a29b350d26 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/containerState.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/containerState.libsonnet @@ -8,7 +8,7 @@ }, '#terminated':: d.obj(help='"ContainerStateTerminated is a terminated state of a container."'), terminated: { - '#withContainerID':: d.fn(help="\"Container's ID in the format 'docker://\u003ccontainer_id\u003e'\"", args=[d.arg(name='containerID', type=d.T.string)]), + '#withContainerID':: d.fn(help="\"Container's ID in the format '\u003ctype\u003e://\u003ccontainer_id\u003e'\"", args=[d.arg(name='containerID', type=d.T.string)]), withContainerID(containerID): { terminated+: { containerID: containerID } }, '#withExitCode':: d.fn(help='"Exit status from the last termination of the container"', args=[d.arg(name='exitCode', type=d.T.integer)]), withExitCode(exitCode): { terminated+: { exitCode: exitCode } }, diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/containerStateRunning.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/containerStateRunning.libsonnet similarity index 100% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/containerStateRunning.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/containerStateRunning.libsonnet diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/containerStateTerminated.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/containerStateTerminated.libsonnet similarity index 94% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/containerStateTerminated.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/containerStateTerminated.libsonnet index a3c15616e47..f0e13bac143 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/containerStateTerminated.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/containerStateTerminated.libsonnet @@ -1,7 +1,7 @@ { local d = (import 'doc-util/main.libsonnet'), '#':: d.pkg(name='containerStateTerminated', url='', help='"ContainerStateTerminated is a terminated state of a container."'), - '#withContainerID':: d.fn(help="\"Container's ID in the format 'docker://\u003ccontainer_id\u003e'\"", 
args=[d.arg(name='containerID', type=d.T.string)]), + '#withContainerID':: d.fn(help="\"Container's ID in the format '\u003ctype\u003e://\u003ccontainer_id\u003e'\"", args=[d.arg(name='containerID', type=d.T.string)]), withContainerID(containerID): { containerID: containerID }, '#withExitCode':: d.fn(help='"Exit status from the last termination of the container"', args=[d.arg(name='exitCode', type=d.T.integer)]), withExitCode(exitCode): { exitCode: exitCode }, diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/containerStateWaiting.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/containerStateWaiting.libsonnet similarity index 100% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/containerStateWaiting.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/containerStateWaiting.libsonnet diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/containerStatus.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/containerStatus.libsonnet similarity index 51% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/containerStatus.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/containerStatus.libsonnet index f637415a0ae..886d4145657 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/containerStatus.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/containerStatus.libsonnet @@ -10,7 +10,7 @@ }, '#terminated':: d.obj(help='"ContainerStateTerminated is a terminated state of a container."'), terminated: { - '#withContainerID':: d.fn(help="\"Container's ID in the format 'docker://\u003ccontainer_id\u003e'\"", args=[d.arg(name='containerID', type=d.T.string)]), + '#withContainerID':: d.fn(help="\"Container's ID in the format '\u003ctype\u003e://\u003ccontainer_id\u003e'\"", args=[d.arg(name='containerID', type=d.T.string)]), withContainerID(containerID): { lastState+: { terminated+: { containerID: containerID } } }, '#withExitCode':: d.fn(help='"Exit status from the last termination of the container"', args=[d.arg(name='exitCode', type=d.T.integer)]), withExitCode(exitCode): { lastState+: { terminated+: { exitCode: exitCode } } }, @@ -33,6 +33,21 @@ withReason(reason): { lastState+: { waiting+: { reason: reason } } }, }, }, + '#resources':: d.obj(help='"ResourceRequirements describes the compute resource requirements."'), + resources: { + '#withClaims':: d.fn(help='"Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container.\\n\\nThis is an alpha field and requires enabling the DynamicResourceAllocation feature gate.\\n\\nThis field is immutable. It can only be set for containers."', args=[d.arg(name='claims', type=d.T.array)]), + withClaims(claims): { resources+: { claims: if std.isArray(v=claims) then claims else [claims] } }, + '#withClaimsMixin':: d.fn(help='"Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container.\\n\\nThis is an alpha field and requires enabling the DynamicResourceAllocation feature gate.\\n\\nThis field is immutable. 
It can only be set for containers."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='claims', type=d.T.array)]), + withClaimsMixin(claims): { resources+: { claims+: if std.isArray(v=claims) then claims else [claims] } }, + '#withLimits':: d.fn(help='"Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/"', args=[d.arg(name='limits', type=d.T.object)]), + withLimits(limits): { resources+: { limits: limits } }, + '#withLimitsMixin':: d.fn(help='"Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='limits', type=d.T.object)]), + withLimitsMixin(limits): { resources+: { limits+: limits } }, + '#withRequests':: d.fn(help='"Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/"', args=[d.arg(name='requests', type=d.T.object)]), + withRequests(requests): { resources+: { requests: requests } }, + '#withRequestsMixin':: d.fn(help='"Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='requests', type=d.T.object)]), + withRequestsMixin(requests): { resources+: { requests+: requests } }, + }, '#state':: d.obj(help='"ContainerState holds a possible state of container. Only one of its members may be specified. If none of them is specified, the default one is ContainerStateWaiting."'), state: { '#running':: d.obj(help='"ContainerStateRunning is a running state of a container."'), @@ -42,7 +57,7 @@ }, '#terminated':: d.obj(help='"ContainerStateTerminated is a terminated state of a container."'), terminated: { - '#withContainerID':: d.fn(help="\"Container's ID in the format 'docker://\u003ccontainer_id\u003e'\"", args=[d.arg(name='containerID', type=d.T.string)]), + '#withContainerID':: d.fn(help="\"Container's ID in the format '\u003ctype\u003e://\u003ccontainer_id\u003e'\"", args=[d.arg(name='containerID', type=d.T.string)]), withContainerID(containerID): { state+: { terminated+: { containerID: containerID } } }, '#withExitCode':: d.fn(help='"Exit status from the last termination of the container"', args=[d.arg(name='exitCode', type=d.T.integer)]), withExitCode(exitCode): { state+: { terminated+: { exitCode: exitCode } } }, @@ -65,19 +80,23 @@ withReason(reason): { state+: { waiting+: { reason: reason } } }, }, }, - '#withContainerID':: d.fn(help="\"Container's ID in the format 'docker://\u003ccontainer_id\u003e'.\"", args=[d.arg(name='containerID', type=d.T.string)]), + '#withAllocatedResources':: d.fn(help='"AllocatedResources represents the compute resources allocated for this container by the node. 
Kubelet sets this value to Container.Resources.Requests upon successful pod admission and after successfully admitting desired pod resize."', args=[d.arg(name='allocatedResources', type=d.T.object)]), + withAllocatedResources(allocatedResources): { allocatedResources: allocatedResources }, + '#withAllocatedResourcesMixin':: d.fn(help='"AllocatedResources represents the compute resources allocated for this container by the node. Kubelet sets this value to Container.Resources.Requests upon successful pod admission and after successfully admitting desired pod resize."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='allocatedResources', type=d.T.object)]), + withAllocatedResourcesMixin(allocatedResources): { allocatedResources+: allocatedResources }, + '#withContainerID':: d.fn(help="\"ContainerID is the ID of the container in the format '\u003ctype\u003e://\u003ccontainer_id\u003e'. Where type is a container runtime identifier, returned from Version call of CRI API (for example \\\"containerd\\\").\"", args=[d.arg(name='containerID', type=d.T.string)]), withContainerID(containerID): { containerID: containerID }, - '#withImage':: d.fn(help='"The image the container is running. More info: https://kubernetes.io/docs/concepts/containers/images"', args=[d.arg(name='image', type=d.T.string)]), + '#withImage':: d.fn(help='"Image is the name of container image that the container is running. The container image may not match the image used in the PodSpec, as it may have been resolved by the runtime. More info: https://kubernetes.io/docs/concepts/containers/images."', args=[d.arg(name='image', type=d.T.string)]), withImage(image): { image: image }, - '#withImageID':: d.fn(help="\"ImageID of the container's image.\"", args=[d.arg(name='imageID', type=d.T.string)]), + '#withImageID':: d.fn(help="\"ImageID is the image ID of the container's image. The image ID may not match the image ID of the image used in the PodSpec, as it may have been resolved by the runtime.\"", args=[d.arg(name='imageID', type=d.T.string)]), withImageID(imageID): { imageID: imageID }, - '#withName':: d.fn(help='"This must be a DNS_LABEL. Each container in a pod must have a unique name. Cannot be updated."', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name is a DNS_LABEL representing the unique name of the container. Each container in a pod must have a unique name across all container types. Cannot be updated."', args=[d.arg(name='name', type=d.T.string)]), withName(name): { name: name }, - '#withReady':: d.fn(help='"Specifies whether the container has passed its readiness probe."', args=[d.arg(name='ready', type=d.T.boolean)]), + '#withReady':: d.fn(help='"Ready specifies whether the container is currently passing its readiness check. The value will change as readiness probes keep executing. If no readiness probes are specified, this field defaults to true once the container is fully started (see Started field).\\n\\nThe value is typically used to determine whether a container is ready to accept traffic."', args=[d.arg(name='ready', type=d.T.boolean)]), withReady(ready): { ready: ready }, - '#withRestartCount':: d.fn(help='"The number of times the container has been restarted, currently based on the number of dead containers that have not yet been removed. Note that this is calculated from dead containers. But those containers are subject to garbage collection. 
This value will get capped at 5 by GC."', args=[d.arg(name='restartCount', type=d.T.integer)]), + '#withRestartCount':: d.fn(help='"RestartCount holds the number of times the container has been restarted. Kubelet makes an effort to always increment the value, but there are cases when the state may be lost due to node restarts and then the value may be reset to 0. The value is never negative."', args=[d.arg(name='restartCount', type=d.T.integer)]), withRestartCount(restartCount): { restartCount: restartCount }, - '#withStarted':: d.fn(help='"Specifies whether the container has passed its startup probe. Initialized as false, becomes true after startupProbe is considered successful. Resets to false when the container is restarted, or if kubelet loses state temporarily. Is always true when no startupProbe is defined."', args=[d.arg(name='started', type=d.T.boolean)]), + '#withStarted':: d.fn(help='"Started indicates whether the container has finished its postStart lifecycle hook and passed its startup probe. Initialized as false, becomes true after startupProbe is considered successful. Resets to false when the container is restarted, or if kubelet loses state temporarily. In both cases, startup probes will run again. Is always true when no startupProbe is defined and container is running and has passed the postStart lifecycle hook. The null value must be treated the same as false."', args=[d.arg(name='started', type=d.T.boolean)]), withStarted(started): { started: started }, '#mixin': 'ignore', mixin: self, diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/csiPersistentVolumeSource.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/csiPersistentVolumeSource.libsonnet similarity index 59% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/csiPersistentVolumeSource.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/csiPersistentVolumeSource.libsonnet index cbf504bf3ec..78d0d7516aa 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/csiPersistentVolumeSource.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/csiPersistentVolumeSource.libsonnet @@ -3,43 +3,50 @@ '#':: d.pkg(name='csiPersistentVolumeSource', url='', help='"Represents storage that is managed by an external CSI volume driver (Beta feature)"'), '#controllerExpandSecretRef':: d.obj(help='"SecretReference represents a Secret Reference. 
It has enough information to retrieve secret in any namespace"'), controllerExpandSecretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), withName(name): { controllerExpandSecretRef+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { controllerExpandSecretRef+: { namespace: namespace } }, }, '#controllerPublishSecretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), controllerPublishSecretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), withName(name): { controllerPublishSecretRef+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { controllerPublishSecretRef+: { namespace: namespace } }, }, + '#nodeExpandSecretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), + nodeExpandSecretRef: { + '#withName':: d.fn(help='"name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { nodeExpandSecretRef+: { name: name } }, + '#withNamespace':: d.fn(help='"namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), + withNamespace(namespace): { nodeExpandSecretRef+: { namespace: namespace } }, + }, '#nodePublishSecretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), nodePublishSecretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), withName(name): { nodePublishSecretRef+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { nodePublishSecretRef+: { namespace: namespace } }, }, '#nodeStageSecretRef':: d.obj(help='"SecretReference represents a Secret Reference. 
It has enough information to retrieve secret in any namespace"'), nodeStageSecretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), withName(name): { nodeStageSecretRef+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { nodeStageSecretRef+: { namespace: namespace } }, }, - '#withDriver':: d.fn(help='"Driver is the name of the driver to use for this volume. Required."', args=[d.arg(name='driver', type=d.T.string)]), + '#withDriver':: d.fn(help='"driver is the name of the driver to use for this volume. Required."', args=[d.arg(name='driver', type=d.T.string)]), withDriver(driver): { driver: driver }, - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\"."', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\"."', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { fsType: fsType }, - '#withReadOnly':: d.fn(help='"Optional: The value to pass to ControllerPublishVolumeRequest. Defaults to false (read/write)."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly value to pass to ControllerPublishVolumeRequest. Defaults to false (read/write)."', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { readOnly: readOnly }, - '#withVolumeAttributes':: d.fn(help='"Attributes of the volume to publish."', args=[d.arg(name='volumeAttributes', type=d.T.object)]), + '#withVolumeAttributes':: d.fn(help='"volumeAttributes of the volume to publish."', args=[d.arg(name='volumeAttributes', type=d.T.object)]), withVolumeAttributes(volumeAttributes): { volumeAttributes: volumeAttributes }, - '#withVolumeAttributesMixin':: d.fn(help='"Attributes of the volume to publish."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='volumeAttributes', type=d.T.object)]), + '#withVolumeAttributesMixin':: d.fn(help='"volumeAttributes of the volume to publish."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='volumeAttributes', type=d.T.object)]), withVolumeAttributesMixin(volumeAttributes): { volumeAttributes+: volumeAttributes }, - '#withVolumeHandle':: d.fn(help='"VolumeHandle is the unique volume name returned by the CSI volume plugin’s CreateVolume to refer to the volume on all subsequent calls. Required."', args=[d.arg(name='volumeHandle', type=d.T.string)]), + '#withVolumeHandle':: d.fn(help='"volumeHandle is the unique volume name returned by the CSI volume plugin’s CreateVolume to refer to the volume on all subsequent calls. 
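// Illustrative usage sketch for the csiPersistentVolumeSource setters above, including the
// nodeExpandSecretRef group that is new in 1.29. Assumptions: the 1.29 library is reachable via
// its main.libsonnet import path, and the driver name / volume handle are hypothetical values.
local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';
local csiPV = k.core.v1.csiPersistentVolumeSource;

csiPV.withDriver('csi.example.com')                        // hypothetical driver name
+ csiPV.withVolumeHandle('vol-0123')                       // hypothetical volume handle
+ csiPV.nodeExpandSecretRef.withName('csi-expand-secret')  // merges under nodeExpandSecretRef
+ csiPV.nodeExpandSecretRef.withNamespace('kube-system')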
Required."', args=[d.arg(name='volumeHandle', type=d.T.string)]), withVolumeHandle(volumeHandle): { volumeHandle: volumeHandle }, '#mixin': 'ignore', mixin: self, diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/csiVolumeSource.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/csiVolumeSource.libsonnet similarity index 71% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/csiVolumeSource.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/csiVolumeSource.libsonnet index fecc79031cf..1d8df7aaa97 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/csiVolumeSource.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/csiVolumeSource.libsonnet @@ -6,15 +6,15 @@ '#withName':: d.fn(help='"Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { nodePublishSecretRef+: { name: name } }, }, - '#withDriver':: d.fn(help='"Driver is the name of the CSI driver that handles this volume. Consult with your admin for the correct name as registered in the cluster."', args=[d.arg(name='driver', type=d.T.string)]), + '#withDriver':: d.fn(help='"driver is the name of the CSI driver that handles this volume. Consult with your admin for the correct name as registered in the cluster."', args=[d.arg(name='driver', type=d.T.string)]), withDriver(driver): { driver: driver }, - '#withFsType':: d.fn(help='"Filesystem type to mount. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". If not provided, the empty value is passed to the associated CSI driver which will determine the default filesystem to apply."', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType to mount. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". If not provided, the empty value is passed to the associated CSI driver which will determine the default filesystem to apply."', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { fsType: fsType }, - '#withReadOnly':: d.fn(help='"Specifies a read-only configuration for the volume. Defaults to false (read/write)."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly specifies a read-only configuration for the volume. Defaults to false (read/write)."', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { readOnly: readOnly }, - '#withVolumeAttributes':: d.fn(help="\"VolumeAttributes stores driver-specific properties that are passed to the CSI driver. Consult your driver's documentation for supported values.\"", args=[d.arg(name='volumeAttributes', type=d.T.object)]), + '#withVolumeAttributes':: d.fn(help="\"volumeAttributes stores driver-specific properties that are passed to the CSI driver. Consult your driver's documentation for supported values.\"", args=[d.arg(name='volumeAttributes', type=d.T.object)]), withVolumeAttributes(volumeAttributes): { volumeAttributes: volumeAttributes }, - '#withVolumeAttributesMixin':: d.fn(help="\"VolumeAttributes stores driver-specific properties that are passed to the CSI driver. 
Consult your driver's documentation for supported values.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='volumeAttributes', type=d.T.object)]), + '#withVolumeAttributesMixin':: d.fn(help="\"volumeAttributes stores driver-specific properties that are passed to the CSI driver. Consult your driver's documentation for supported values.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='volumeAttributes', type=d.T.object)]), withVolumeAttributesMixin(volumeAttributes): { volumeAttributes+: volumeAttributes }, '#mixin': 'ignore', mixin: self, diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/daemonEndpoint.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/daemonEndpoint.libsonnet similarity index 100% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/daemonEndpoint.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/daemonEndpoint.libsonnet diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/downwardAPIProjection.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/downwardAPIProjection.libsonnet similarity index 100% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/downwardAPIProjection.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/downwardAPIProjection.libsonnet diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/downwardAPIVolumeFile.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/downwardAPIVolumeFile.libsonnet new file mode 100644 index 00000000000..2a32c0886a2 --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/downwardAPIVolumeFile.libsonnet @@ -0,0 +1,26 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='downwardAPIVolumeFile', url='', help='"DownwardAPIVolumeFile represents information to create the file containing the pod field"'), + '#fieldRef':: d.obj(help='"ObjectFieldSelector selects an APIVersioned field of an object."'), + fieldRef: { + '#withApiVersion':: d.fn(help='"Version of the schema the FieldPath is written in terms of, defaults to \\"v1\\"."', args=[d.arg(name='apiVersion', type=d.T.string)]), + withApiVersion(apiVersion): { fieldRef+: { apiVersion: apiVersion } }, + '#withFieldPath':: d.fn(help='"Path of the field to select in the specified API version."', args=[d.arg(name='fieldPath', type=d.T.string)]), + withFieldPath(fieldPath): { fieldRef+: { fieldPath: fieldPath } }, + }, + '#resourceFieldRef':: d.obj(help='"ResourceFieldSelector represents container resources (cpu, memory) and their output format"'), + resourceFieldRef: { + '#withContainerName':: d.fn(help='"Container name: required for volumes, optional for env vars"', args=[d.arg(name='containerName', type=d.T.string)]), + withContainerName(containerName): { resourceFieldRef+: { containerName: containerName } }, + '#withDivisor':: d.fn(help="\"Quantity is a fixed-point representation of a number. 
It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n``` \u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n\\n\\t(Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n\\n\\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n\\n\\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e ```\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n\\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\\n\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n\\n- 1.5 will be serialized as \\\"1500m\\\" - 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='divisor', type=d.T.string)]), + withDivisor(divisor): { resourceFieldRef+: { divisor: divisor } }, + '#withResource':: d.fn(help='"Required: resource to select"', args=[d.arg(name='resource', type=d.T.string)]), + withResource(resource): { resourceFieldRef+: { resource: resource } }, + }, + '#withMode':: d.fn(help='"Optional: mode bits used to set permissions on this file, must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. 
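// Illustrative sketch of the downwardAPIVolumeFile setters shown in this hunk: projecting pod
// labels into a file with an explicit 0644 mode. Assumptions: the 1.29 library is imported via
// its main.libsonnet path; the file path 'labels' is an arbitrary example value.
local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';
local file = k.core.v1.downwardAPIVolumeFile;

file.withPath('labels')
+ file.fieldRef.withFieldPath('metadata.labels')
+ file.withMode(420)  // decimal form of octal 0644, as the withMode help above requires for JSON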
This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set."', args=[d.arg(name='mode', type=d.T.integer)]), + withMode(mode): { mode: mode }, + '#withPath':: d.fn(help="\"Required: Path is the relative path name of the file to be created. Must not be absolute or contain the '..' path. Must be utf-8 encoded. The first item of the relative path must not start with '..'\"", args=[d.arg(name='path', type=d.T.string)]), + withPath(path): { path: path }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/downwardAPIVolumeSource.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/downwardAPIVolumeSource.libsonnet similarity index 100% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/downwardAPIVolumeSource.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/downwardAPIVolumeSource.libsonnet diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/emptyDirVolumeSource.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/emptyDirVolumeSource.libsonnet new file mode 100644 index 00000000000..1d05420ff43 --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/emptyDirVolumeSource.libsonnet @@ -0,0 +1,10 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='emptyDirVolumeSource', url='', help='"Represents an empty directory for a pod. Empty directory volumes support ownership management and SELinux relabeling."'), + '#withMedium':: d.fn(help="\"medium represents what type of storage medium should back this directory. The default is \\\"\\\" which means to use the node's default medium. Must be an empty string (default) or Memory. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir\"", args=[d.arg(name='medium', type=d.T.string)]), + withMedium(medium): { medium: medium }, + '#withSizeLimit':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n``` \u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n\\n\\t(Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. 
| .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n\\n\\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n\\n\\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e ```\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n\\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\\n\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n\\n- 1.5 will be serialized as \\\"1500m\\\" - 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='sizeLimit', type=d.T.string)]), + withSizeLimit(sizeLimit): { sizeLimit: sizeLimit }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/endpointAddress.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/endpointAddress.libsonnet similarity index 92% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/endpointAddress.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/endpointAddress.libsonnet index dd6d6d9d52b..12628c496a1 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/endpointAddress.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/endpointAddress.libsonnet @@ -20,7 +20,7 @@ }, '#withHostname':: d.fn(help='"The Hostname of this endpoint"', args=[d.arg(name='hostname', type=d.T.string)]), withHostname(hostname): { hostname: hostname }, - '#withIp':: d.fn(help='"The IP of this endpoint. May not be loopback (127.0.0.0/8), link-local (169.254.0.0/16), or link-local multicast ((224.0.0.0/24). IPv6 is also accepted but not fully supported on all platforms. 
Also, certain kubernetes components, like kube-proxy, are not IPv6 ready."', args=[d.arg(name='ip', type=d.T.string)]), + '#withIp':: d.fn(help='"The IP of this endpoint. May not be loopback (127.0.0.0/8 or ::1), link-local (169.254.0.0/16 or fe80::/10), or link-local multicast (224.0.0.0/24 or ff02::/16)."', args=[d.arg(name='ip', type=d.T.string)]), withIp(ip): { ip: ip }, '#withNodeName':: d.fn(help='"Optional: Node hosting this endpoint. This can be used to determine endpoints local to a node."', args=[d.arg(name='nodeName', type=d.T.string)]), withNodeName(nodeName): { nodeName: nodeName }, diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/endpointPort.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/endpointPort.libsonnet new file mode 100644 index 00000000000..1ba81c7f40e --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/endpointPort.libsonnet @@ -0,0 +1,14 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='endpointPort', url='', help='"EndpointPort is a tuple that describes a single port."'), + '#withAppProtocol':: d.fn(help="\"The application protocol for this port. This is used as a hint for implementations to offer richer behavior for protocols that they understand. This field follows standard Kubernetes label syntax. Valid values are either:\\n\\n* Un-prefixed protocol names - reserved for IANA standard service names (as per RFC-6335 and https://www.iana.org/assignments/service-names).\\n\\n* Kubernetes-defined prefixed names:\\n * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior-\\n * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455\\n * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455\\n\\n* Other protocols should use implementation-defined prefixed names such as mycompany.com/my-custom-protocol.\"", args=[d.arg(name='appProtocol', type=d.T.string)]), + withAppProtocol(appProtocol): { appProtocol: appProtocol }, + '#withName':: d.fn(help="\"The name of this port. This must match the 'name' field in the corresponding ServicePort. Must be a DNS_LABEL. Optional only if one port is defined.\"", args=[d.arg(name='name', type=d.T.string)]), + withName(name): { name: name }, + '#withPort':: d.fn(help='"The port number of the endpoint."', args=[d.arg(name='port', type=d.T.integer)]), + withPort(port): { port: port }, + '#withProtocol':: d.fn(help='"The IP protocol for this port. Must be UDP, TCP, or SCTP. 
Default is TCP."', args=[d.arg(name='protocol', type=d.T.string)]), + withProtocol(protocol): { protocol: protocol }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/endpointSubset.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/endpointSubset.libsonnet similarity index 87% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/endpointSubset.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/endpointSubset.libsonnet index 3cb64fc0960..40ac9bb8135 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/endpointSubset.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/endpointSubset.libsonnet @@ -1,6 +1,6 @@ { local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='endpointSubset', url='', help='"EndpointSubset is a group of addresses with a common set of ports. The expanded set of endpoints is the Cartesian product of Addresses x Ports. For example, given:\\n {\\n Addresses: [{\\"ip\\": \\"10.10.1.1\\"}, {\\"ip\\": \\"10.10.2.2\\"}],\\n Ports: [{\\"name\\": \\"a\\", \\"port\\": 8675}, {\\"name\\": \\"b\\", \\"port\\": 309}]\\n }\\nThe resulting set of endpoints can be viewed as:\\n a: [ 10.10.1.1:8675, 10.10.2.2:8675 ],\\n b: [ 10.10.1.1:309, 10.10.2.2:309 ]"'), + '#':: d.pkg(name='endpointSubset', url='', help='"EndpointSubset is a group of addresses with a common set of ports. The expanded set of endpoints is the Cartesian product of Addresses x Ports. For example, given:\\n\\n\\t{\\n\\t Addresses: [{\\"ip\\": \\"10.10.1.1\\"}, {\\"ip\\": \\"10.10.2.2\\"}],\\n\\t Ports: [{\\"name\\": \\"a\\", \\"port\\": 8675}, {\\"name\\": \\"b\\", \\"port\\": 309}]\\n\\t}\\n\\nThe resulting set of endpoints can be viewed as:\\n\\n\\ta: [ 10.10.1.1:8675, 10.10.2.2:8675 ],\\n\\tb: [ 10.10.1.1:309, 10.10.2.2:309 ]"'), '#withAddresses':: d.fn(help='"IP addresses which offer the related ports that are marked as ready. These endpoints should be considered safe for load balancers and clients to utilize."', args=[d.arg(name='addresses', type=d.T.array)]), withAddresses(addresses): { addresses: if std.isArray(v=addresses) then addresses else [addresses] }, '#withAddressesMixin':: d.fn(help='"IP addresses which offer the related ports that are marked as ready. 
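// Illustrative sketch that rebuilds the Cartesian-product example from the endpointSubset help
// text out of the generated setters shown above. Assumptions: the 1.29 library is imported via
// its main.libsonnet path; only withAddresses appears in this hunk, so the ports field is merged
// in as a plain object rather than assuming a withPorts helper.
local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';
local subset = k.core.v1.endpointSubset,
      address = k.core.v1.endpointAddress,
      port = k.core.v1.endpointPort;

subset.withAddresses([address.withIp('10.10.1.1'), address.withIp('10.10.2.2')])
+ { ports: [port.withName('a') + port.withPort(8675), port.withName('b') + port.withPort(309)] }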
These endpoints should be considered safe for load balancers and clients to utilize."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='addresses', type=d.T.array)]), diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/endpoints.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/endpoints.libsonnet similarity index 82% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/endpoints.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/endpoints.libsonnet index e1adbbfd79c..10a6231b9a0 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/endpoints.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/endpoints.libsonnet @@ -1,14 +1,12 @@ { local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='endpoints', url='', help='"Endpoints is a collection of endpoints that implement the actual service. Example:\\n Name: \\"mysvc\\",\\n Subsets: [\\n {\\n Addresses: [{\\"ip\\": \\"10.10.1.1\\"}, {\\"ip\\": \\"10.10.2.2\\"}],\\n Ports: [{\\"name\\": \\"a\\", \\"port\\": 8675}, {\\"name\\": \\"b\\", \\"port\\": 309}]\\n },\\n {\\n Addresses: [{\\"ip\\": \\"10.10.3.3\\"}],\\n Ports: [{\\"name\\": \\"a\\", \\"port\\": 93}, {\\"name\\": \\"b\\", \\"port\\": 76}]\\n },\\n ]"'), + '#':: d.pkg(name='endpoints', url='', help='"Endpoints is a collection of endpoints that implement the actual service. Example:\\n\\n\\t Name: \\"mysvc\\",\\n\\t Subsets: [\\n\\t {\\n\\t Addresses: [{\\"ip\\": \\"10.10.1.1\\"}, {\\"ip\\": \\"10.10.2.2\\"}],\\n\\t Ports: [{\\"name\\": \\"a\\", \\"port\\": 8675}, {\\"name\\": \\"b\\", \\"port\\": 309}]\\n\\t },\\n\\t {\\n\\t Addresses: [{\\"ip\\": \\"10.10.3.3\\"}],\\n\\t Ports: [{\\"name\\": \\"a\\", \\"port\\": 93}, {\\"name\\": \\"b\\", \\"port\\": 76}]\\n\\t },\\n\\t]"'), '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. 
More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -19,21 +17,21 @@ withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. 
The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { metadata+: { generateName: generateName } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { metadata+: { labels+: labels } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. 
This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { metadata+: { namespace: namespace } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. 
There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, @@ -41,9 +39,9 @@ withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. 
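// Illustrative sketch of how the metadata setters above compose: each one merges under the
// metadata field, so they can be chained onto an Endpoints object. Assumptions: the 1.29 library
// is imported via its main.libsonnet path; the name, namespace, and label values are examples.
local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';
local endpoints = k.core.v1.endpoints;

endpoints.metadata.withName('mysvc')
+ endpoints.metadata.withNamespace('default')
+ endpoints.metadata.withLabels({ 'app.kubernetes.io/name': 'mysvc' })
// evaluates to { metadata: { name: 'mysvc', namespace: 'default', labels: { ... } } }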
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { metadata+: { uid: uid } }, }, '#new':: d.fn(help='new returns an instance of Endpoints', args=[d.arg(name='name', type=d.T.string)]), diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/envFromSource.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/envFromSource.libsonnet similarity index 100% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/envFromSource.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/envFromSource.libsonnet diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/envVar.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/envVar.libsonnet similarity index 53% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/envVar.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/envVar.libsonnet index 899a0c25d6b..a837784743d 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/envVar.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/envVar.libsonnet @@ -23,7 +23,7 @@ resourceFieldRef: { '#withContainerName':: d.fn(help='"Container name: required for volumes, optional for env vars"', args=[d.arg(name='containerName', type=d.T.string)]), withContainerName(containerName): { valueFrom+: { resourceFieldRef+: { containerName: containerName } } }, - '#withDivisor':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) 
This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='divisor', type=d.T.string)]), + '#withDivisor':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n``` \u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n\\n\\t(Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n\\n\\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n\\n\\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e ```\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". 
This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n\\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\\n\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n\\n- 1.5 will be serialized as \\\"1500m\\\" - 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='divisor', type=d.T.string)]), withDivisor(divisor): { valueFrom+: { resourceFieldRef+: { divisor: divisor } } }, '#withResource':: d.fn(help='"Required: resource to select"', args=[d.arg(name='resource', type=d.T.string)]), withResource(resource): { valueFrom+: { resourceFieldRef+: { resource: resource } } }, @@ -40,7 +40,7 @@ }, '#withName':: d.fn(help='"Name of the environment variable. Must be a C_IDENTIFIER."', args=[d.arg(name='name', type=d.T.string)]), withName(name): { name: name }, - '#withValue':: d.fn(help='"Variable references $(VAR_NAME) are expanded using the previous defined environment variables in the container and any service environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to \\"\\"."', args=[d.arg(name='value', type=d.T.string)]), + '#withValue':: d.fn(help='"Variable references $(VAR_NAME) are expanded using the previously defined environment variables in the container and any service environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \\"$$(VAR_NAME)\\" will produce the string literal \\"$(VAR_NAME)\\". Escaped references will never be expanded, regardless of whether the variable exists or not. 
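// Illustrative sketch of the envVar setters, contrasting a plain value with the $$ escape
// described in the withValue help above. Assumptions: the 1.29 library is imported via its
// main.libsonnet path; the variable names and values are examples.
local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';
local envVar = k.core.v1.envVar;

[
  envVar.withName('LOG_LEVEL') + envVar.withValue('debug'),
  // per the help text, '$$(HOME)' is never expanded and yields the literal string '$(HOME)'
  envVar.withName('HOME_REF') + envVar.withValue('$$(HOME)'),
]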
Defaults to \\"\\"."', args=[d.arg(name='value', type=d.T.string)]), withValue(value): { value: value }, '#mixin': 'ignore', mixin: self, diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/envVarSource.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/envVarSource.libsonnet similarity index 53% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/envVarSource.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/envVarSource.libsonnet index 4fedae6993a..1c830b0ad03 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/envVarSource.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/envVarSource.libsonnet @@ -21,7 +21,7 @@ resourceFieldRef: { '#withContainerName':: d.fn(help='"Container name: required for volumes, optional for env vars"', args=[d.arg(name='containerName', type=d.T.string)]), withContainerName(containerName): { resourceFieldRef+: { containerName: containerName } }, - '#withDivisor':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. 
The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='divisor', type=d.T.string)]), + '#withDivisor':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n``` \u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n\\n\\t(Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n\\n\\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n\\n\\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e ```\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n\\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\\n\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n\\n- 1.5 will be serialized as \\\"1500m\\\" - 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. 
(So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='divisor', type=d.T.string)]), withDivisor(divisor): { resourceFieldRef+: { divisor: divisor } }, '#withResource':: d.fn(help='"Required: resource to select"', args=[d.arg(name='resource', type=d.T.string)]), withResource(resource): { resourceFieldRef+: { resource: resource } }, diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/ephemeralContainer.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/ephemeralContainer.libsonnet similarity index 78% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/ephemeralContainer.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/ephemeralContainer.libsonnet index 20a7dd8a0c2..937b5770da3 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/ephemeralContainer.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/ephemeralContainer.libsonnet @@ -1,9 +1,9 @@ { local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='ephemeralContainer', url='', help="\"An EphemeralContainer is a container that may be added temporarily to an existing pod for user-initiated activities such as debugging. Ephemeral containers have no resource or scheduling guarantees, and they will not be restarted when they exit or when a pod is removed or restarted. If an ephemeral container causes a pod to exceed its resource allocation, the pod may be evicted. Ephemeral containers may not be added by directly updating the pod spec. They must be added via the pod's ephemeralcontainers subresource, and they will appear in the pod spec once added. This is an alpha feature enabled by the EphemeralContainers feature flag.\""), + '#':: d.pkg(name='ephemeralContainer', url='', help='"An EphemeralContainer is a temporary container that you may add to an existing Pod for user-initiated activities such as debugging. Ephemeral containers have no resource or scheduling guarantees, and they will not be restarted when they exit or when a Pod is removed or restarted. The kubelet may evict a Pod if an ephemeral container causes the Pod to exceed its resource allocation.\\n\\nTo add an ephemeral container, use the ephemeralcontainers subresource of an existing Pod. Ephemeral containers may not be removed or restarted."'), '#lifecycle':: d.obj(help='"Lifecycle describes actions that the management system should take in response to container lifecycle events. For the PostStart and PreStop lifecycle handlers, management of the container blocks until the action is complete, unless the container process fails, in which case the handler is aborted."'), lifecycle: { - '#postStart':: d.obj(help='"Handler defines a specific action that should be taken"'), + '#postStart':: d.obj(help='"LifecycleHandler defines a specific action that should be taken in a lifecycle hook. One and only one of the fields, except TCPSocket must be specified."'), postStart: { '#exec':: d.obj(help='"ExecAction describes a \\"run in container\\" action."'), exec: { @@ -27,6 +27,11 @@ '#withScheme':: d.fn(help='"Scheme to use for connecting to the host. 
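As a usage note for the envVar/envVarSource helpers touched above: the resourceFieldRef setters compose as ordinary mixins. A minimal sketch, assuming the vendored 1.29 library is on the jsonnet search path (the import path mirrors the vendor layout; the 'app' container name, variable name, and divisor are placeholder values):

    local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';
    local envVar = k.core.v1.envVar;

    // Expose the container's memory limit, scaled to whole Mi by the divisor.
    // Per the Quantity help text above, the value is emitted in canonical form.
    envVar.withName('MEM_LIMIT_MI')
    + envVar.valueFrom.resourceFieldRef.withContainerName('app')
    + envVar.valueFrom.resourceFieldRef.withResource('limits.memory')
    + envVar.valueFrom.resourceFieldRef.withDivisor('1Mi')

The result is a plain EnvVar object that can be appended to a container with the withEnvMixin helper shown further down.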
Defaults to HTTP."', args=[d.arg(name='scheme', type=d.T.string)]), withScheme(scheme): { lifecycle+: { postStart+: { httpGet+: { scheme: scheme } } } }, }, + '#sleep':: d.obj(help='"SleepAction describes a \\"sleep\\" action."'), + sleep: { + '#withSeconds':: d.fn(help='"Seconds is the number of seconds to sleep."', args=[d.arg(name='seconds', type=d.T.integer)]), + withSeconds(seconds): { lifecycle+: { postStart+: { sleep+: { seconds: seconds } } } }, + }, '#tcpSocket':: d.obj(help='"TCPSocketAction describes an action based on opening a socket"'), tcpSocket: { '#withHost':: d.fn(help='"Optional: Host name to connect to, defaults to the pod IP."', args=[d.arg(name='host', type=d.T.string)]), @@ -35,7 +40,7 @@ withPort(port): { lifecycle+: { postStart+: { tcpSocket+: { port: port } } } }, }, }, - '#preStop':: d.obj(help='"Handler defines a specific action that should be taken"'), + '#preStop':: d.obj(help='"LifecycleHandler defines a specific action that should be taken in a lifecycle hook. One and only one of the fields, except TCPSocket must be specified."'), preStop: { '#exec':: d.obj(help='"ExecAction describes a \\"run in container\\" action."'), exec: { @@ -59,6 +64,11 @@ '#withScheme':: d.fn(help='"Scheme to use for connecting to the host. Defaults to HTTP."', args=[d.arg(name='scheme', type=d.T.string)]), withScheme(scheme): { lifecycle+: { preStop+: { httpGet+: { scheme: scheme } } } }, }, + '#sleep':: d.obj(help='"SleepAction describes a \\"sleep\\" action."'), + sleep: { + '#withSeconds':: d.fn(help='"Seconds is the number of seconds to sleep."', args=[d.arg(name='seconds', type=d.T.integer)]), + withSeconds(seconds): { lifecycle+: { preStop+: { sleep+: { seconds: seconds } } } }, + }, '#tcpSocket':: d.obj(help='"TCPSocketAction describes an action based on opening a socket"'), tcpSocket: { '#withHost':: d.fn(help='"Optional: Host name to connect to, defaults to the pod IP."', args=[d.arg(name='host', type=d.T.string)]), @@ -77,6 +87,13 @@ '#withCommandMixin':: d.fn(help="\"Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='command', type=d.T.array)]), withCommandMixin(command): { livenessProbe+: { exec+: { command+: if std.isArray(v=command) then command else [command] } } }, }, + '#grpc':: d.obj(help=''), + grpc: { + '#withPort':: d.fn(help='"Port number of the gRPC service. Number must be in the range 1 to 65535."', args=[d.arg(name='port', type=d.T.integer)]), + withPort(port): { livenessProbe+: { grpc+: { port: port } } }, + '#withService':: d.fn(help='"Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).\\n\\nIf this is not specified, the default behavior is defined by gRPC."', args=[d.arg(name='service', type=d.T.string)]), + withService(service): { livenessProbe+: { grpc+: { service: service } } }, + }, '#httpGet':: d.obj(help='"HTTPGetAction describes an action based on HTTP Get requests."'), httpGet: { '#withHost':: d.fn(help='"Host name to connect to, defaults to the pod IP. 
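The sleep lifecycle hook and the grpc probe blocks added here are new in the 1.29-era library. Since the Kubernetes API does not accept lifecycle hooks or probes on ephemeral containers, here is a sketch against core.v1.container, which the generator equips with the same nested setters (image, port, and timings are illustrative; same assumed import as above):

    local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';
    local container = k.core.v1.container;

    container.new('app', 'registry.example/app:1.0')
    // Give in-flight requests a short drain window before SIGTERM.
    + container.lifecycle.preStop.sleep.withSeconds(5)
    // Probe the standard gRPC health-checking service instead of exec'ing a binary.
    + container.livenessProbe.grpc.withPort(9095)
    + container.livenessProbe.withPeriodSeconds(10)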
You probably want to set \\"Host\\" in httpHeaders instead."', args=[d.arg(name='host', type=d.T.string)]), @@ -107,7 +124,7 @@ withPeriodSeconds(periodSeconds): { livenessProbe+: { periodSeconds: periodSeconds } }, '#withSuccessThreshold':: d.fn(help='"Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1."', args=[d.arg(name='successThreshold', type=d.T.integer)]), withSuccessThreshold(successThreshold): { livenessProbe+: { successThreshold: successThreshold } }, - '#withTerminationGracePeriodSeconds':: d.fn(help="\"Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is an alpha field and requires enabling ProbeTerminationGracePeriod feature gate.\"", args=[d.arg(name='terminationGracePeriodSeconds', type=d.T.integer)]), + '#withTerminationGracePeriodSeconds':: d.fn(help="\"Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset.\"", args=[d.arg(name='terminationGracePeriodSeconds', type=d.T.integer)]), withTerminationGracePeriodSeconds(terminationGracePeriodSeconds): { livenessProbe+: { terminationGracePeriodSeconds: terminationGracePeriodSeconds } }, '#withTimeoutSeconds':: d.fn(help='"Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes"', args=[d.arg(name='timeoutSeconds', type=d.T.integer)]), withTimeoutSeconds(timeoutSeconds): { livenessProbe+: { timeoutSeconds: timeoutSeconds } }, @@ -121,6 +138,13 @@ '#withCommandMixin':: d.fn(help="\"Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. 
Exit status of 0 is treated as live/healthy and non-zero is unhealthy.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='command', type=d.T.array)]), withCommandMixin(command): { readinessProbe+: { exec+: { command+: if std.isArray(v=command) then command else [command] } } }, }, + '#grpc':: d.obj(help=''), + grpc: { + '#withPort':: d.fn(help='"Port number of the gRPC service. Number must be in the range 1 to 65535."', args=[d.arg(name='port', type=d.T.integer)]), + withPort(port): { readinessProbe+: { grpc+: { port: port } } }, + '#withService':: d.fn(help='"Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).\\n\\nIf this is not specified, the default behavior is defined by gRPC."', args=[d.arg(name='service', type=d.T.string)]), + withService(service): { readinessProbe+: { grpc+: { service: service } } }, + }, '#httpGet':: d.obj(help='"HTTPGetAction describes an action based on HTTP Get requests."'), httpGet: { '#withHost':: d.fn(help='"Host name to connect to, defaults to the pod IP. You probably want to set \\"Host\\" in httpHeaders instead."', args=[d.arg(name='host', type=d.T.string)]), @@ -151,20 +175,24 @@ withPeriodSeconds(periodSeconds): { readinessProbe+: { periodSeconds: periodSeconds } }, '#withSuccessThreshold':: d.fn(help='"Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1."', args=[d.arg(name='successThreshold', type=d.T.integer)]), withSuccessThreshold(successThreshold): { readinessProbe+: { successThreshold: successThreshold } }, - '#withTerminationGracePeriodSeconds':: d.fn(help="\"Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is an alpha field and requires enabling ProbeTerminationGracePeriod feature gate.\"", args=[d.arg(name='terminationGracePeriodSeconds', type=d.T.integer)]), + '#withTerminationGracePeriodSeconds':: d.fn(help="\"Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. Minimum value is 1. 
spec.terminationGracePeriodSeconds is used if unset.\"", args=[d.arg(name='terminationGracePeriodSeconds', type=d.T.integer)]), withTerminationGracePeriodSeconds(terminationGracePeriodSeconds): { readinessProbe+: { terminationGracePeriodSeconds: terminationGracePeriodSeconds } }, '#withTimeoutSeconds':: d.fn(help='"Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes"', args=[d.arg(name='timeoutSeconds', type=d.T.integer)]), withTimeoutSeconds(timeoutSeconds): { readinessProbe+: { timeoutSeconds: timeoutSeconds } }, }, '#resources':: d.obj(help='"ResourceRequirements describes the compute resource requirements."'), resources: { + '#withClaims':: d.fn(help='"Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container.\\n\\nThis is an alpha field and requires enabling the DynamicResourceAllocation feature gate.\\n\\nThis field is immutable. It can only be set for containers."', args=[d.arg(name='claims', type=d.T.array)]), + withClaims(claims): { resources+: { claims: if std.isArray(v=claims) then claims else [claims] } }, + '#withClaimsMixin':: d.fn(help='"Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container.\\n\\nThis is an alpha field and requires enabling the DynamicResourceAllocation feature gate.\\n\\nThis field is immutable. It can only be set for containers."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='claims', type=d.T.array)]), + withClaimsMixin(claims): { resources+: { claims+: if std.isArray(v=claims) then claims else [claims] } }, '#withLimits':: d.fn(help='"Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/"', args=[d.arg(name='limits', type=d.T.object)]), withLimits(limits): { resources+: { limits: limits } }, '#withLimitsMixin':: d.fn(help='"Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='limits', type=d.T.object)]), withLimitsMixin(limits): { resources+: { limits+: limits } }, - '#withRequests':: d.fn(help='"Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/"', args=[d.arg(name='requests', type=d.T.object)]), + '#withRequests':: d.fn(help='"Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/"', args=[d.arg(name='requests', type=d.T.object)]), withRequests(requests): { resources+: { requests: requests } }, - '#withRequestsMixin':: d.fn(help='"Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. 
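The resources block gains withClaims/withClaimsMixin for the DynamicResourceAllocation feature gate; the requests/limits setters are unchanged apart from the clarified "Requests cannot exceed Limits" wording. A minimal sketch with placeholder values, under the same assumed import:

    local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';
    local container = k.core.v1.container;

    // Requests must not exceed the limits declared alongside them.
    container.resources.withRequests({ cpu: '100m', memory: '128Mi' })
    + container.resources.withLimits({ cpu: '500m', memory: '256Mi' })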
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='requests', type=d.T.object)]), + '#withRequestsMixin':: d.fn(help='"Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='requests', type=d.T.object)]), withRequestsMixin(requests): { resources+: { requests+: requests } }, }, '#securityContext':: d.obj(help='"SecurityContext holds security configuration that will be applied to a container. Some fields are present in both SecurityContext and PodSecurityContext. When both are set, the values in SecurityContext take precedence."'), @@ -193,7 +221,7 @@ }, '#seccompProfile':: d.obj(help="\"SeccompProfile defines a pod/container's seccomp profile settings. Only one profile source may be set.\""), seccompProfile: { - '#withLocalhostProfile':: d.fn(help="\"localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must only be set if type is \\\"Localhost\\\".\"", args=[d.arg(name='localhostProfile', type=d.T.string)]), + '#withLocalhostProfile':: d.fn(help="\"localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must be set if type is \\\"Localhost\\\". Must NOT be set for any other type.\"", args=[d.arg(name='localhostProfile', type=d.T.string)]), withLocalhostProfile(localhostProfile): { securityContext+: { seccompProfile+: { localhostProfile: localhostProfile } } }, '#withType':: d.fn(help='"type indicates which kind of seccomp profile will be applied. Valid options are:\\n\\nLocalhost - a profile defined in a file on the node should be used. RuntimeDefault - the container runtime default profile should be used. Unconfined - no profile should be applied."', args=[d.arg(name='type', type=d.T.string)]), withType(type): { securityContext+: { seccompProfile+: { type: type } } }, @@ -204,22 +232,24 @@ withGmsaCredentialSpec(gmsaCredentialSpec): { securityContext+: { windowsOptions+: { gmsaCredentialSpec: gmsaCredentialSpec } } }, '#withGmsaCredentialSpecName':: d.fn(help='"GMSACredentialSpecName is the name of the GMSA credential spec to use."', args=[d.arg(name='gmsaCredentialSpecName', type=d.T.string)]), withGmsaCredentialSpecName(gmsaCredentialSpecName): { securityContext+: { windowsOptions+: { gmsaCredentialSpecName: gmsaCredentialSpecName } } }, + '#withHostProcess':: d.fn(help="\"HostProcess determines if a container should be run as a 'Host Process' container. All of a Pod's containers must have the same effective HostProcess value (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). 
In addition, if HostProcess is true then HostNetwork must also be set to true.\"", args=[d.arg(name='hostProcess', type=d.T.boolean)]), + withHostProcess(hostProcess): { securityContext+: { windowsOptions+: { hostProcess: hostProcess } } }, '#withRunAsUserName':: d.fn(help='"The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence."', args=[d.arg(name='runAsUserName', type=d.T.string)]), withRunAsUserName(runAsUserName): { securityContext+: { windowsOptions+: { runAsUserName: runAsUserName } } }, }, - '#withAllowPrivilegeEscalation':: d.fn(help='"AllowPrivilegeEscalation controls whether a process can gain more privileges than its parent process. This bool directly controls if the no_new_privs flag will be set on the container process. AllowPrivilegeEscalation is true always when the container is: 1) run as Privileged 2) has CAP_SYS_ADMIN"', args=[d.arg(name='allowPrivilegeEscalation', type=d.T.boolean)]), + '#withAllowPrivilegeEscalation':: d.fn(help='"AllowPrivilegeEscalation controls whether a process can gain more privileges than its parent process. This bool directly controls if the no_new_privs flag will be set on the container process. AllowPrivilegeEscalation is true always when the container is: 1) run as Privileged 2) has CAP_SYS_ADMIN Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='allowPrivilegeEscalation', type=d.T.boolean)]), withAllowPrivilegeEscalation(allowPrivilegeEscalation): { securityContext+: { allowPrivilegeEscalation: allowPrivilegeEscalation } }, - '#withPrivileged':: d.fn(help='"Run container in privileged mode. Processes in privileged containers are essentially equivalent to root on the host. Defaults to false."', args=[d.arg(name='privileged', type=d.T.boolean)]), + '#withPrivileged':: d.fn(help='"Run container in privileged mode. Processes in privileged containers are essentially equivalent to root on the host. Defaults to false. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='privileged', type=d.T.boolean)]), withPrivileged(privileged): { securityContext+: { privileged: privileged } }, - '#withProcMount':: d.fn(help='"procMount denotes the type of proc mount to use for the containers. The default is DefaultProcMount which uses the container runtime defaults for readonly paths and masked paths. This requires the ProcMountType feature flag to be enabled."', args=[d.arg(name='procMount', type=d.T.string)]), + '#withProcMount':: d.fn(help='"procMount denotes the type of proc mount to use for the containers. The default is DefaultProcMount which uses the container runtime defaults for readonly paths and masked paths. This requires the ProcMountType feature flag to be enabled. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='procMount', type=d.T.string)]), withProcMount(procMount): { securityContext+: { procMount: procMount } }, - '#withReadOnlyRootFilesystem':: d.fn(help='"Whether this container has a read-only root filesystem. Default is false."', args=[d.arg(name='readOnlyRootFilesystem', type=d.T.boolean)]), + '#withReadOnlyRootFilesystem':: d.fn(help='"Whether this container has a read-only root filesystem. Default is false. 
Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='readOnlyRootFilesystem', type=d.T.boolean)]), withReadOnlyRootFilesystem(readOnlyRootFilesystem): { securityContext+: { readOnlyRootFilesystem: readOnlyRootFilesystem } }, - '#withRunAsGroup':: d.fn(help='"The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence."', args=[d.arg(name='runAsGroup', type=d.T.integer)]), + '#withRunAsGroup':: d.fn(help='"The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='runAsGroup', type=d.T.integer)]), withRunAsGroup(runAsGroup): { securityContext+: { runAsGroup: runAsGroup } }, '#withRunAsNonRoot':: d.fn(help='"Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence."', args=[d.arg(name='runAsNonRoot', type=d.T.boolean)]), withRunAsNonRoot(runAsNonRoot): { securityContext+: { runAsNonRoot: runAsNonRoot } }, - '#withRunAsUser':: d.fn(help='"The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence."', args=[d.arg(name='runAsUser', type=d.T.integer)]), + '#withRunAsUser':: d.fn(help='"The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='runAsUser', type=d.T.integer)]), withRunAsUser(runAsUser): { securityContext+: { runAsUser: runAsUser } }, }, '#startupProbe':: d.obj(help='"Probe describes a health check to be performed against a container to determine whether it is alive or ready to receive traffic."'), @@ -231,6 +261,13 @@ '#withCommandMixin':: d.fn(help="\"Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='command', type=d.T.array)]), withCommandMixin(command): { startupProbe+: { exec+: { command+: if std.isArray(v=command) then command else [command] } } }, }, + '#grpc':: d.obj(help=''), + grpc: { + '#withPort':: d.fn(help='"Port number of the gRPC service. 
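Most of the securityContext help-text changes add the caveat that these Linux-only fields cannot be set when spec.os.name is windows, and windowsOptions gains withHostProcess. A hardened-Linux sketch using only setters that appear above (again via core.v1.container, which carries the same generated functions):

    local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';
    local container = k.core.v1.container;

    // All four fields below are Linux-only; the API rejects them when spec.os.name is windows.
    container.securityContext.withAllowPrivilegeEscalation(false)
    + container.securityContext.withReadOnlyRootFilesystem(true)
    + container.securityContext.withRunAsNonRoot(true)
    + container.securityContext.seccompProfile.withType('RuntimeDefault')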
Number must be in the range 1 to 65535."', args=[d.arg(name='port', type=d.T.integer)]), + withPort(port): { startupProbe+: { grpc+: { port: port } } }, + '#withService':: d.fn(help='"Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).\\n\\nIf this is not specified, the default behavior is defined by gRPC."', args=[d.arg(name='service', type=d.T.string)]), + withService(service): { startupProbe+: { grpc+: { service: service } } }, + }, '#httpGet':: d.obj(help='"HTTPGetAction describes an action based on HTTP Get requests."'), httpGet: { '#withHost':: d.fn(help='"Host name to connect to, defaults to the pod IP. You probably want to set \\"Host\\" in httpHeaders instead."', args=[d.arg(name='host', type=d.T.string)]), @@ -261,18 +298,18 @@ withPeriodSeconds(periodSeconds): { startupProbe+: { periodSeconds: periodSeconds } }, '#withSuccessThreshold':: d.fn(help='"Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1."', args=[d.arg(name='successThreshold', type=d.T.integer)]), withSuccessThreshold(successThreshold): { startupProbe+: { successThreshold: successThreshold } }, - '#withTerminationGracePeriodSeconds':: d.fn(help="\"Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is an alpha field and requires enabling ProbeTerminationGracePeriod feature gate.\"", args=[d.arg(name='terminationGracePeriodSeconds', type=d.T.integer)]), + '#withTerminationGracePeriodSeconds':: d.fn(help="\"Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset.\"", args=[d.arg(name='terminationGracePeriodSeconds', type=d.T.integer)]), withTerminationGracePeriodSeconds(terminationGracePeriodSeconds): { startupProbe+: { terminationGracePeriodSeconds: terminationGracePeriodSeconds } }, '#withTimeoutSeconds':: d.fn(help='"Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. 
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes"', args=[d.arg(name='timeoutSeconds', type=d.T.integer)]), withTimeoutSeconds(timeoutSeconds): { startupProbe+: { timeoutSeconds: timeoutSeconds } }, }, - '#withArgs':: d.fn(help="\"Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell\"", args=[d.arg(name='args', type=d.T.array)]), + '#withArgs':: d.fn(help="\"Arguments to the entrypoint. The image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \\\"$$(VAR_NAME)\\\" will produce the string literal \\\"$(VAR_NAME)\\\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell\"", args=[d.arg(name='args', type=d.T.array)]), withArgs(args): { args: if std.isArray(v=args) then args else [args] }, - '#withArgsMixin':: d.fn(help="\"Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='args', type=d.T.array)]), + '#withArgsMixin':: d.fn(help="\"Arguments to the entrypoint. The image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \\\"$$(VAR_NAME)\\\" will produce the string literal \\\"$(VAR_NAME)\\\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='args', type=d.T.array)]), withArgsMixin(args): { args+: if std.isArray(v=args) then args else [args] }, - '#withCommand':: d.fn(help="\"Entrypoint array. Not executed within a shell. The docker image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. 
If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell\"", args=[d.arg(name='command', type=d.T.array)]), + '#withCommand':: d.fn(help="\"Entrypoint array. Not executed within a shell. The image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \\\"$$(VAR_NAME)\\\" will produce the string literal \\\"$(VAR_NAME)\\\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell\"", args=[d.arg(name='command', type=d.T.array)]), withCommand(command): { command: if std.isArray(v=command) then command else [command] }, - '#withCommandMixin':: d.fn(help="\"Entrypoint array. Not executed within a shell. The docker image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='command', type=d.T.array)]), + '#withCommandMixin':: d.fn(help="\"Entrypoint array. Not executed within a shell. The image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \\\"$$(VAR_NAME)\\\" will produce the string literal \\\"$(VAR_NAME)\\\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='command', type=d.T.array)]), withCommandMixin(command): { command+: if std.isArray(v=command) then command else [command] }, '#withEnv':: d.fn(help='"List of environment variables to set in the container. Cannot be updated."', args=[d.arg(name='env', type=d.T.array)]), withEnv(env): { env: if std.isArray(v=env) then env else [env] }, @@ -282,7 +319,7 @@ withEnvFromMixin(envFrom): { envFrom+: if std.isArray(v=envFrom) then envFrom else [envFrom] }, '#withEnvMixin':: d.fn(help='"List of environment variables to set in the container. 
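The withCommand/withArgs help strings now describe the $$ reduction rather than plain escaping: $(VAR) expands from the container's environment, while $$(VAR) is reduced to the literal $(VAR). A short sketch with illustrative command and variable names:

    local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';
    local container = k.core.v1.container;

    container.withCommand(['/bin/sh', '-c'])
    // $(POD_NAME) expands if POD_NAME is defined in the container's env;
    // $$(HOME) is passed through as the literal string $(HOME).
    + container.withArgs(['echo "pod=$(POD_NAME) home-placeholder=$$(HOME)"'])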
Cannot be updated."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='env', type=d.T.array)]), withEnvMixin(env): { env+: if std.isArray(v=env) then env else [env] }, - '#withImage':: d.fn(help='"Docker image name. More info: https://kubernetes.io/docs/concepts/containers/images"', args=[d.arg(name='image', type=d.T.string)]), + '#withImage':: d.fn(help='"Container image name. More info: https://kubernetes.io/docs/concepts/containers/images"', args=[d.arg(name='image', type=d.T.string)]), withImage(image): { image: image }, '#withImagePullPolicy':: d.fn(help='"Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images"', args=[d.arg(name='imagePullPolicy', type=d.T.string)]), withImagePullPolicy(imagePullPolicy): { imagePullPolicy: imagePullPolicy }, @@ -292,11 +329,17 @@ withPorts(ports): { ports: if std.isArray(v=ports) then ports else [ports] }, '#withPortsMixin':: d.fn(help='"Ports are not allowed for ephemeral containers."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ports', type=d.T.array)]), withPortsMixin(ports): { ports+: if std.isArray(v=ports) then ports else [ports] }, + '#withResizePolicy':: d.fn(help='"Resources resize policy for the container."', args=[d.arg(name='resizePolicy', type=d.T.array)]), + withResizePolicy(resizePolicy): { resizePolicy: if std.isArray(v=resizePolicy) then resizePolicy else [resizePolicy] }, + '#withResizePolicyMixin':: d.fn(help='"Resources resize policy for the container."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='resizePolicy', type=d.T.array)]), + withResizePolicyMixin(resizePolicy): { resizePolicy+: if std.isArray(v=resizePolicy) then resizePolicy else [resizePolicy] }, + '#withRestartPolicy':: d.fn(help='"Restart policy for the container to manage the restart behavior of each container within a pod. This may only be set for init containers. You cannot set this field on ephemeral containers."', args=[d.arg(name='restartPolicy', type=d.T.string)]), + withRestartPolicy(restartPolicy): { restartPolicy: restartPolicy }, '#withStdin':: d.fn(help='"Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false."', args=[d.arg(name='stdin', type=d.T.boolean)]), withStdin(stdin): { stdin: stdin }, '#withStdinOnce':: d.fn(help='"Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF. Default is false"', args=[d.arg(name='stdinOnce', type=d.T.boolean)]), withStdinOnce(stdinOnce): { stdinOnce: stdinOnce }, - '#withTargetContainerName':: d.fn(help='"If set, the name of the container from PodSpec that this ephemeral container targets. The ephemeral container will be run in the namespaces (IPC, PID, etc) of this container. 
If not set then the ephemeral container is run in whatever namespaces are shared for the pod. Note that the container runtime must support this feature."', args=[d.arg(name='targetContainerName', type=d.T.string)]), + '#withTargetContainerName':: d.fn(help='"If set, the name of the container from PodSpec that this ephemeral container targets. The ephemeral container will be run in the namespaces (IPC, PID, etc) of this container. If not set then the ephemeral container uses the namespaces configured in the Pod spec.\\n\\nThe container runtime must implement support for this feature. If the runtime does not support namespace targeting then the result of setting this field is undefined."', args=[d.arg(name='targetContainerName', type=d.T.string)]), withTargetContainerName(targetContainerName): { targetContainerName: targetContainerName }, '#withTerminationMessagePath':: d.fn(help="\"Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated.\"", args=[d.arg(name='terminationMessagePath', type=d.T.string)]), withTerminationMessagePath(terminationMessagePath): { terminationMessagePath: terminationMessagePath }, @@ -308,9 +351,9 @@ withVolumeDevices(volumeDevices): { volumeDevices: if std.isArray(v=volumeDevices) then volumeDevices else [volumeDevices] }, '#withVolumeDevicesMixin':: d.fn(help='"volumeDevices is the list of block devices to be used by the container."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='volumeDevices', type=d.T.array)]), withVolumeDevicesMixin(volumeDevices): { volumeDevices+: if std.isArray(v=volumeDevices) then volumeDevices else [volumeDevices] }, - '#withVolumeMounts':: d.fn(help="\"Pod volumes to mount into the container's filesystem. Cannot be updated.\"", args=[d.arg(name='volumeMounts', type=d.T.array)]), + '#withVolumeMounts':: d.fn(help="\"Pod volumes to mount into the container's filesystem. Subpath mounts are not allowed for ephemeral containers. Cannot be updated.\"", args=[d.arg(name='volumeMounts', type=d.T.array)]), withVolumeMounts(volumeMounts): { volumeMounts: if std.isArray(v=volumeMounts) then volumeMounts else [volumeMounts] }, - '#withVolumeMountsMixin':: d.fn(help="\"Pod volumes to mount into the container's filesystem. Cannot be updated.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='volumeMounts', type=d.T.array)]), + '#withVolumeMountsMixin':: d.fn(help="\"Pod volumes to mount into the container's filesystem. Subpath mounts are not allowed for ephemeral containers. Cannot be updated.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='volumeMounts', type=d.T.array)]), withVolumeMountsMixin(volumeMounts): { volumeMounts+: if std.isArray(v=volumeMounts) then volumeMounts else [volumeMounts] }, '#withWorkingDir':: d.fn(help="\"Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. 
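Tying the ephemeralContainer setters together, a sketch of an interactive debug container attached to an existing container's namespaces; withName is the generated setter for the required name field (not shown in this hunk), and the image and target names are placeholders:

    local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';
    local ec = k.core.v1.ephemeralContainer;

    ec.withName('debugger')
    + ec.withImage('busybox:1.36')           // debug image, illustrative
    + ec.withTargetContainerName('app')      // run in the app container's namespaces
    + ec.withStdin(true)
    + ec.withStdinOnce(true)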
Cannot be updated.\"", args=[d.arg(name='workingDir', type=d.T.string)]), withWorkingDir(workingDir): { workingDir: workingDir }, diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/ephemeralVolumeSource.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/ephemeralVolumeSource.libsonnet similarity index 76% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/ephemeralVolumeSource.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/ephemeralVolumeSource.libsonnet index d3e1ad852dc..45d326c96e4 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/ephemeralVolumeSource.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/ephemeralVolumeSource.libsonnet @@ -5,12 +5,10 @@ volumeClaimTemplate: { '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { volumeClaimTemplate+: { metadata+: { annotations: annotations } } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { volumeClaimTemplate+: { metadata+: { annotations+: annotations } } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. 
This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { volumeClaimTemplate+: { metadata+: { clusterName: clusterName } } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { volumeClaimTemplate+: { metadata+: { creationTimestamp: creationTimestamp } } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -21,21 +19,21 @@ withFinalizers(finalizers): { volumeClaimTemplate+: { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { volumeClaimTemplate+: { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. 
If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { volumeClaimTemplate+: { metadata+: { generateName: generateName } } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { volumeClaimTemplate+: { metadata+: { generation: generation } } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { volumeClaimTemplate+: { metadata+: { labels: labels } } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { volumeClaimTemplate+: { metadata+: { labels+: labels } } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { volumeClaimTemplate+: { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. 
A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { volumeClaimTemplate+: { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { volumeClaimTemplate+: { metadata+: { name: name } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { volumeClaimTemplate+: { metadata+: { namespace: namespace } } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { volumeClaimTemplate+: { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } } }, @@ -43,9 +41,9 @@ withOwnerReferencesMixin(ownerReferences): { volumeClaimTemplate+: { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. 
May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { volumeClaimTemplate+: { metadata+: { resourceVersion: resourceVersion } } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { volumeClaimTemplate+: { metadata+: { selfLink: selfLink } } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { volumeClaimTemplate+: { metadata+: { uid: uid } } }, }, '#spec':: d.obj(help='"PersistentVolumeClaimSpec describes the common attributes of storage devices and allows a Source for provider-specific attributes"'), @@ -59,15 +57,26 @@ '#withName':: d.fn(help='"Name is the name of resource being referenced"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { volumeClaimTemplate+: { spec+: { dataSource+: { name: name } } } }, }, - '#resources':: d.obj(help='"ResourceRequirements describes the compute resource requirements."'), + '#dataSourceRef':: d.obj(help=''), + dataSourceRef: { + '#withApiGroup':: d.fn(help='"APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. 
For any other third-party types, APIGroup is required."', args=[d.arg(name='apiGroup', type=d.T.string)]), + withApiGroup(apiGroup): { volumeClaimTemplate+: { spec+: { dataSourceRef+: { apiGroup: apiGroup } } } }, + '#withKind':: d.fn(help='"Kind is the type of resource being referenced"', args=[d.arg(name='kind', type=d.T.string)]), + withKind(kind): { volumeClaimTemplate+: { spec+: { dataSourceRef+: { kind: kind } } } }, + '#withName':: d.fn(help='"Name is the name of resource being referenced"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { volumeClaimTemplate+: { spec+: { dataSourceRef+: { name: name } } } }, + '#withNamespace':: d.fn(help="\"Namespace is the namespace of resource being referenced Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled.\"", args=[d.arg(name='namespace', type=d.T.string)]), + withNamespace(namespace): { volumeClaimTemplate+: { spec+: { dataSourceRef+: { namespace: namespace } } } }, + }, + '#resources':: d.obj(help='"VolumeResourceRequirements describes the storage resource requirements for a volume."'), resources: { '#withLimits':: d.fn(help='"Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/"', args=[d.arg(name='limits', type=d.T.object)]), withLimits(limits): { volumeClaimTemplate+: { spec+: { resources+: { limits: limits } } } }, '#withLimitsMixin':: d.fn(help='"Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='limits', type=d.T.object)]), withLimitsMixin(limits): { volumeClaimTemplate+: { spec+: { resources+: { limits+: limits } } } }, - '#withRequests':: d.fn(help='"Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/"', args=[d.arg(name='requests', type=d.T.object)]), + '#withRequests':: d.fn(help='"Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/"', args=[d.arg(name='requests', type=d.T.object)]), withRequests(requests): { volumeClaimTemplate+: { spec+: { resources+: { requests: requests } } } }, - '#withRequestsMixin':: d.fn(help='"Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='requests', type=d.T.object)]), + '#withRequestsMixin':: d.fn(help='"Requests describes the minimum amount of compute resources required. 
If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='requests', type=d.T.object)]), withRequestsMixin(requests): { volumeClaimTemplate+: { spec+: { resources+: { requests+: requests } } } }, }, '#selector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), @@ -81,15 +90,17 @@ '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), withMatchLabelsMixin(matchLabels): { volumeClaimTemplate+: { spec+: { selector+: { matchLabels+: matchLabels } } } }, }, - '#withAccessModes':: d.fn(help='"AccessModes contains the desired access modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1"', args=[d.arg(name='accessModes', type=d.T.array)]), + '#withAccessModes':: d.fn(help='"accessModes contains the desired access modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1"', args=[d.arg(name='accessModes', type=d.T.array)]), withAccessModes(accessModes): { volumeClaimTemplate+: { spec+: { accessModes: if std.isArray(v=accessModes) then accessModes else [accessModes] } } }, - '#withAccessModesMixin':: d.fn(help='"AccessModes contains the desired access modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='accessModes', type=d.T.array)]), + '#withAccessModesMixin':: d.fn(help='"accessModes contains the desired access modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='accessModes', type=d.T.array)]), withAccessModesMixin(accessModes): { volumeClaimTemplate+: { spec+: { accessModes+: if std.isArray(v=accessModes) then accessModes else [accessModes] } } }, - '#withStorageClassName':: d.fn(help='"Name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1"', args=[d.arg(name='storageClassName', type=d.T.string)]), + '#withStorageClassName':: d.fn(help='"storageClassName is the name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1"', args=[d.arg(name='storageClassName', type=d.T.string)]), withStorageClassName(storageClassName): { volumeClaimTemplate+: { spec+: { storageClassName: storageClassName } } }, + '#withVolumeAttributesClassName':: d.fn(help="\"volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. 
If specified, the CSI driver will create or update the volume with the attributes defined in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass will be applied to the claim but it's not allowed to reset this field to empty string once it is set. If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass will be set by the persistentvolume controller if it exists. If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource exists. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#volumeattributesclass (Alpha) Using this field requires the VolumeAttributesClass feature gate to be enabled.\"", args=[d.arg(name='volumeAttributesClassName', type=d.T.string)]), + withVolumeAttributesClassName(volumeAttributesClassName): { volumeClaimTemplate+: { spec+: { volumeAttributesClassName: volumeAttributesClassName } } }, '#withVolumeMode':: d.fn(help='"volumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec."', args=[d.arg(name='volumeMode', type=d.T.string)]), withVolumeMode(volumeMode): { volumeClaimTemplate+: { spec+: { volumeMode: volumeMode } } }, - '#withVolumeName':: d.fn(help='"VolumeName is the binding reference to the PersistentVolume backing this claim."', args=[d.arg(name='volumeName', type=d.T.string)]), + '#withVolumeName':: d.fn(help='"volumeName is the binding reference to the PersistentVolume backing this claim."', args=[d.arg(name='volumeName', type=d.T.string)]), withVolumeName(volumeName): { volumeClaimTemplate+: { spec+: { volumeName: volumeName } } }, }, }, diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/event.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/event.libsonnet similarity index 90% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/event.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/event.libsonnet index 78021ee8ef7..5211af2599b 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/event.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/event.libsonnet @@ -20,12 +20,10 @@ }, '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -36,21 +34,21 @@ withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. 
Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { metadata+: { generateName: generateName } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. 
More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { metadata+: { labels+: labels } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. 
An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { metadata+: { namespace: namespace } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, @@ -58,9 +56,9 @@ withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { metadata+: { uid: uid } }, }, '#new':: d.fn(help='new returns an instance of Event', args=[d.arg(name='name', type=d.T.string)]), diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/eventSeries.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/eventSeries.libsonnet similarity index 100% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/eventSeries.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/eventSeries.libsonnet diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/eventSource.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/eventSource.libsonnet similarity index 100% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/eventSource.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/eventSource.libsonnet diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/execAction.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/execAction.libsonnet similarity index 100% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/execAction.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/execAction.libsonnet diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/fcVolumeSource.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/fcVolumeSource.libsonnet new file mode 100644 index 00000000000..31608c3d0f2 --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/fcVolumeSource.libsonnet @@ -0,0 +1,20 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='fcVolumeSource', url='', help='"Represents a Fibre Channel volume. Fibre Channel volumes can only be mounted as read/write once. Fibre Channel volumes support ownership management and SELinux relabeling."'), + '#withFsType':: d.fn(help='"fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), + withFsType(fsType): { fsType: fsType }, + '#withLun':: d.fn(help='"lun is Optional: FC target lun number"', args=[d.arg(name='lun', type=d.T.integer)]), + withLun(lun): { lun: lun }, + '#withReadOnly':: d.fn(help='"readOnly is Optional: Defaults to false (read/write). 
ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + withReadOnly(readOnly): { readOnly: readOnly }, + '#withTargetWWNs':: d.fn(help='"targetWWNs is Optional: FC target worldwide names (WWNs)"', args=[d.arg(name='targetWWNs', type=d.T.array)]), + withTargetWWNs(targetWWNs): { targetWWNs: if std.isArray(v=targetWWNs) then targetWWNs else [targetWWNs] }, + '#withTargetWWNsMixin':: d.fn(help='"targetWWNs is Optional: FC target worldwide names (WWNs)"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='targetWWNs', type=d.T.array)]), + withTargetWWNsMixin(targetWWNs): { targetWWNs+: if std.isArray(v=targetWWNs) then targetWWNs else [targetWWNs] }, + '#withWwids':: d.fn(help='"wwids Optional: FC volume world wide identifiers (wwids) Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously."', args=[d.arg(name='wwids', type=d.T.array)]), + withWwids(wwids): { wwids: if std.isArray(v=wwids) then wwids else [wwids] }, + '#withWwidsMixin':: d.fn(help='"wwids Optional: FC volume world wide identifiers (wwids) Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='wwids', type=d.T.array)]), + withWwidsMixin(wwids): { wwids+: if std.isArray(v=wwids) then wwids else [wwids] }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/flexPersistentVolumeSource.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/flexPersistentVolumeSource.libsonnet similarity index 50% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/flexPersistentVolumeSource.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/flexPersistentVolumeSource.libsonnet index e2324d0aa86..545fc3a115e 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/flexPersistentVolumeSource.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/flexPersistentVolumeSource.libsonnet @@ -3,20 +3,20 @@ '#':: d.pkg(name='flexPersistentVolumeSource', url='', help='"FlexPersistentVolumeSource represents a generic persistent volume resource that is provisioned/attached using an exec based plugin."'), '#secretRef':: d.obj(help='"SecretReference represents a Secret Reference. 
It has enough information to retrieve secret in any namespace"'), secretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), withName(name): { secretRef+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { secretRef+: { namespace: namespace } }, }, - '#withDriver':: d.fn(help='"Driver is the name of the driver to use for this volume."', args=[d.arg(name='driver', type=d.T.string)]), + '#withDriver':: d.fn(help='"driver is the name of the driver to use for this volume."', args=[d.arg(name='driver', type=d.T.string)]), withDriver(driver): { driver: driver }, - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". The default filesystem depends on FlexVolume script."', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType is the Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". The default filesystem depends on FlexVolume script."', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { fsType: fsType }, - '#withOptions':: d.fn(help='"Optional: Extra command options if any."', args=[d.arg(name='options', type=d.T.object)]), + '#withOptions':: d.fn(help='"options is Optional: this field holds extra command options if any."', args=[d.arg(name='options', type=d.T.object)]), withOptions(options): { options: options }, - '#withOptionsMixin':: d.fn(help='"Optional: Extra command options if any."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='options', type=d.T.object)]), + '#withOptionsMixin':: d.fn(help='"options is Optional: this field holds extra command options if any."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='options', type=d.T.object)]), withOptionsMixin(options): { options+: options }, - '#withReadOnly':: d.fn(help='"Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly is Optional: defaults to false (read/write). 
ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { readOnly: readOnly }, '#mixin': 'ignore', mixin: self, diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/flexVolumeSource.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/flexVolumeSource.libsonnet similarity index 51% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/flexVolumeSource.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/flexVolumeSource.libsonnet index 09fb22cbb4c..d365d5e863b 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/flexVolumeSource.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/flexVolumeSource.libsonnet @@ -6,15 +6,15 @@ '#withName':: d.fn(help='"Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { secretRef+: { name: name } }, }, - '#withDriver':: d.fn(help='"Driver is the name of the driver to use for this volume."', args=[d.arg(name='driver', type=d.T.string)]), + '#withDriver':: d.fn(help='"driver is the name of the driver to use for this volume."', args=[d.arg(name='driver', type=d.T.string)]), withDriver(driver): { driver: driver }, - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". The default filesystem depends on FlexVolume script."', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". The default filesystem depends on FlexVolume script."', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { fsType: fsType }, - '#withOptions':: d.fn(help='"Optional: Extra command options if any."', args=[d.arg(name='options', type=d.T.object)]), + '#withOptions':: d.fn(help='"options is Optional: this field holds extra command options if any."', args=[d.arg(name='options', type=d.T.object)]), withOptions(options): { options: options }, - '#withOptionsMixin':: d.fn(help='"Optional: Extra command options if any."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='options', type=d.T.object)]), + '#withOptionsMixin':: d.fn(help='"options is Optional: this field holds extra command options if any."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='options', type=d.T.object)]), withOptionsMixin(options): { options+: options }, - '#withReadOnly':: d.fn(help='"Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly is Optional: defaults to false (read/write). 
ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { readOnly: readOnly }, '#mixin': 'ignore', mixin: self, diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/flockerVolumeSource.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/flockerVolumeSource.libsonnet similarity index 54% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/flockerVolumeSource.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/flockerVolumeSource.libsonnet index 0d140f17ad6..722d636ac98 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/flockerVolumeSource.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/flockerVolumeSource.libsonnet @@ -1,9 +1,9 @@ { local d = (import 'doc-util/main.libsonnet'), '#':: d.pkg(name='flockerVolumeSource', url='', help='"Represents a Flocker volume mounted by the Flocker agent. One and only one of datasetName and datasetUUID should be set. Flocker volumes do not support ownership management or SELinux relabeling."'), - '#withDatasetName':: d.fn(help='"Name of the dataset stored as metadata -> name on the dataset for Flocker should be considered as deprecated"', args=[d.arg(name='datasetName', type=d.T.string)]), + '#withDatasetName':: d.fn(help='"datasetName is Name of the dataset stored as metadata -> name on the dataset for Flocker should be considered as deprecated"', args=[d.arg(name='datasetName', type=d.T.string)]), withDatasetName(datasetName): { datasetName: datasetName }, - '#withDatasetUUID':: d.fn(help='"UUID of the dataset. This is unique identifier of a Flocker dataset"', args=[d.arg(name='datasetUUID', type=d.T.string)]), + '#withDatasetUUID':: d.fn(help='"datasetUUID is the UUID of the dataset. This is unique identifier of a Flocker dataset"', args=[d.arg(name='datasetUUID', type=d.T.string)]), withDatasetUUID(datasetUUID): { datasetUUID: datasetUUID }, '#mixin': 'ignore', mixin: self, diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/gcePersistentDiskVolumeSource.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/gcePersistentDiskVolumeSource.libsonnet new file mode 100644 index 00000000000..e045186a789 --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/gcePersistentDiskVolumeSource.libsonnet @@ -0,0 +1,14 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='gcePersistentDiskVolumeSource', url='', help='"Represents a Persistent Disk resource in Google Compute Engine.\\n\\nA GCE PD must exist before mounting to a container. The disk must also be in the same GCE project and zone as the kubelet. A GCE PD can only be mounted as read/write once or read-only many times. GCE PDs support ownership management and SELinux relabeling."'), + '#withFsType':: d.fn(help='"fsType is filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='fsType', type=d.T.string)]), + withFsType(fsType): { fsType: fsType }, + '#withPartition':: d.fn(help='"partition is the partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \\"1\\". Similarly, the volume partition for /dev/sda is \\"0\\" (or you can leave the property empty). More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='partition', type=d.T.integer)]), + withPartition(partition): { partition: partition }, + '#withPdName':: d.fn(help='"pdName is unique name of the PD resource in GCE. Used to identify the disk in GCE. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='pdName', type=d.T.string)]), + withPdName(pdName): { pdName: pdName }, + '#withReadOnly':: d.fn(help='"readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='readOnly', type=d.T.boolean)]), + withReadOnly(readOnly): { readOnly: readOnly }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/gitRepoVolumeSource.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/gitRepoVolumeSource.libsonnet similarity index 53% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/gitRepoVolumeSource.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/gitRepoVolumeSource.libsonnet index 5026bd1cb7b..e9e60213059 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/gitRepoVolumeSource.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/gitRepoVolumeSource.libsonnet @@ -1,11 +1,11 @@ { local d = (import 'doc-util/main.libsonnet'), '#':: d.pkg(name='gitRepoVolumeSource', url='', help="\"Represents a volume that is populated with the contents of a git repository. Git repo volumes do not support ownership management. Git repo volumes support SELinux relabeling.\\n\\nDEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod's container.\""), - '#withDirectory':: d.fn(help="\"Target directory name. Must not contain or start with '..'. If '.' is supplied, the volume directory will be the git repository. Otherwise, if specified, the volume will contain the git repository in the subdirectory with the given name.\"", args=[d.arg(name='directory', type=d.T.string)]), + '#withDirectory':: d.fn(help="\"directory is the target directory name. Must not contain or start with '..'. If '.' is supplied, the volume directory will be the git repository. 
Otherwise, if specified, the volume will contain the git repository in the subdirectory with the given name.\"", args=[d.arg(name='directory', type=d.T.string)]), withDirectory(directory): { directory: directory }, - '#withRepository':: d.fn(help='"Repository URL"', args=[d.arg(name='repository', type=d.T.string)]), + '#withRepository':: d.fn(help='"repository is the URL"', args=[d.arg(name='repository', type=d.T.string)]), withRepository(repository): { repository: repository }, - '#withRevision':: d.fn(help='"Commit hash for the specified revision."', args=[d.arg(name='revision', type=d.T.string)]), + '#withRevision':: d.fn(help='"revision is the commit hash for the specified revision."', args=[d.arg(name='revision', type=d.T.string)]), withRevision(revision): { revision: revision }, '#mixin': 'ignore', mixin: self, diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/glusterfsPersistentVolumeSource.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/glusterfsPersistentVolumeSource.libsonnet similarity index 72% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/glusterfsPersistentVolumeSource.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/glusterfsPersistentVolumeSource.libsonnet index 7959032f0ac..d504871c659 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/glusterfsPersistentVolumeSource.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/glusterfsPersistentVolumeSource.libsonnet @@ -1,13 +1,13 @@ { local d = (import 'doc-util/main.libsonnet'), '#':: d.pkg(name='glusterfsPersistentVolumeSource', url='', help='"Represents a Glusterfs mount that lasts the lifetime of a pod. Glusterfs volumes do not support ownership management or SELinux relabeling."'), - '#withEndpoints':: d.fn(help='"EndpointsName is the endpoint name that details Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='endpoints', type=d.T.string)]), + '#withEndpoints':: d.fn(help='"endpoints is the endpoint name that details Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='endpoints', type=d.T.string)]), withEndpoints(endpoints): { endpoints: endpoints }, - '#withEndpointsNamespace':: d.fn(help='"EndpointsNamespace is the namespace that contains Glusterfs endpoint. If this field is empty, the EndpointNamespace defaults to the same namespace as the bound PVC. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='endpointsNamespace', type=d.T.string)]), + '#withEndpointsNamespace':: d.fn(help='"endpointsNamespace is the namespace that contains Glusterfs endpoint. If this field is empty, the EndpointNamespace defaults to the same namespace as the bound PVC. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='endpointsNamespace', type=d.T.string)]), withEndpointsNamespace(endpointsNamespace): { endpointsNamespace: endpointsNamespace }, - '#withPath':: d.fn(help='"Path is the Glusterfs volume path. 
More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='path', type=d.T.string)]), + '#withPath':: d.fn(help='"path is the Glusterfs volume path. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='path', type=d.T.string)]), withPath(path): { path: path }, - '#withReadOnly':: d.fn(help='"ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions. Defaults to false. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly here will force the Glusterfs volume to be mounted with read-only permissions. Defaults to false. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { readOnly: readOnly }, '#mixin': 'ignore', mixin: self, diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/glusterfsVolumeSource.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/glusterfsVolumeSource.libsonnet similarity index 67% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/glusterfsVolumeSource.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/glusterfsVolumeSource.libsonnet index 5f61ee9b7da..1698ec6362a 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/glusterfsVolumeSource.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/glusterfsVolumeSource.libsonnet @@ -1,11 +1,11 @@ { local d = (import 'doc-util/main.libsonnet'), '#':: d.pkg(name='glusterfsVolumeSource', url='', help='"Represents a Glusterfs mount that lasts the lifetime of a pod. Glusterfs volumes do not support ownership management or SELinux relabeling."'), - '#withEndpoints':: d.fn(help='"EndpointsName is the endpoint name that details Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='endpoints', type=d.T.string)]), + '#withEndpoints':: d.fn(help='"endpoints is the endpoint name that details Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='endpoints', type=d.T.string)]), withEndpoints(endpoints): { endpoints: endpoints }, - '#withPath':: d.fn(help='"Path is the Glusterfs volume path. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='path', type=d.T.string)]), + '#withPath':: d.fn(help='"path is the Glusterfs volume path. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='path', type=d.T.string)]), withPath(path): { path: path }, - '#withReadOnly':: d.fn(help='"ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions. Defaults to false. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly here will force the Glusterfs volume to be mounted with read-only permissions. Defaults to false. 
More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { readOnly: readOnly }, '#mixin': 'ignore', mixin: self, diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/grpcAction.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/grpcAction.libsonnet new file mode 100644 index 00000000000..d2d37a0e0e1 --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/grpcAction.libsonnet @@ -0,0 +1,10 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='grpcAction', url='', help=''), + '#withPort':: d.fn(help='"Port number of the gRPC service. Number must be in the range 1 to 65535."', args=[d.arg(name='port', type=d.T.integer)]), + withPort(port): { port: port }, + '#withService':: d.fn(help='"Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).\\n\\nIf this is not specified, the default behavior is defined by gRPC."', args=[d.arg(name='service', type=d.T.string)]), + withService(service): { service: service }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/hostAlias.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/hostAlias.libsonnet similarity index 100% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/hostAlias.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/hostAlias.libsonnet diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/hostIP.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/hostIP.libsonnet new file mode 100644 index 00000000000..945b1e004c0 --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/hostIP.libsonnet @@ -0,0 +1,8 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='hostIP', url='', help='"HostIP represents a single IP address allocated to the host."'), + '#withIp':: d.fn(help='"IP is the IP address assigned to the host"', args=[d.arg(name='ip', type=d.T.string)]), + withIp(ip): { ip: ip }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/hostPathVolumeSource.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/hostPathVolumeSource.libsonnet similarity index 83% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/hostPathVolumeSource.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/hostPathVolumeSource.libsonnet index f7e35872a58..1e3dd933627 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/hostPathVolumeSource.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/hostPathVolumeSource.libsonnet @@ -1,9 +1,9 @@ { local d = (import 
'doc-util/main.libsonnet'), '#':: d.pkg(name='hostPathVolumeSource', url='', help='"Represents a host path mapped into a pod. Host path volumes do not support ownership management or SELinux relabeling."'), - '#withPath':: d.fn(help='"Path of the directory on the host. If the path is a symlink, it will follow the link to the real path. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath"', args=[d.arg(name='path', type=d.T.string)]), + '#withPath':: d.fn(help='"path of the directory on the host. If the path is a symlink, it will follow the link to the real path. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath"', args=[d.arg(name='path', type=d.T.string)]), withPath(path): { path: path }, - '#withType':: d.fn(help='"Type for HostPath Volume Defaults to \\"\\" More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath"', args=[d.arg(name='type', type=d.T.string)]), + '#withType':: d.fn(help='"type for HostPath Volume Defaults to \\"\\" More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath"', args=[d.arg(name='type', type=d.T.string)]), withType(type): { type: type }, '#mixin': 'ignore', mixin: self, diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/httpGetAction.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/httpGetAction.libsonnet similarity index 100% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/httpGetAction.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/httpGetAction.libsonnet diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/httpHeader.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/httpHeader.libsonnet similarity index 65% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/httpHeader.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/httpHeader.libsonnet index 0a78adb55e8..39644271996 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/httpHeader.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/httpHeader.libsonnet @@ -1,7 +1,7 @@ { local d = (import 'doc-util/main.libsonnet'), '#':: d.pkg(name='httpHeader', url='', help='"HTTPHeader describes a custom header to be used in HTTP probes"'), - '#withName':: d.fn(help='"The header field name"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"The header field name. 
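// Illustrative sketch, not part of the diff: the grpcAction helpers added in the 1.29 tree a few hunks
// above. The import path is assumed to mirror this vendor layout; adjust it to your own jsonnetfile.
local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';
local grpc = k.core.v1.grpcAction;
// Merges into { port: 4317, service: 'tempo' }, the object shape a probe's `grpc` field expects.
grpc.withPort(4317) + grpc.withService('tempo')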
This will be canonicalized upon output, so case-variant names will be understood as the same header."', args=[d.arg(name='name', type=d.T.string)]), withName(name): { name: name }, '#withValue':: d.fn(help='"The header field value"', args=[d.arg(name='value', type=d.T.string)]), withValue(value): { value: value }, diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/iscsiPersistentVolumeSource.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/iscsiPersistentVolumeSource.libsonnet new file mode 100644 index 00000000000..430270d71af --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/iscsiPersistentVolumeSource.libsonnet @@ -0,0 +1,35 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='iscsiPersistentVolumeSource', url='', help='"ISCSIPersistentVolumeSource represents an ISCSI disk. ISCSI volumes can only be mounted as read/write once. ISCSI volumes support ownership management and SELinux relabeling."'), + '#secretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), + secretRef: { + '#withName':: d.fn(help='"name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { secretRef+: { name: name } }, + '#withNamespace':: d.fn(help='"namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), + withNamespace(namespace): { secretRef+: { namespace: namespace } }, + }, + '#withChapAuthDiscovery':: d.fn(help='"chapAuthDiscovery defines whether support iSCSI Discovery CHAP authentication"', args=[d.arg(name='chapAuthDiscovery', type=d.T.boolean)]), + withChapAuthDiscovery(chapAuthDiscovery): { chapAuthDiscovery: chapAuthDiscovery }, + '#withChapAuthSession':: d.fn(help='"chapAuthSession defines whether support iSCSI Session CHAP authentication"', args=[d.arg(name='chapAuthSession', type=d.T.boolean)]), + withChapAuthSession(chapAuthSession): { chapAuthSession: chapAuthSession }, + '#withFsType':: d.fn(help='"fsType is the filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi"', args=[d.arg(name='fsType', type=d.T.string)]), + withFsType(fsType): { fsType: fsType }, + '#withInitiatorName':: d.fn(help='"initiatorName is the custom iSCSI Initiator Name. If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface : will be created for the connection."', args=[d.arg(name='initiatorName', type=d.T.string)]), + withInitiatorName(initiatorName): { initiatorName: initiatorName }, + '#withIqn':: d.fn(help='"iqn is Target iSCSI Qualified Name."', args=[d.arg(name='iqn', type=d.T.string)]), + withIqn(iqn): { iqn: iqn }, + '#withIscsiInterface':: d.fn(help="\"iscsiInterface is the interface Name that uses an iSCSI transport. 
Defaults to 'default' (tcp).\"", args=[d.arg(name='iscsiInterface', type=d.T.string)]), + withIscsiInterface(iscsiInterface): { iscsiInterface: iscsiInterface }, + '#withLun':: d.fn(help='"lun is iSCSI Target Lun number."', args=[d.arg(name='lun', type=d.T.integer)]), + withLun(lun): { lun: lun }, + '#withPortals':: d.fn(help='"portals is the iSCSI Target Portal List. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260)."', args=[d.arg(name='portals', type=d.T.array)]), + withPortals(portals): { portals: if std.isArray(v=portals) then portals else [portals] }, + '#withPortalsMixin':: d.fn(help='"portals is the iSCSI Target Portal List. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260)."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='portals', type=d.T.array)]), + withPortalsMixin(portals): { portals+: if std.isArray(v=portals) then portals else [portals] }, + '#withReadOnly':: d.fn(help='"readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + withReadOnly(readOnly): { readOnly: readOnly }, + '#withTargetPortal':: d.fn(help='"targetPortal is iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260)."', args=[d.arg(name='targetPortal', type=d.T.string)]), + withTargetPortal(targetPortal): { targetPortal: targetPortal }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/iscsiVolumeSource.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/iscsiVolumeSource.libsonnet new file mode 100644 index 00000000000..e905fb959b7 --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/iscsiVolumeSource.libsonnet @@ -0,0 +1,33 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='iscsiVolumeSource', url='', help='"Represents an ISCSI disk. ISCSI volumes can only be mounted as read/write once. ISCSI volumes support ownership management and SELinux relabeling."'), + '#secretRef':: d.obj(help='"LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace."'), + secretRef: { + '#withName':: d.fn(help='"Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { secretRef+: { name: name } }, + }, + '#withChapAuthDiscovery':: d.fn(help='"chapAuthDiscovery defines whether support iSCSI Discovery CHAP authentication"', args=[d.arg(name='chapAuthDiscovery', type=d.T.boolean)]), + withChapAuthDiscovery(chapAuthDiscovery): { chapAuthDiscovery: chapAuthDiscovery }, + '#withChapAuthSession':: d.fn(help='"chapAuthSession defines whether support iSCSI Session CHAP authentication"', args=[d.arg(name='chapAuthSession', type=d.T.boolean)]), + withChapAuthSession(chapAuthSession): { chapAuthSession: chapAuthSession }, + '#withFsType':: d.fn(help='"fsType is the filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi"', args=[d.arg(name='fsType', type=d.T.string)]), + withFsType(fsType): { fsType: fsType }, + '#withInitiatorName':: d.fn(help='"initiatorName is the custom iSCSI Initiator Name. If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface : will be created for the connection."', args=[d.arg(name='initiatorName', type=d.T.string)]), + withInitiatorName(initiatorName): { initiatorName: initiatorName }, + '#withIqn':: d.fn(help='"iqn is the target iSCSI Qualified Name."', args=[d.arg(name='iqn', type=d.T.string)]), + withIqn(iqn): { iqn: iqn }, + '#withIscsiInterface':: d.fn(help="\"iscsiInterface is the interface Name that uses an iSCSI transport. Defaults to 'default' (tcp).\"", args=[d.arg(name='iscsiInterface', type=d.T.string)]), + withIscsiInterface(iscsiInterface): { iscsiInterface: iscsiInterface }, + '#withLun':: d.fn(help='"lun represents iSCSI Target Lun number."', args=[d.arg(name='lun', type=d.T.integer)]), + withLun(lun): { lun: lun }, + '#withPortals':: d.fn(help='"portals is the iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260)."', args=[d.arg(name='portals', type=d.T.array)]), + withPortals(portals): { portals: if std.isArray(v=portals) then portals else [portals] }, + '#withPortalsMixin':: d.fn(help='"portals is the iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260)."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='portals', type=d.T.array)]), + withPortalsMixin(portals): { portals+: if std.isArray(v=portals) then portals else [portals] }, + '#withReadOnly':: d.fn(help='"readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + withReadOnly(readOnly): { readOnly: readOnly }, + '#withTargetPortal':: d.fn(help='"targetPortal is iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260)."', args=[d.arg(name='targetPortal', type=d.T.string)]), + withTargetPortal(targetPortal): { targetPortal: targetPortal }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/keyToPath.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/keyToPath.libsonnet new file mode 100644 index 00000000000..288c94ad40b --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/keyToPath.libsonnet @@ -0,0 +1,12 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='keyToPath', url='', help='"Maps a string key to a path within a volume."'), + '#withKey':: d.fn(help='"key is the key to project."', args=[d.arg(name='key', type=d.T.string)]), + withKey(key): { key: key }, + '#withMode':: d.fn(help='"mode is Optional: mode bits used to set permissions on this file. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. 
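// Illustrative sketch, not part of the diff: composing the regenerated iscsiVolumeSource setters shown
// above. The values are placeholders and the import path is assumed to mirror this vendor layout.
local iscsi = (import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet').core.v1.iscsiVolumeSource;
// Merges into { targetPortal: '10.0.0.1:3260', iqn: 'iqn.2001-04.com.example:storage.lun0', lun: 0 }.
iscsi.withTargetPortal('10.0.0.1:3260')
+ iscsi.withIqn('iqn.2001-04.com.example:storage.lun0')
+ iscsi.withLun(0)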
This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set."', args=[d.arg(name='mode', type=d.T.integer)]), + withMode(mode): { mode: mode }, + '#withPath':: d.fn(help="\"path is the relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'.\"", args=[d.arg(name='path', type=d.T.string)]), + withPath(path): { path: path }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/lifecycle.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/lifecycle.libsonnet similarity index 89% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/lifecycle.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/lifecycle.libsonnet index ca610c685b8..84282f59e9b 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/lifecycle.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/lifecycle.libsonnet @@ -1,7 +1,7 @@ { local d = (import 'doc-util/main.libsonnet'), '#':: d.pkg(name='lifecycle', url='', help='"Lifecycle describes actions that the management system should take in response to container lifecycle events. For the PostStart and PreStop lifecycle handlers, management of the container blocks until the action is complete, unless the container process fails, in which case the handler is aborted."'), - '#postStart':: d.obj(help='"Handler defines a specific action that should be taken"'), + '#postStart':: d.obj(help='"LifecycleHandler defines a specific action that should be taken in a lifecycle hook. One and only one of the fields, except TCPSocket must be specified."'), postStart: { '#exec':: d.obj(help='"ExecAction describes a \\"run in container\\" action."'), exec: { @@ -25,6 +25,11 @@ '#withScheme':: d.fn(help='"Scheme to use for connecting to the host. Defaults to HTTP."', args=[d.arg(name='scheme', type=d.T.string)]), withScheme(scheme): { postStart+: { httpGet+: { scheme: scheme } } }, }, + '#sleep':: d.obj(help='"SleepAction describes a \\"sleep\\" action."'), + sleep: { + '#withSeconds':: d.fn(help='"Seconds is the number of seconds to sleep."', args=[d.arg(name='seconds', type=d.T.integer)]), + withSeconds(seconds): { postStart+: { sleep+: { seconds: seconds } } }, + }, '#tcpSocket':: d.obj(help='"TCPSocketAction describes an action based on opening a socket"'), tcpSocket: { '#withHost':: d.fn(help='"Optional: Host name to connect to, defaults to the pod IP."', args=[d.arg(name='host', type=d.T.string)]), @@ -33,7 +38,7 @@ withPort(port): { postStart+: { tcpSocket+: { port: port } } }, }, }, - '#preStop':: d.obj(help='"Handler defines a specific action that should be taken"'), + '#preStop':: d.obj(help='"LifecycleHandler defines a specific action that should be taken in a lifecycle hook. One and only one of the fields, except TCPSocket must be specified."'), preStop: { '#exec':: d.obj(help='"ExecAction describes a \\"run in container\\" action."'), exec: { @@ -57,6 +62,11 @@ '#withScheme':: d.fn(help='"Scheme to use for connecting to the host. 
Defaults to HTTP."', args=[d.arg(name='scheme', type=d.T.string)]), withScheme(scheme): { preStop+: { httpGet+: { scheme: scheme } } }, }, + '#sleep':: d.obj(help='"SleepAction describes a \\"sleep\\" action."'), + sleep: { + '#withSeconds':: d.fn(help='"Seconds is the number of seconds to sleep."', args=[d.arg(name='seconds', type=d.T.integer)]), + withSeconds(seconds): { preStop+: { sleep+: { seconds: seconds } } }, + }, '#tcpSocket':: d.obj(help='"TCPSocketAction describes an action based on opening a socket"'), tcpSocket: { '#withHost':: d.fn(help='"Optional: Host name to connect to, defaults to the pod IP."', args=[d.arg(name='host', type=d.T.string)]), diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/handler.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/lifecycleHandler.libsonnet similarity index 88% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/handler.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/lifecycleHandler.libsonnet index 5067f1ffc93..1e967c78bbd 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/handler.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/lifecycleHandler.libsonnet @@ -1,6 +1,6 @@ { local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='handler', url='', help='"Handler defines a specific action that should be taken"'), + '#':: d.pkg(name='lifecycleHandler', url='', help='"LifecycleHandler defines a specific action that should be taken in a lifecycle hook. One and only one of the fields, except TCPSocket must be specified."'), '#exec':: d.obj(help='"ExecAction describes a \\"run in container\\" action."'), exec: { '#withCommand':: d.fn(help="\"Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy.\"", args=[d.arg(name='command', type=d.T.array)]), @@ -23,6 +23,11 @@ '#withScheme':: d.fn(help='"Scheme to use for connecting to the host. 
Defaults to HTTP."', args=[d.arg(name='scheme', type=d.T.string)]), withScheme(scheme): { httpGet+: { scheme: scheme } }, }, + '#sleep':: d.obj(help='"SleepAction describes a \\"sleep\\" action."'), + sleep: { + '#withSeconds':: d.fn(help='"Seconds is the number of seconds to sleep."', args=[d.arg(name='seconds', type=d.T.integer)]), + withSeconds(seconds): { sleep+: { seconds: seconds } }, + }, '#tcpSocket':: d.obj(help='"TCPSocketAction describes an action based on opening a socket"'), tcpSocket: { '#withHost':: d.fn(help='"Optional: Host name to connect to, defaults to the pod IP."', args=[d.arg(name='host', type=d.T.string)]), diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/limitRange.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/limitRange.libsonnet similarity index 84% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/limitRange.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/limitRange.libsonnet index eea20a3888d..7310f781b2e 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/limitRange.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/limitRange.libsonnet @@ -3,12 +3,10 @@ '#':: d.pkg(name='limitRange', url='', help='"LimitRange sets resource usage limits for each kind of resource in a Namespace."'), '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. 
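// Illustrative sketch, not part of the diff: the `sleep` hook added to lifecycle and lifecycleHandler
// above (Kubernetes 1.29's PodLifecycleSleepAction). Import path assumed to mirror this vendor layout.
local lifecycle = (import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet').core.v1.lifecycle;
// Renders { preStop: { sleep: { seconds: 5 } } }, mergeable into a container definition.
lifecycle.preStop.sleep.withSeconds(5)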
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -19,21 +17,21 @@ withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. 
The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { metadata+: { generateName: generateName } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { metadata+: { labels+: labels } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. 
This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { metadata+: { namespace: namespace } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. 
There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, @@ -41,9 +39,9 @@ withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { metadata+: { uid: uid } }, }, '#new':: d.fn(help='new returns an instance of LimitRange', args=[d.arg(name='name', type=d.T.string)]), diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/limitRangeItem.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/limitRangeItem.libsonnet similarity index 100% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/limitRangeItem.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/limitRangeItem.libsonnet diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/limitRangeSpec.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/limitRangeSpec.libsonnet similarity index 100% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/limitRangeSpec.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/limitRangeSpec.libsonnet diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/loadBalancerIngress.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/loadBalancerIngress.libsonnet similarity index 68% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/loadBalancerIngress.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/loadBalancerIngress.libsonnet index fae81edd0e7..17d2f7ea15f 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/loadBalancerIngress.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/loadBalancerIngress.libsonnet @@ -5,6 +5,8 @@ withHostname(hostname): { hostname: hostname }, '#withIp':: d.fn(help='"IP is set for load-balancer ingress points that are IP based (typically GCE or OpenStack load-balancers)"', args=[d.arg(name='ip', type=d.T.string)]), withIp(ip): { ip: ip }, + '#withIpMode':: d.fn(help="\"IPMode specifies how the load-balancer IP behaves, and may only be specified when the ip field is specified. Setting this to \\\"VIP\\\" indicates that traffic is delivered to the node with the destination set to the load-balancer's IP and port. Setting this to \\\"Proxy\\\" indicates that traffic is delivered to the node or pod with the destination set to the node's IP and node port or the pod's IP and port. 
Service implementations may use this information to adjust traffic routing.\"", args=[d.arg(name='ipMode', type=d.T.string)]), + withIpMode(ipMode): { ipMode: ipMode }, '#withPorts':: d.fn(help='"Ports is a list of records of service ports If used, every port defined in the service should have an entry in it"', args=[d.arg(name='ports', type=d.T.array)]), withPorts(ports): { ports: if std.isArray(v=ports) then ports else [ports] }, '#withPortsMixin':: d.fn(help='"Ports is a list of records of service ports If used, every port defined in the service should have an entry in it"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ports', type=d.T.array)]), diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/loadBalancerStatus.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/loadBalancerStatus.libsonnet similarity index 100% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/loadBalancerStatus.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/loadBalancerStatus.libsonnet diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/localObjectReference.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/localObjectReference.libsonnet similarity index 100% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/localObjectReference.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/localObjectReference.libsonnet diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/localVolumeSource.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/localVolumeSource.libsonnet new file mode 100644 index 00000000000..420a881e95f --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/localVolumeSource.libsonnet @@ -0,0 +1,10 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='localVolumeSource', url='', help='"Local represents directly-attached storage with node affinity (Beta feature)"'), + '#withFsType':: d.fn(help='"fsType is the filesystem type to mount. It applies only when the Path is a block device. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". The default value is to auto-select a filesystem if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), + withFsType(fsType): { fsType: fsType }, + '#withPath':: d.fn(help='"path of the full path to the volume on the node. 
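// Illustrative sketch, not part of the diff: the new `ipMode` setter on loadBalancerIngress added above
// (Kubernetes 1.29's LoadBalancerIPMode). Import path assumed to mirror this vendor layout.
local lbIngress = (import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet').core.v1.loadBalancerIngress;
// Yields { ip: '203.0.113.10', ipMode: 'Proxy' } for a Service's status.loadBalancer.ingress entry.
lbIngress.withIp('203.0.113.10') + lbIngress.withIpMode('Proxy')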
It can be either a directory or block device (disk, partition, ...)."', args=[d.arg(name='path', type=d.T.string)]), + withPath(path): { path: path }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/main.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/main.libsonnet similarity index 92% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/main.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/main.libsonnet index b4d784e4866..ef9fbe09d76 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/main.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/main.libsonnet @@ -13,7 +13,9 @@ cephFSVolumeSource: (import 'cephFSVolumeSource.libsonnet'), cinderPersistentVolumeSource: (import 'cinderPersistentVolumeSource.libsonnet'), cinderVolumeSource: (import 'cinderVolumeSource.libsonnet'), + claimSource: (import 'claimSource.libsonnet'), clientIPConfig: (import 'clientIPConfig.libsonnet'), + clusterTrustBundleProjection: (import 'clusterTrustBundleProjection.libsonnet'), componentCondition: (import 'componentCondition.libsonnet'), componentStatus: (import 'componentStatus.libsonnet'), configMap: (import 'configMap.libsonnet'), @@ -25,6 +27,7 @@ container: (import 'container.libsonnet'), containerImage: (import 'containerImage.libsonnet'), containerPort: (import 'containerPort.libsonnet'), + containerResizePolicy: (import 'containerResizePolicy.libsonnet'), containerState: (import 'containerState.libsonnet'), containerStateRunning: (import 'containerStateRunning.libsonnet'), containerStateTerminated: (import 'containerStateTerminated.libsonnet'), @@ -45,7 +48,6 @@ envVar: (import 'envVar.libsonnet'), envVarSource: (import 'envVarSource.libsonnet'), ephemeralContainer: (import 'ephemeralContainer.libsonnet'), - ephemeralContainers: (import 'ephemeralContainers.libsonnet'), ephemeralVolumeSource: (import 'ephemeralVolumeSource.libsonnet'), event: (import 'event.libsonnet'), eventSeries: (import 'eventSeries.libsonnet'), @@ -59,8 +61,9 @@ gitRepoVolumeSource: (import 'gitRepoVolumeSource.libsonnet'), glusterfsPersistentVolumeSource: (import 'glusterfsPersistentVolumeSource.libsonnet'), glusterfsVolumeSource: (import 'glusterfsVolumeSource.libsonnet'), - handler: (import 'handler.libsonnet'), + grpcAction: (import 'grpcAction.libsonnet'), hostAlias: (import 'hostAlias.libsonnet'), + hostIP: (import 'hostIP.libsonnet'), hostPathVolumeSource: (import 'hostPathVolumeSource.libsonnet'), httpGetAction: (import 'httpGetAction.libsonnet'), httpHeader: (import 'httpHeader.libsonnet'), @@ -68,6 +71,7 @@ iscsiVolumeSource: (import 'iscsiVolumeSource.libsonnet'), keyToPath: (import 'keyToPath.libsonnet'), lifecycle: (import 'lifecycle.libsonnet'), + lifecycleHandler: (import 'lifecycleHandler.libsonnet'), limitRange: (import 'limitRange.libsonnet'), limitRangeItem: (import 'limitRangeItem.libsonnet'), limitRangeSpec: (import 'limitRangeSpec.libsonnet'), @@ -75,6 +79,7 @@ loadBalancerStatus: (import 'loadBalancerStatus.libsonnet'), localObjectReference: (import 'localObjectReference.libsonnet'), localVolumeSource: (import 'localVolumeSource.libsonnet'), + modifyVolumeStatus: (import 'modifyVolumeStatus.libsonnet'), namespace: (import 'namespace.libsonnet'), namespaceCondition: (import 
'namespaceCondition.libsonnet'), namespaceSpec: (import 'namespaceSpec.libsonnet'), @@ -113,7 +118,11 @@ podDNSConfig: (import 'podDNSConfig.libsonnet'), podDNSConfigOption: (import 'podDNSConfigOption.libsonnet'), podIP: (import 'podIP.libsonnet'), + podOS: (import 'podOS.libsonnet'), podReadinessGate: (import 'podReadinessGate.libsonnet'), + podResourceClaim: (import 'podResourceClaim.libsonnet'), + podResourceClaimStatus: (import 'podResourceClaimStatus.libsonnet'), + podSchedulingGate: (import 'podSchedulingGate.libsonnet'), podSecurityContext: (import 'podSecurityContext.libsonnet'), podSpec: (import 'podSpec.libsonnet'), podStatus: (import 'podStatus.libsonnet'), @@ -131,6 +140,7 @@ replicationControllerCondition: (import 'replicationControllerCondition.libsonnet'), replicationControllerSpec: (import 'replicationControllerSpec.libsonnet'), replicationControllerStatus: (import 'replicationControllerStatus.libsonnet'), + resourceClaim: (import 'resourceClaim.libsonnet'), resourceFieldSelector: (import 'resourceFieldSelector.libsonnet'), resourceQuota: (import 'resourceQuota.libsonnet'), resourceQuotaSpec: (import 'resourceQuotaSpec.libsonnet'), @@ -156,6 +166,7 @@ serviceSpec: (import 'serviceSpec.libsonnet'), serviceStatus: (import 'serviceStatus.libsonnet'), sessionAffinityConfig: (import 'sessionAffinityConfig.libsonnet'), + sleepAction: (import 'sleepAction.libsonnet'), storageOSPersistentVolumeSource: (import 'storageOSPersistentVolumeSource.libsonnet'), storageOSVolumeSource: (import 'storageOSVolumeSource.libsonnet'), sysctl: (import 'sysctl.libsonnet'), @@ -166,11 +177,13 @@ topologySelectorTerm: (import 'topologySelectorTerm.libsonnet'), topologySpreadConstraint: (import 'topologySpreadConstraint.libsonnet'), typedLocalObjectReference: (import 'typedLocalObjectReference.libsonnet'), + typedObjectReference: (import 'typedObjectReference.libsonnet'), volume: (import 'volume.libsonnet'), volumeDevice: (import 'volumeDevice.libsonnet'), volumeMount: (import 'volumeMount.libsonnet'), volumeNodeAffinity: (import 'volumeNodeAffinity.libsonnet'), volumeProjection: (import 'volumeProjection.libsonnet'), + volumeResourceRequirements: (import 'volumeResourceRequirements.libsonnet'), vsphereVirtualDiskVolumeSource: (import 'vsphereVirtualDiskVolumeSource.libsonnet'), weightedPodAffinityTerm: (import 'weightedPodAffinityTerm.libsonnet'), windowsSecurityContextOptions: (import 'windowsSecurityContextOptions.libsonnet'), diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/modifyVolumeStatus.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/modifyVolumeStatus.libsonnet new file mode 100644 index 00000000000..7ff98a27d28 --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/modifyVolumeStatus.libsonnet @@ -0,0 +1,8 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='modifyVolumeStatus', url='', help='"ModifyVolumeStatus represents the status object of ControllerModifyVolume operation"'), + '#withTargetVolumeAttributesClassName':: d.fn(help='"targetVolumeAttributesClassName is the name of the VolumeAttributesClass the PVC currently being reconciled"', args=[d.arg(name='targetVolumeAttributesClassName', type=d.T.string)]), + withTargetVolumeAttributesClassName(targetVolumeAttributesClassName): { targetVolumeAttributesClassName: targetVolumeAttributesClassName }, + '#mixin': 'ignore', + mixin: self, 
+} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/namespace.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/namespace.libsonnet similarity index 84% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/namespace.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/namespace.libsonnet index 520aec3462e..d95602b3c43 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/namespace.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/namespace.libsonnet @@ -3,12 +3,10 @@ '#':: d.pkg(name='namespace', url='', help='"Namespace provides a scope for Names. Use of multiple namespaces is optional."'), '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. 
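// Illustrative note, not part of the diff: as the rename of handler.libsonnet to lifecycleHandler.libsonnet
// and the core/v1 index change above show, call sites that used the old package need a one-line move.
// Hypothetical before/after sketch, assuming the library is imported as `k`:
//   1.21: k.core.v1.handler.exec.withCommand(['/bin/sh', '-c', 'nginx -s quit'])
//   1.29: k.core.v1.lifecycleHandler.exec.withCommand(['/bin/sh', '-c', 'nginx -s quit'])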
Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -19,21 +17,21 @@ withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { metadata+: { generateName: generateName } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { metadata+: { labels+: labels } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. 
Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { metadata+: { namespace: namespace } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, @@ -41,9 +39,9 @@ withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { metadata+: { uid: uid } }, }, '#new':: d.fn(help='new returns an instance of Namespace', args=[d.arg(name='name', type=d.T.string)]), diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/namespaceCondition.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/namespaceCondition.libsonnet similarity index 100% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/namespaceCondition.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/namespaceCondition.libsonnet diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/namespaceSpec.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/namespaceSpec.libsonnet similarity index 100% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/namespaceSpec.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/namespaceSpec.libsonnet diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/namespaceStatus.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/namespaceStatus.libsonnet similarity index 100% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/namespaceStatus.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/namespaceStatus.libsonnet diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/nfsVolumeSource.libsonnet 
b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/nfsVolumeSource.libsonnet similarity index 81% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/nfsVolumeSource.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/nfsVolumeSource.libsonnet index 723a927ebda..ce26840604e 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/nfsVolumeSource.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/nfsVolumeSource.libsonnet @@ -1,11 +1,11 @@ { local d = (import 'doc-util/main.libsonnet'), '#':: d.pkg(name='nfsVolumeSource', url='', help='"Represents an NFS mount that lasts the lifetime of a pod. NFS volumes do not support ownership management or SELinux relabeling."'), - '#withPath':: d.fn(help='"Path that is exported by the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs"', args=[d.arg(name='path', type=d.T.string)]), + '#withPath':: d.fn(help='"path that is exported by the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs"', args=[d.arg(name='path', type=d.T.string)]), withPath(path): { path: path }, - '#withReadOnly':: d.fn(help='"ReadOnly here will force the NFS export to be mounted with read-only permissions. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs"', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly here will force the NFS export to be mounted with read-only permissions. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs"', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { readOnly: readOnly }, - '#withServer':: d.fn(help='"Server is the hostname or IP address of the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs"', args=[d.arg(name='server', type=d.T.string)]), + '#withServer':: d.fn(help='"server is the hostname or IP address of the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs"', args=[d.arg(name='server', type=d.T.string)]), withServer(server): { server: server }, '#mixin': 'ignore', mixin: self, diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/node.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/node.libsonnet similarity index 87% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/node.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/node.libsonnet index 541fd34b1fc..6303a35b316 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/node.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/node.libsonnet @@ -3,12 +3,10 @@ '#':: d.pkg(name='node', url='', help='"Node is a worker node in Kubernetes. Each node will have a unique identifier in the cache (i.e. in etcd)."'), '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. 
They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -19,21 +17,21 @@ withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. 
If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { metadata+: { generateName: generateName } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { metadata+: { labels+: labels } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. 
Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { metadata+: { namespace: namespace } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, @@ -41,9 +39,9 @@ withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. 
Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { metadata+: { uid: uid } }, }, '#new':: d.fn(help='new returns an instance of Node', args=[d.arg(name='name', type=d.T.string)]), @@ -53,9 +51,9 @@ } + self.metadata.withName(name=name), '#spec':: d.obj(help='"NodeSpec describes the attributes that a node is created with."'), spec: { - '#configSource':: d.obj(help='"NodeConfigSource specifies a source of node configuration. Exactly one subfield (excluding metadata) must be non-nil."'), + '#configSource':: d.obj(help='"NodeConfigSource specifies a source of node configuration. Exactly one subfield (excluding metadata) must be non-nil. This API is deprecated since 1.22"'), configSource: { - '#configMap':: d.obj(help='"ConfigMapNodeConfigSource contains the information to reference a ConfigMap as a config source for the Node."'), + '#configMap':: d.obj(help='"ConfigMapNodeConfigSource contains the information to reference a ConfigMap as a config source for the Node. This API is deprecated since 1.22: https://git.k8s.io/enhancements/keps/sig-node/281-dynamic-kubelet-configuration"'), configMap: { '#withKubeletConfigKey':: d.fn(help='"KubeletConfigKey declares which key of the referenced ConfigMap corresponds to the KubeletConfiguration structure This field is required in all cases."', args=[d.arg(name='kubeletConfigKey', type=d.T.string)]), withKubeletConfigKey(kubeletConfigKey): { spec+: { configSource+: { configMap+: { kubeletConfigKey: kubeletConfigKey } } } }, diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/nodeAddress.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/nodeAddress.libsonnet similarity index 100% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/nodeAddress.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/nodeAddress.libsonnet diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/nodeAffinity.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/nodeAffinity.libsonnet similarity index 100% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/nodeAffinity.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/nodeAffinity.libsonnet diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/nodeCondition.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/nodeCondition.libsonnet similarity index 100% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/nodeCondition.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/nodeCondition.libsonnet diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/nodeConfigSource.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/nodeConfigSource.libsonnet similarity index 89% rename from 
operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/nodeConfigSource.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/nodeConfigSource.libsonnet index f1f5b17762c..4845bb030b7 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/nodeConfigSource.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/nodeConfigSource.libsonnet @@ -1,7 +1,7 @@ { local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='nodeConfigSource', url='', help='"NodeConfigSource specifies a source of node configuration. Exactly one subfield (excluding metadata) must be non-nil."'), - '#configMap':: d.obj(help='"ConfigMapNodeConfigSource contains the information to reference a ConfigMap as a config source for the Node."'), + '#':: d.pkg(name='nodeConfigSource', url='', help='"NodeConfigSource specifies a source of node configuration. Exactly one subfield (excluding metadata) must be non-nil. This API is deprecated since 1.22"'), + '#configMap':: d.obj(help='"ConfigMapNodeConfigSource contains the information to reference a ConfigMap as a config source for the Node. This API is deprecated since 1.22: https://git.k8s.io/enhancements/keps/sig-node/281-dynamic-kubelet-configuration"'), configMap: { '#withKubeletConfigKey':: d.fn(help='"KubeletConfigKey declares which key of the referenced ConfigMap corresponds to the KubeletConfiguration structure This field is required in all cases."', args=[d.arg(name='kubeletConfigKey', type=d.T.string)]), withKubeletConfigKey(kubeletConfigKey): { configMap+: { kubeletConfigKey: kubeletConfigKey } }, diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/nodeConfigStatus.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/nodeConfigStatus.libsonnet similarity index 92% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/nodeConfigStatus.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/nodeConfigStatus.libsonnet index 3648b5e4835..d79efd2857d 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/nodeConfigStatus.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/nodeConfigStatus.libsonnet @@ -1,9 +1,9 @@ { local d = (import 'doc-util/main.libsonnet'), '#':: d.pkg(name='nodeConfigStatus', url='', help='"NodeConfigStatus describes the status of the config assigned by Node.Spec.ConfigSource."'), - '#active':: d.obj(help='"NodeConfigSource specifies a source of node configuration. Exactly one subfield (excluding metadata) must be non-nil."'), + '#active':: d.obj(help='"NodeConfigSource specifies a source of node configuration. Exactly one subfield (excluding metadata) must be non-nil. This API is deprecated since 1.22"'), active: { - '#configMap':: d.obj(help='"ConfigMapNodeConfigSource contains the information to reference a ConfigMap as a config source for the Node."'), + '#configMap':: d.obj(help='"ConfigMapNodeConfigSource contains the information to reference a ConfigMap as a config source for the Node. 
This API is deprecated since 1.22: https://git.k8s.io/enhancements/keps/sig-node/281-dynamic-kubelet-configuration"'), configMap: { '#withKubeletConfigKey':: d.fn(help='"KubeletConfigKey declares which key of the referenced ConfigMap corresponds to the KubeletConfiguration structure This field is required in all cases."', args=[d.arg(name='kubeletConfigKey', type=d.T.string)]), withKubeletConfigKey(kubeletConfigKey): { active+: { configMap+: { kubeletConfigKey: kubeletConfigKey } } }, @@ -17,9 +17,9 @@ withUid(uid): { active+: { configMap+: { uid: uid } } }, }, }, - '#assigned':: d.obj(help='"NodeConfigSource specifies a source of node configuration. Exactly one subfield (excluding metadata) must be non-nil."'), + '#assigned':: d.obj(help='"NodeConfigSource specifies a source of node configuration. Exactly one subfield (excluding metadata) must be non-nil. This API is deprecated since 1.22"'), assigned: { - '#configMap':: d.obj(help='"ConfigMapNodeConfigSource contains the information to reference a ConfigMap as a config source for the Node."'), + '#configMap':: d.obj(help='"ConfigMapNodeConfigSource contains the information to reference a ConfigMap as a config source for the Node. This API is deprecated since 1.22: https://git.k8s.io/enhancements/keps/sig-node/281-dynamic-kubelet-configuration"'), configMap: { '#withKubeletConfigKey':: d.fn(help='"KubeletConfigKey declares which key of the referenced ConfigMap corresponds to the KubeletConfiguration structure This field is required in all cases."', args=[d.arg(name='kubeletConfigKey', type=d.T.string)]), withKubeletConfigKey(kubeletConfigKey): { assigned+: { configMap+: { kubeletConfigKey: kubeletConfigKey } } }, @@ -33,9 +33,9 @@ withUid(uid): { assigned+: { configMap+: { uid: uid } } }, }, }, - '#lastKnownGood':: d.obj(help='"NodeConfigSource specifies a source of node configuration. Exactly one subfield (excluding metadata) must be non-nil."'), + '#lastKnownGood':: d.obj(help='"NodeConfigSource specifies a source of node configuration. Exactly one subfield (excluding metadata) must be non-nil. This API is deprecated since 1.22"'), lastKnownGood: { - '#configMap':: d.obj(help='"ConfigMapNodeConfigSource contains the information to reference a ConfigMap as a config source for the Node."'), + '#configMap':: d.obj(help='"ConfigMapNodeConfigSource contains the information to reference a ConfigMap as a config source for the Node. 
This API is deprecated since 1.22: https://git.k8s.io/enhancements/keps/sig-node/281-dynamic-kubelet-configuration"'), configMap: { '#withKubeletConfigKey':: d.fn(help='"KubeletConfigKey declares which key of the referenced ConfigMap corresponds to the KubeletConfiguration structure This field is required in all cases."', args=[d.arg(name='kubeletConfigKey', type=d.T.string)]), withKubeletConfigKey(kubeletConfigKey): { lastKnownGood+: { configMap+: { kubeletConfigKey: kubeletConfigKey } } }, diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/nodeDaemonEndpoints.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/nodeDaemonEndpoints.libsonnet similarity index 100% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/nodeDaemonEndpoints.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/nodeDaemonEndpoints.libsonnet diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/nodeSelector.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/nodeSelector.libsonnet similarity index 100% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/nodeSelector.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/nodeSelector.libsonnet diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/nodeSelectorRequirement.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/nodeSelectorRequirement.libsonnet similarity index 100% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/nodeSelectorRequirement.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/nodeSelectorRequirement.libsonnet diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/nodeSelectorTerm.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/nodeSelectorTerm.libsonnet similarity index 100% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/nodeSelectorTerm.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/nodeSelectorTerm.libsonnet diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/nodeSpec.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/nodeSpec.libsonnet similarity index 95% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/nodeSpec.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/nodeSpec.libsonnet index fddf879d618..a06625e279e 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/nodeSpec.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/nodeSpec.libsonnet @@ -1,9 +1,9 @@ { local d = (import 'doc-util/main.libsonnet'), '#':: d.pkg(name='nodeSpec', 
url='', help='"NodeSpec describes the attributes that a node is created with."'), - '#configSource':: d.obj(help='"NodeConfigSource specifies a source of node configuration. Exactly one subfield (excluding metadata) must be non-nil."'), + '#configSource':: d.obj(help='"NodeConfigSource specifies a source of node configuration. Exactly one subfield (excluding metadata) must be non-nil. This API is deprecated since 1.22"'), configSource: { - '#configMap':: d.obj(help='"ConfigMapNodeConfigSource contains the information to reference a ConfigMap as a config source for the Node."'), + '#configMap':: d.obj(help='"ConfigMapNodeConfigSource contains the information to reference a ConfigMap as a config source for the Node. This API is deprecated since 1.22: https://git.k8s.io/enhancements/keps/sig-node/281-dynamic-kubelet-configuration"'), configMap: { '#withKubeletConfigKey':: d.fn(help='"KubeletConfigKey declares which key of the referenced ConfigMap corresponds to the KubeletConfiguration structure This field is required in all cases."', args=[d.arg(name='kubeletConfigKey', type=d.T.string)]), withKubeletConfigKey(kubeletConfigKey): { configSource+: { configMap+: { kubeletConfigKey: kubeletConfigKey } } }, diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/nodeStatus.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/nodeStatus.libsonnet similarity index 86% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/nodeStatus.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/nodeStatus.libsonnet index 569c8753069..542ee6edaba 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/nodeStatus.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/nodeStatus.libsonnet @@ -3,9 +3,9 @@ '#':: d.pkg(name='nodeStatus', url='', help='"NodeStatus is information about the current status of a node."'), '#config':: d.obj(help='"NodeConfigStatus describes the status of the config assigned by Node.Spec.ConfigSource."'), config: { - '#active':: d.obj(help='"NodeConfigSource specifies a source of node configuration. Exactly one subfield (excluding metadata) must be non-nil."'), + '#active':: d.obj(help='"NodeConfigSource specifies a source of node configuration. Exactly one subfield (excluding metadata) must be non-nil. This API is deprecated since 1.22"'), active: { - '#configMap':: d.obj(help='"ConfigMapNodeConfigSource contains the information to reference a ConfigMap as a config source for the Node."'), + '#configMap':: d.obj(help='"ConfigMapNodeConfigSource contains the information to reference a ConfigMap as a config source for the Node. 
This API is deprecated since 1.22: https://git.k8s.io/enhancements/keps/sig-node/281-dynamic-kubelet-configuration"'), configMap: { '#withKubeletConfigKey':: d.fn(help='"KubeletConfigKey declares which key of the referenced ConfigMap corresponds to the KubeletConfiguration structure This field is required in all cases."', args=[d.arg(name='kubeletConfigKey', type=d.T.string)]), withKubeletConfigKey(kubeletConfigKey): { config+: { active+: { configMap+: { kubeletConfigKey: kubeletConfigKey } } } }, @@ -19,9 +19,9 @@ withUid(uid): { config+: { active+: { configMap+: { uid: uid } } } }, }, }, - '#assigned':: d.obj(help='"NodeConfigSource specifies a source of node configuration. Exactly one subfield (excluding metadata) must be non-nil."'), + '#assigned':: d.obj(help='"NodeConfigSource specifies a source of node configuration. Exactly one subfield (excluding metadata) must be non-nil. This API is deprecated since 1.22"'), assigned: { - '#configMap':: d.obj(help='"ConfigMapNodeConfigSource contains the information to reference a ConfigMap as a config source for the Node."'), + '#configMap':: d.obj(help='"ConfigMapNodeConfigSource contains the information to reference a ConfigMap as a config source for the Node. This API is deprecated since 1.22: https://git.k8s.io/enhancements/keps/sig-node/281-dynamic-kubelet-configuration"'), configMap: { '#withKubeletConfigKey':: d.fn(help='"KubeletConfigKey declares which key of the referenced ConfigMap corresponds to the KubeletConfiguration structure This field is required in all cases."', args=[d.arg(name='kubeletConfigKey', type=d.T.string)]), withKubeletConfigKey(kubeletConfigKey): { config+: { assigned+: { configMap+: { kubeletConfigKey: kubeletConfigKey } } } }, @@ -35,9 +35,9 @@ withUid(uid): { config+: { assigned+: { configMap+: { uid: uid } } } }, }, }, - '#lastKnownGood':: d.obj(help='"NodeConfigSource specifies a source of node configuration. Exactly one subfield (excluding metadata) must be non-nil."'), + '#lastKnownGood':: d.obj(help='"NodeConfigSource specifies a source of node configuration. Exactly one subfield (excluding metadata) must be non-nil. This API is deprecated since 1.22"'), lastKnownGood: { - '#configMap':: d.obj(help='"ConfigMapNodeConfigSource contains the information to reference a ConfigMap as a config source for the Node."'), + '#configMap':: d.obj(help='"ConfigMapNodeConfigSource contains the information to reference a ConfigMap as a config source for the Node. This API is deprecated since 1.22: https://git.k8s.io/enhancements/keps/sig-node/281-dynamic-kubelet-configuration"'), configMap: { '#withKubeletConfigKey':: d.fn(help='"KubeletConfigKey declares which key of the referenced ConfigMap corresponds to the KubeletConfiguration structure This field is required in all cases."', args=[d.arg(name='kubeletConfigKey', type=d.T.string)]), withKubeletConfigKey(kubeletConfigKey): { config+: { lastKnownGood+: { configMap+: { kubeletConfigKey: kubeletConfigKey } } } }, @@ -68,7 +68,7 @@ withArchitecture(architecture): { nodeInfo+: { architecture: architecture } }, '#withBootID':: d.fn(help='"Boot ID reported by the node."', args=[d.arg(name='bootID', type=d.T.string)]), withBootID(bootID): { nodeInfo+: { bootID: bootID } }, - '#withContainerRuntimeVersion':: d.fn(help='"ContainerRuntime Version reported by the node through runtime remote API (e.g. 
docker://1.5.0)."', args=[d.arg(name='containerRuntimeVersion', type=d.T.string)]), + '#withContainerRuntimeVersion':: d.fn(help='"ContainerRuntime Version reported by the node through runtime remote API (e.g. containerd://1.4.2)."', args=[d.arg(name='containerRuntimeVersion', type=d.T.string)]), withContainerRuntimeVersion(containerRuntimeVersion): { nodeInfo+: { containerRuntimeVersion: containerRuntimeVersion } }, '#withKernelVersion':: d.fn(help="\"Kernel Version reported by the node from 'uname -r' (e.g. 3.16.0-0.bpo.4-amd64).\"", args=[d.arg(name='kernelVersion', type=d.T.string)]), withKernelVersion(kernelVersion): { nodeInfo+: { kernelVersion: kernelVersion } }, @@ -85,9 +85,9 @@ '#withSystemUUID':: d.fn(help='"SystemUUID reported by the node. For unique machine identification MachineID is preferred. This field is specific to Red Hat hosts https://access.redhat.com/documentation/en-us/red_hat_subscription_management/1/html/rhsm/uuid"', args=[d.arg(name='systemUUID', type=d.T.string)]), withSystemUUID(systemUUID): { nodeInfo+: { systemUUID: systemUUID } }, }, - '#withAddresses':: d.fn(help='"List of addresses reachable to the node. Queried from cloud provider, if available. More info: https://kubernetes.io/docs/concepts/nodes/node/#addresses Note: This field is declared as mergeable, but the merge key is not sufficiently unique, which can cause data corruption when it is merged. Callers should instead use a full-replacement patch. See http://pr.k8s.io/79391 for an example."', args=[d.arg(name='addresses', type=d.T.array)]), + '#withAddresses':: d.fn(help="\"List of addresses reachable to the node. Queried from cloud provider, if available. More info: https://kubernetes.io/docs/concepts/nodes/node/#addresses Note: This field is declared as mergeable, but the merge key is not sufficiently unique, which can cause data corruption when it is merged. Callers should instead use a full-replacement patch. See https://pr.k8s.io/79391 for an example. Consumers should assume that addresses can change during the lifetime of a Node. However, there are some exceptions where this may not be possible, such as Pods that inherit a Node's address in its own status or consumers of the downward API (status.hostIP).\"", args=[d.arg(name='addresses', type=d.T.array)]), withAddresses(addresses): { addresses: if std.isArray(v=addresses) then addresses else [addresses] }, - '#withAddressesMixin':: d.fn(help='"List of addresses reachable to the node. Queried from cloud provider, if available. More info: https://kubernetes.io/docs/concepts/nodes/node/#addresses Note: This field is declared as mergeable, but the merge key is not sufficiently unique, which can cause data corruption when it is merged. Callers should instead use a full-replacement patch. See http://pr.k8s.io/79391 for an example."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='addresses', type=d.T.array)]), + '#withAddressesMixin':: d.fn(help="\"List of addresses reachable to the node. Queried from cloud provider, if available. More info: https://kubernetes.io/docs/concepts/nodes/node/#addresses Note: This field is declared as mergeable, but the merge key is not sufficiently unique, which can cause data corruption when it is merged. Callers should instead use a full-replacement patch. See https://pr.k8s.io/79391 for an example. Consumers should assume that addresses can change during the lifetime of a Node. 
However, there are some exceptions where this may not be possible, such as Pods that inherit a Node's address in its own status or consumers of the downward API (status.hostIP).\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='addresses', type=d.T.array)]), withAddressesMixin(addresses): { addresses+: if std.isArray(v=addresses) then addresses else [addresses] }, '#withAllocatable':: d.fn(help='"Allocatable represents the resources of a node that are available for scheduling. Defaults to Capacity."', args=[d.arg(name='allocatable', type=d.T.object)]), withAllocatable(allocatable): { allocatable: allocatable }, diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/nodeSystemInfo.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/nodeSystemInfo.libsonnet similarity index 94% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/nodeSystemInfo.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/nodeSystemInfo.libsonnet index 8b4d2593097..110062278e2 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/nodeSystemInfo.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/nodeSystemInfo.libsonnet @@ -5,7 +5,7 @@ withArchitecture(architecture): { architecture: architecture }, '#withBootID':: d.fn(help='"Boot ID reported by the node."', args=[d.arg(name='bootID', type=d.T.string)]), withBootID(bootID): { bootID: bootID }, - '#withContainerRuntimeVersion':: d.fn(help='"ContainerRuntime Version reported by the node through runtime remote API (e.g. docker://1.5.0)."', args=[d.arg(name='containerRuntimeVersion', type=d.T.string)]), + '#withContainerRuntimeVersion':: d.fn(help='"ContainerRuntime Version reported by the node through runtime remote API (e.g. containerd://1.4.2)."', args=[d.arg(name='containerRuntimeVersion', type=d.T.string)]), withContainerRuntimeVersion(containerRuntimeVersion): { containerRuntimeVersion: containerRuntimeVersion }, '#withKernelVersion':: d.fn(help="\"Kernel Version reported by the node from 'uname -r' (e.g. 
3.16.0-0.bpo.4-amd64).\"", args=[d.arg(name='kernelVersion', type=d.T.string)]), withKernelVersion(kernelVersion): { kernelVersion: kernelVersion }, diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/objectFieldSelector.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/objectFieldSelector.libsonnet similarity index 100% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/objectFieldSelector.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/objectFieldSelector.libsonnet diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/objectReference.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/objectReference.libsonnet similarity index 100% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/objectReference.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/objectReference.libsonnet diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/persistentVolume.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/persistentVolume.libsonnet similarity index 61% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/persistentVolume.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/persistentVolume.libsonnet index d87caa24595..888fd18e425 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/persistentVolume.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/persistentVolume.libsonnet @@ -3,12 +3,10 @@ '#':: d.pkg(name='persistentVolume', url='', help='"PersistentVolume (PV) is a storage resource provisioned by an administrator. It is analogous to a node. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes"'), '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. 
More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -19,21 +17,21 @@ withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. 
The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { metadata+: { generateName: generateName } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { metadata+: { labels+: labels } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. 
This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { metadata+: { namespace: namespace } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. 
There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, @@ -41,9 +39,9 @@ withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { metadata+: { uid: uid } }, }, '#new':: d.fn(help='new returns an instance of PersistentVolume', args=[d.arg(name='name', type=d.T.string)]), @@ -55,77 +53,77 @@ spec: { '#awsElasticBlockStore':: d.obj(help='"Represents a Persistent Disk resource in AWS.\\n\\nAn AWS EBS disk must exist before mounting to a container. The disk must also be in the same AWS zone as the kubelet. An AWS EBS disk can only be mounted as read/write once. AWS EBS volumes support ownership management and SELinux relabeling."'), awsElasticBlockStore: { - '#withFsType':: d.fn(help='"Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore"', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType is the filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore"', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { spec+: { awsElasticBlockStore+: { fsType: fsType } } }, - '#withPartition':: d.fn(help='"The partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \\"1\\". Similarly, the volume partition for /dev/sda is \\"0\\" (or you can leave the property empty)."', args=[d.arg(name='partition', type=d.T.integer)]), + '#withPartition':: d.fn(help='"partition is the partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \\"1\\". Similarly, the volume partition for /dev/sda is \\"0\\" (or you can leave the property empty)."', args=[d.arg(name='partition', type=d.T.integer)]), withPartition(partition): { spec+: { awsElasticBlockStore+: { partition: partition } } }, - '#withReadOnly':: d.fn(help='"Specify \\"true\\" to force and set the ReadOnly property in VolumeMounts to \\"true\\". If omitted, the default is \\"false\\". More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore"', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly value true will force the readOnly setting in VolumeMounts. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore"', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { spec+: { awsElasticBlockStore+: { readOnly: readOnly } } }, - '#withVolumeID':: d.fn(help='"Unique ID of the persistent disk resource in AWS (Amazon EBS volume). More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore"', args=[d.arg(name='volumeID', type=d.T.string)]), + '#withVolumeID':: d.fn(help='"volumeID is unique ID of the persistent disk resource in AWS (Amazon EBS volume). 
More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore"', args=[d.arg(name='volumeID', type=d.T.string)]), withVolumeID(volumeID): { spec+: { awsElasticBlockStore+: { volumeID: volumeID } } }, }, '#azureDisk':: d.obj(help='"AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod."'), azureDisk: { - '#withCachingMode':: d.fn(help='"Host Caching mode: None, Read Only, Read Write."', args=[d.arg(name='cachingMode', type=d.T.string)]), + '#withCachingMode':: d.fn(help='"cachingMode is the Host Caching mode: None, Read Only, Read Write."', args=[d.arg(name='cachingMode', type=d.T.string)]), withCachingMode(cachingMode): { spec+: { azureDisk+: { cachingMode: cachingMode } } }, - '#withDiskName':: d.fn(help='"The Name of the data disk in the blob storage"', args=[d.arg(name='diskName', type=d.T.string)]), + '#withDiskName':: d.fn(help='"diskName is the Name of the data disk in the blob storage"', args=[d.arg(name='diskName', type=d.T.string)]), withDiskName(diskName): { spec+: { azureDisk+: { diskName: diskName } } }, - '#withDiskURI':: d.fn(help='"The URI the data disk in the blob storage"', args=[d.arg(name='diskURI', type=d.T.string)]), + '#withDiskURI':: d.fn(help='"diskURI is the URI of data disk in the blob storage"', args=[d.arg(name='diskURI', type=d.T.string)]), withDiskURI(diskURI): { spec+: { azureDisk+: { diskURI: diskURI } } }, - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType is Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { spec+: { azureDisk+: { fsType: fsType } } }, - '#withKind':: d.fn(help='"Expected values Shared: multiple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared"', args=[d.arg(name='kind', type=d.T.string)]), + '#withKind':: d.fn(help='"kind expected values are Shared: multiple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared"', args=[d.arg(name='kind', type=d.T.string)]), withKind(kind): { spec+: { azureDisk+: { kind: kind } } }, - '#withReadOnly':: d.fn(help='"Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { spec+: { azureDisk+: { readOnly: readOnly } } }, }, '#azureFile':: d.obj(help='"AzureFile represents an Azure File Service mount on the host and bind mount to the pod."'), azureFile: { - '#withReadOnly':: d.fn(help='"Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly defaults to false (read/write). 
ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { spec+: { azureFile+: { readOnly: readOnly } } }, - '#withSecretName':: d.fn(help='"the name of secret that contains Azure Storage Account Name and Key"', args=[d.arg(name='secretName', type=d.T.string)]), + '#withSecretName':: d.fn(help='"secretName is the name of secret that contains Azure Storage Account Name and Key"', args=[d.arg(name='secretName', type=d.T.string)]), withSecretName(secretName): { spec+: { azureFile+: { secretName: secretName } } }, - '#withSecretNamespace':: d.fn(help='"the namespace of the secret that contains Azure Storage Account Name and Key default is the same as the Pod"', args=[d.arg(name='secretNamespace', type=d.T.string)]), + '#withSecretNamespace':: d.fn(help='"secretNamespace is the namespace of the secret that contains Azure Storage Account Name and Key default is the same as the Pod"', args=[d.arg(name='secretNamespace', type=d.T.string)]), withSecretNamespace(secretNamespace): { spec+: { azureFile+: { secretNamespace: secretNamespace } } }, - '#withShareName':: d.fn(help='"Share Name"', args=[d.arg(name='shareName', type=d.T.string)]), + '#withShareName':: d.fn(help='"shareName is the azure Share Name"', args=[d.arg(name='shareName', type=d.T.string)]), withShareName(shareName): { spec+: { azureFile+: { shareName: shareName } } }, }, '#cephfs':: d.obj(help='"Represents a Ceph Filesystem mount that lasts the lifetime of a pod Cephfs volumes do not support ownership management or SELinux relabeling."'), cephfs: { '#secretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), secretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), withName(name): { spec+: { cephfs+: { secretRef+: { name: name } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { spec+: { cephfs+: { secretRef+: { namespace: namespace } } } }, }, - '#withMonitors':: d.fn(help='"Required: Monitors is a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='monitors', type=d.T.array)]), + '#withMonitors':: d.fn(help='"monitors is Required: Monitors is a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='monitors', type=d.T.array)]), withMonitors(monitors): { spec+: { cephfs+: { monitors: if std.isArray(v=monitors) then monitors else [monitors] } } }, - '#withMonitorsMixin':: d.fn(help='"Required: Monitors is a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='monitors', type=d.T.array)]), + '#withMonitorsMixin':: d.fn(help='"monitors is Required: Monitors is a collection of Ceph monitors More info: 
https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='monitors', type=d.T.array)]), withMonitorsMixin(monitors): { spec+: { cephfs+: { monitors+: if std.isArray(v=monitors) then monitors else [monitors] } } }, - '#withPath':: d.fn(help='"Optional: Used as the mounted root, rather than the full Ceph tree, default is /"', args=[d.arg(name='path', type=d.T.string)]), + '#withPath':: d.fn(help='"path is Optional: Used as the mounted root, rather than the full Ceph tree, default is /"', args=[d.arg(name='path', type=d.T.string)]), withPath(path): { spec+: { cephfs+: { path: path } } }, - '#withReadOnly':: d.fn(help='"Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly is Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { spec+: { cephfs+: { readOnly: readOnly } } }, - '#withSecretFile':: d.fn(help='"Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='secretFile', type=d.T.string)]), + '#withSecretFile':: d.fn(help='"secretFile is Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='secretFile', type=d.T.string)]), withSecretFile(secretFile): { spec+: { cephfs+: { secretFile: secretFile } } }, - '#withUser':: d.fn(help='"Optional: User is the rados user name, default is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='user', type=d.T.string)]), + '#withUser':: d.fn(help='"user is Optional: User is the rados user name, default is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='user', type=d.T.string)]), withUser(user): { spec+: { cephfs+: { user: user } } }, }, '#cinder':: d.obj(help='"Represents a cinder volume resource in Openstack. A Cinder volume must exist before mounting to a container. The volume must also be in the same region as the kubelet. Cinder volumes support ownership management and SELinux relabeling."'), cinder: { '#secretRef':: d.obj(help='"SecretReference represents a Secret Reference. 
It has enough information to retrieve secret in any namespace"'), secretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), withName(name): { spec+: { cinder+: { secretRef+: { name: name } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { spec+: { cinder+: { secretRef+: { namespace: namespace } } } }, }, - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://examples.k8s.io/mysql-cinder-pd/README.md"', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType Filesystem type to mount. Must be a filesystem type supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://examples.k8s.io/mysql-cinder-pd/README.md"', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { spec+: { cinder+: { fsType: fsType } } }, - '#withReadOnly':: d.fn(help='"Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/mysql-cinder-pd/README.md"', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly is Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/mysql-cinder-pd/README.md"', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { spec+: { cinder+: { readOnly: readOnly } } }, - '#withVolumeID':: d.fn(help='"volume id used to identify the volume in cinder. More info: https://examples.k8s.io/mysql-cinder-pd/README.md"', args=[d.arg(name='volumeID', type=d.T.string)]), + '#withVolumeID':: d.fn(help='"volumeID used to identify the volume in cinder. More info: https://examples.k8s.io/mysql-cinder-pd/README.md"', args=[d.arg(name='volumeID', type=d.T.string)]), withVolumeID(volumeID): { spec+: { cinder+: { volumeID: volumeID } } }, }, '#claimRef':: d.obj(help='"ObjectReference contains enough information to let you inspect or modify the referred object."'), @@ -149,164 +147,171 @@ csi: { '#controllerExpandSecretRef':: d.obj(help='"SecretReference represents a Secret Reference. 
It has enough information to retrieve secret in any namespace"'), controllerExpandSecretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), withName(name): { spec+: { csi+: { controllerExpandSecretRef+: { name: name } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { spec+: { csi+: { controllerExpandSecretRef+: { namespace: namespace } } } }, }, '#controllerPublishSecretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), controllerPublishSecretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), withName(name): { spec+: { csi+: { controllerPublishSecretRef+: { name: name } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { spec+: { csi+: { controllerPublishSecretRef+: { namespace: namespace } } } }, }, + '#nodeExpandSecretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), + nodeExpandSecretRef: { + '#withName':: d.fn(help='"name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { spec+: { csi+: { nodeExpandSecretRef+: { name: name } } } }, + '#withNamespace':: d.fn(help='"namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), + withNamespace(namespace): { spec+: { csi+: { nodeExpandSecretRef+: { namespace: namespace } } } }, + }, '#nodePublishSecretRef':: d.obj(help='"SecretReference represents a Secret Reference. 
It has enough information to retrieve secret in any namespace"'), nodePublishSecretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), withName(name): { spec+: { csi+: { nodePublishSecretRef+: { name: name } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { spec+: { csi+: { nodePublishSecretRef+: { namespace: namespace } } } }, }, '#nodeStageSecretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), nodeStageSecretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), withName(name): { spec+: { csi+: { nodeStageSecretRef+: { name: name } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { spec+: { csi+: { nodeStageSecretRef+: { namespace: namespace } } } }, }, - '#withDriver':: d.fn(help='"Driver is the name of the driver to use for this volume. Required."', args=[d.arg(name='driver', type=d.T.string)]), + '#withDriver':: d.fn(help='"driver is the name of the driver to use for this volume. Required."', args=[d.arg(name='driver', type=d.T.string)]), withDriver(driver): { spec+: { csi+: { driver: driver } } }, - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\"."', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\"."', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { spec+: { csi+: { fsType: fsType } } }, - '#withReadOnly':: d.fn(help='"Optional: The value to pass to ControllerPublishVolumeRequest. Defaults to false (read/write)."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly value to pass to ControllerPublishVolumeRequest. 
Defaults to false (read/write)."', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { spec+: { csi+: { readOnly: readOnly } } }, - '#withVolumeAttributes':: d.fn(help='"Attributes of the volume to publish."', args=[d.arg(name='volumeAttributes', type=d.T.object)]), + '#withVolumeAttributes':: d.fn(help='"volumeAttributes of the volume to publish."', args=[d.arg(name='volumeAttributes', type=d.T.object)]), withVolumeAttributes(volumeAttributes): { spec+: { csi+: { volumeAttributes: volumeAttributes } } }, - '#withVolumeAttributesMixin':: d.fn(help='"Attributes of the volume to publish."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='volumeAttributes', type=d.T.object)]), + '#withVolumeAttributesMixin':: d.fn(help='"volumeAttributes of the volume to publish."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='volumeAttributes', type=d.T.object)]), withVolumeAttributesMixin(volumeAttributes): { spec+: { csi+: { volumeAttributes+: volumeAttributes } } }, - '#withVolumeHandle':: d.fn(help='"VolumeHandle is the unique volume name returned by the CSI volume plugin’s CreateVolume to refer to the volume on all subsequent calls. Required."', args=[d.arg(name='volumeHandle', type=d.T.string)]), + '#withVolumeHandle':: d.fn(help='"volumeHandle is the unique volume name returned by the CSI volume plugin’s CreateVolume to refer to the volume on all subsequent calls. Required."', args=[d.arg(name='volumeHandle', type=d.T.string)]), withVolumeHandle(volumeHandle): { spec+: { csi+: { volumeHandle: volumeHandle } } }, }, '#fc':: d.obj(help='"Represents a Fibre Channel volume. Fibre Channel volumes can only be mounted as read/write once. Fibre Channel volumes support ownership management and SELinux relabeling."'), fc: { - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { spec+: { fc+: { fsType: fsType } } }, - '#withLun':: d.fn(help='"Optional: FC target lun number"', args=[d.arg(name='lun', type=d.T.integer)]), + '#withLun':: d.fn(help='"lun is Optional: FC target lun number"', args=[d.arg(name='lun', type=d.T.integer)]), withLun(lun): { spec+: { fc+: { lun: lun } } }, - '#withReadOnly':: d.fn(help='"Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly is Optional: Defaults to false (read/write). 
ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { spec+: { fc+: { readOnly: readOnly } } }, - '#withTargetWWNs':: d.fn(help='"Optional: FC target worldwide names (WWNs)"', args=[d.arg(name='targetWWNs', type=d.T.array)]), + '#withTargetWWNs':: d.fn(help='"targetWWNs is Optional: FC target worldwide names (WWNs)"', args=[d.arg(name='targetWWNs', type=d.T.array)]), withTargetWWNs(targetWWNs): { spec+: { fc+: { targetWWNs: if std.isArray(v=targetWWNs) then targetWWNs else [targetWWNs] } } }, - '#withTargetWWNsMixin':: d.fn(help='"Optional: FC target worldwide names (WWNs)"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='targetWWNs', type=d.T.array)]), + '#withTargetWWNsMixin':: d.fn(help='"targetWWNs is Optional: FC target worldwide names (WWNs)"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='targetWWNs', type=d.T.array)]), withTargetWWNsMixin(targetWWNs): { spec+: { fc+: { targetWWNs+: if std.isArray(v=targetWWNs) then targetWWNs else [targetWWNs] } } }, - '#withWwids':: d.fn(help='"Optional: FC volume world wide identifiers (wwids) Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously."', args=[d.arg(name='wwids', type=d.T.array)]), + '#withWwids':: d.fn(help='"wwids Optional: FC volume world wide identifiers (wwids) Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously."', args=[d.arg(name='wwids', type=d.T.array)]), withWwids(wwids): { spec+: { fc+: { wwids: if std.isArray(v=wwids) then wwids else [wwids] } } }, - '#withWwidsMixin':: d.fn(help='"Optional: FC volume world wide identifiers (wwids) Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='wwids', type=d.T.array)]), + '#withWwidsMixin':: d.fn(help='"wwids Optional: FC volume world wide identifiers (wwids) Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='wwids', type=d.T.array)]), withWwidsMixin(wwids): { spec+: { fc+: { wwids+: if std.isArray(v=wwids) then wwids else [wwids] } } }, }, '#flexVolume':: d.obj(help='"FlexPersistentVolumeSource represents a generic persistent volume resource that is provisioned/attached using an exec based plugin."'), flexVolume: { '#secretRef':: d.obj(help='"SecretReference represents a Secret Reference. 
It has enough information to retrieve secret in any namespace"'), secretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), withName(name): { spec+: { flexVolume+: { secretRef+: { name: name } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { spec+: { flexVolume+: { secretRef+: { namespace: namespace } } } }, }, - '#withDriver':: d.fn(help='"Driver is the name of the driver to use for this volume."', args=[d.arg(name='driver', type=d.T.string)]), + '#withDriver':: d.fn(help='"driver is the name of the driver to use for this volume."', args=[d.arg(name='driver', type=d.T.string)]), withDriver(driver): { spec+: { flexVolume+: { driver: driver } } }, - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". The default filesystem depends on FlexVolume script."', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType is the Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". The default filesystem depends on FlexVolume script."', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { spec+: { flexVolume+: { fsType: fsType } } }, - '#withOptions':: d.fn(help='"Optional: Extra command options if any."', args=[d.arg(name='options', type=d.T.object)]), + '#withOptions':: d.fn(help='"options is Optional: this field holds extra command options if any."', args=[d.arg(name='options', type=d.T.object)]), withOptions(options): { spec+: { flexVolume+: { options: options } } }, - '#withOptionsMixin':: d.fn(help='"Optional: Extra command options if any."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='options', type=d.T.object)]), + '#withOptionsMixin':: d.fn(help='"options is Optional: this field holds extra command options if any."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='options', type=d.T.object)]), withOptionsMixin(options): { spec+: { flexVolume+: { options+: options } } }, - '#withReadOnly':: d.fn(help='"Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly is Optional: defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { spec+: { flexVolume+: { readOnly: readOnly } } }, }, '#flocker':: d.obj(help='"Represents a Flocker volume mounted by the Flocker agent. One and only one of datasetName and datasetUUID should be set. 
Flocker volumes do not support ownership management or SELinux relabeling."'), flocker: { - '#withDatasetName':: d.fn(help='"Name of the dataset stored as metadata -> name on the dataset for Flocker should be considered as deprecated"', args=[d.arg(name='datasetName', type=d.T.string)]), + '#withDatasetName':: d.fn(help='"datasetName is Name of the dataset stored as metadata -> name on the dataset for Flocker should be considered as deprecated"', args=[d.arg(name='datasetName', type=d.T.string)]), withDatasetName(datasetName): { spec+: { flocker+: { datasetName: datasetName } } }, - '#withDatasetUUID':: d.fn(help='"UUID of the dataset. This is unique identifier of a Flocker dataset"', args=[d.arg(name='datasetUUID', type=d.T.string)]), + '#withDatasetUUID':: d.fn(help='"datasetUUID is the UUID of the dataset. This is unique identifier of a Flocker dataset"', args=[d.arg(name='datasetUUID', type=d.T.string)]), withDatasetUUID(datasetUUID): { spec+: { flocker+: { datasetUUID: datasetUUID } } }, }, '#gcePersistentDisk':: d.obj(help='"Represents a Persistent Disk resource in Google Compute Engine.\\n\\nA GCE PD must exist before mounting to a container. The disk must also be in the same GCE project and zone as the kubelet. A GCE PD can only be mounted as read/write once or read-only many times. GCE PDs support ownership management and SELinux relabeling."'), gcePersistentDisk: { - '#withFsType':: d.fn(help='"Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType is filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { spec+: { gcePersistentDisk+: { fsType: fsType } } }, - '#withPartition':: d.fn(help='"The partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \\"1\\". Similarly, the volume partition for /dev/sda is \\"0\\" (or you can leave the property empty). More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='partition', type=d.T.integer)]), + '#withPartition':: d.fn(help='"partition is the partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \\"1\\". Similarly, the volume partition for /dev/sda is \\"0\\" (or you can leave the property empty). More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='partition', type=d.T.integer)]), withPartition(partition): { spec+: { gcePersistentDisk+: { partition: partition } } }, - '#withPdName':: d.fn(help='"Unique name of the PD resource in GCE. Used to identify the disk in GCE. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='pdName', type=d.T.string)]), + '#withPdName':: d.fn(help='"pdName is unique name of the PD resource in GCE. Used to identify the disk in GCE. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='pdName', type=d.T.string)]), withPdName(pdName): { spec+: { gcePersistentDisk+: { pdName: pdName } } }, - '#withReadOnly':: d.fn(help='"ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { spec+: { gcePersistentDisk+: { readOnly: readOnly } } }, }, '#glusterfs':: d.obj(help='"Represents a Glusterfs mount that lasts the lifetime of a pod. Glusterfs volumes do not support ownership management or SELinux relabeling."'), glusterfs: { - '#withEndpoints':: d.fn(help='"EndpointsName is the endpoint name that details Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='endpoints', type=d.T.string)]), + '#withEndpoints':: d.fn(help='"endpoints is the endpoint name that details Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='endpoints', type=d.T.string)]), withEndpoints(endpoints): { spec+: { glusterfs+: { endpoints: endpoints } } }, - '#withEndpointsNamespace':: d.fn(help='"EndpointsNamespace is the namespace that contains Glusterfs endpoint. If this field is empty, the EndpointNamespace defaults to the same namespace as the bound PVC. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='endpointsNamespace', type=d.T.string)]), + '#withEndpointsNamespace':: d.fn(help='"endpointsNamespace is the namespace that contains Glusterfs endpoint. If this field is empty, the EndpointNamespace defaults to the same namespace as the bound PVC. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='endpointsNamespace', type=d.T.string)]), withEndpointsNamespace(endpointsNamespace): { spec+: { glusterfs+: { endpointsNamespace: endpointsNamespace } } }, - '#withPath':: d.fn(help='"Path is the Glusterfs volume path. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='path', type=d.T.string)]), + '#withPath':: d.fn(help='"path is the Glusterfs volume path. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='path', type=d.T.string)]), withPath(path): { spec+: { glusterfs+: { path: path } } }, - '#withReadOnly':: d.fn(help='"ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions. Defaults to false. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly here will force the Glusterfs volume to be mounted with read-only permissions. Defaults to false. 
More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { spec+: { glusterfs+: { readOnly: readOnly } } }, }, '#hostPath':: d.obj(help='"Represents a host path mapped into a pod. Host path volumes do not support ownership management or SELinux relabeling."'), hostPath: { - '#withPath':: d.fn(help='"Path of the directory on the host. If the path is a symlink, it will follow the link to the real path. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath"', args=[d.arg(name='path', type=d.T.string)]), + '#withPath':: d.fn(help='"path of the directory on the host. If the path is a symlink, it will follow the link to the real path. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath"', args=[d.arg(name='path', type=d.T.string)]), withPath(path): { spec+: { hostPath+: { path: path } } }, - '#withType':: d.fn(help='"Type for HostPath Volume Defaults to \\"\\" More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath"', args=[d.arg(name='type', type=d.T.string)]), + '#withType':: d.fn(help='"type for HostPath Volume Defaults to \\"\\" More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath"', args=[d.arg(name='type', type=d.T.string)]), withType(type): { spec+: { hostPath+: { type: type } } }, }, '#iscsi':: d.obj(help='"ISCSIPersistentVolumeSource represents an ISCSI disk. ISCSI volumes can only be mounted as read/write once. ISCSI volumes support ownership management and SELinux relabeling."'), iscsi: { '#secretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), secretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), withName(name): { spec+: { iscsi+: { secretRef+: { name: name } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { spec+: { iscsi+: { secretRef+: { namespace: namespace } } } }, }, - '#withChapAuthDiscovery':: d.fn(help='"whether support iSCSI Discovery CHAP authentication"', args=[d.arg(name='chapAuthDiscovery', type=d.T.boolean)]), + '#withChapAuthDiscovery':: d.fn(help='"chapAuthDiscovery defines whether support iSCSI Discovery CHAP authentication"', args=[d.arg(name='chapAuthDiscovery', type=d.T.boolean)]), withChapAuthDiscovery(chapAuthDiscovery): { spec+: { iscsi+: { chapAuthDiscovery: chapAuthDiscovery } } }, - '#withChapAuthSession':: d.fn(help='"whether support iSCSI Session CHAP authentication"', args=[d.arg(name='chapAuthSession', type=d.T.boolean)]), + '#withChapAuthSession':: d.fn(help='"chapAuthSession defines whether support iSCSI Session CHAP authentication"', args=[d.arg(name='chapAuthSession', type=d.T.boolean)]), withChapAuthSession(chapAuthSession): { spec+: { iscsi+: { chapAuthSession: chapAuthSession } } }, - '#withFsType':: d.fn(help='"Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. 
Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi"', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType is the filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi"', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { spec+: { iscsi+: { fsType: fsType } } }, - '#withInitiatorName':: d.fn(help='"Custom iSCSI Initiator Name. If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface : will be created for the connection."', args=[d.arg(name='initiatorName', type=d.T.string)]), + '#withInitiatorName':: d.fn(help='"initiatorName is the custom iSCSI Initiator Name. If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface : will be created for the connection."', args=[d.arg(name='initiatorName', type=d.T.string)]), withInitiatorName(initiatorName): { spec+: { iscsi+: { initiatorName: initiatorName } } }, - '#withIqn':: d.fn(help='"Target iSCSI Qualified Name."', args=[d.arg(name='iqn', type=d.T.string)]), + '#withIqn':: d.fn(help='"iqn is Target iSCSI Qualified Name."', args=[d.arg(name='iqn', type=d.T.string)]), withIqn(iqn): { spec+: { iscsi+: { iqn: iqn } } }, - '#withIscsiInterface':: d.fn(help="\"iSCSI Interface Name that uses an iSCSI transport. Defaults to 'default' (tcp).\"", args=[d.arg(name='iscsiInterface', type=d.T.string)]), + '#withIscsiInterface':: d.fn(help="\"iscsiInterface is the interface Name that uses an iSCSI transport. Defaults to 'default' (tcp).\"", args=[d.arg(name='iscsiInterface', type=d.T.string)]), withIscsiInterface(iscsiInterface): { spec+: { iscsi+: { iscsiInterface: iscsiInterface } } }, - '#withLun':: d.fn(help='"iSCSI Target Lun number."', args=[d.arg(name='lun', type=d.T.integer)]), + '#withLun':: d.fn(help='"lun is iSCSI Target Lun number."', args=[d.arg(name='lun', type=d.T.integer)]), withLun(lun): { spec+: { iscsi+: { lun: lun } } }, - '#withPortals':: d.fn(help='"iSCSI Target Portal List. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260)."', args=[d.arg(name='portals', type=d.T.array)]), + '#withPortals':: d.fn(help='"portals is the iSCSI Target Portal List. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260)."', args=[d.arg(name='portals', type=d.T.array)]), withPortals(portals): { spec+: { iscsi+: { portals: if std.isArray(v=portals) then portals else [portals] } } }, - '#withPortalsMixin':: d.fn(help='"iSCSI Target Portal List. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260)."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='portals', type=d.T.array)]), + '#withPortalsMixin':: d.fn(help='"portals is the iSCSI Target Portal List. 
The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260)."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='portals', type=d.T.array)]), withPortalsMixin(portals): { spec+: { iscsi+: { portals+: if std.isArray(v=portals) then portals else [portals] } } }, - '#withReadOnly':: d.fn(help='"ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false."', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { spec+: { iscsi+: { readOnly: readOnly } } }, - '#withTargetPortal':: d.fn(help='"iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260)."', args=[d.arg(name='targetPortal', type=d.T.string)]), + '#withTargetPortal':: d.fn(help='"targetPortal is iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260)."', args=[d.arg(name='targetPortal', type=d.T.string)]), withTargetPortal(targetPortal): { spec+: { iscsi+: { targetPortal: targetPortal } } }, }, '#local':: d.obj(help='"Local represents directly-attached storage with node affinity (Beta feature)"'), 'local': { - '#withFsType':: d.fn(help='"Filesystem type to mount. It applies only when the Path is a block device. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". The default value is to auto-select a fileystem if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType is the filesystem type to mount. It applies only when the Path is a block device. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". The default value is to auto-select a filesystem if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { spec+: { 'local'+: { fsType: fsType } } }, - '#withPath':: d.fn(help='"The full path to the volume on the node. It can be either a directory or block device (disk, partition, ...)."', args=[d.arg(name='path', type=d.T.string)]), + '#withPath':: d.fn(help='"path of the full path to the volume on the node. It can be either a directory or block device (disk, partition, ...)."', args=[d.arg(name='path', type=d.T.string)]), withPath(path): { spec+: { 'local'+: { path: path } } }, }, '#nfs':: d.obj(help='"Represents an NFS mount that lasts the lifetime of a pod. NFS volumes do not support ownership management or SELinux relabeling."'), nfs: { - '#withPath':: d.fn(help='"Path that is exported by the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs"', args=[d.arg(name='path', type=d.T.string)]), + '#withPath':: d.fn(help='"path that is exported by the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs"', args=[d.arg(name='path', type=d.T.string)]), withPath(path): { spec+: { nfs+: { path: path } } }, - '#withReadOnly':: d.fn(help='"ReadOnly here will force the NFS export to be mounted with read-only permissions. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs"', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly here will force the NFS export to be mounted with read-only permissions. 
Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs"', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { spec+: { nfs+: { readOnly: readOnly } } }, - '#withServer':: d.fn(help='"Server is the hostname or IP address of the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs"', args=[d.arg(name='server', type=d.T.string)]), + '#withServer':: d.fn(help='"server is the hostname or IP address of the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs"', args=[d.arg(name='server', type=d.T.string)]), withServer(server): { spec+: { nfs+: { server: server } } }, }, '#nodeAffinity':: d.obj(help='"VolumeNodeAffinity defines constraints that limit what nodes this volume can be accessed from."'), @@ -321,87 +326,87 @@ }, '#photonPersistentDisk':: d.obj(help='"Represents a Photon Controller persistent disk resource."'), photonPersistentDisk: { - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { spec+: { photonPersistentDisk+: { fsType: fsType } } }, - '#withPdID':: d.fn(help='"ID that identifies Photon Controller persistent disk"', args=[d.arg(name='pdID', type=d.T.string)]), + '#withPdID':: d.fn(help='"pdID is the ID that identifies Photon Controller persistent disk"', args=[d.arg(name='pdID', type=d.T.string)]), withPdID(pdID): { spec+: { photonPersistentDisk+: { pdID: pdID } } }, }, '#portworxVolume':: d.obj(help='"PortworxVolumeSource represents a Portworx volume resource."'), portworxVolume: { - '#withFsType':: d.fn(help='"FSType represents the filesystem type to mount Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fSType represents the filesystem type to mount Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { spec+: { portworxVolume+: { fsType: fsType } } }, - '#withReadOnly':: d.fn(help='"Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { spec+: { portworxVolume+: { readOnly: readOnly } } }, - '#withVolumeID':: d.fn(help='"VolumeID uniquely identifies a Portworx volume"', args=[d.arg(name='volumeID', type=d.T.string)]), + '#withVolumeID':: d.fn(help='"volumeID uniquely identifies a Portworx volume"', args=[d.arg(name='volumeID', type=d.T.string)]), withVolumeID(volumeID): { spec+: { portworxVolume+: { volumeID: volumeID } } }, }, '#quobyte':: d.obj(help='"Represents a Quobyte mount that lasts the lifetime of a pod. 
Quobyte volumes do not support ownership management or SELinux relabeling."'), quobyte: { - '#withGroup':: d.fn(help='"Group to map volume access to Default is no group"', args=[d.arg(name='group', type=d.T.string)]), + '#withGroup':: d.fn(help='"group to map volume access to Default is no group"', args=[d.arg(name='group', type=d.T.string)]), withGroup(group): { spec+: { quobyte+: { group: group } } }, - '#withReadOnly':: d.fn(help='"ReadOnly here will force the Quobyte volume to be mounted with read-only permissions. Defaults to false."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly here will force the Quobyte volume to be mounted with read-only permissions. Defaults to false."', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { spec+: { quobyte+: { readOnly: readOnly } } }, - '#withRegistry':: d.fn(help='"Registry represents a single or multiple Quobyte Registry services specified as a string as host:port pair (multiple entries are separated with commas) which acts as the central registry for volumes"', args=[d.arg(name='registry', type=d.T.string)]), + '#withRegistry':: d.fn(help='"registry represents a single or multiple Quobyte Registry services specified as a string as host:port pair (multiple entries are separated with commas) which acts as the central registry for volumes"', args=[d.arg(name='registry', type=d.T.string)]), withRegistry(registry): { spec+: { quobyte+: { registry: registry } } }, - '#withTenant':: d.fn(help='"Tenant owning the given Quobyte volume in the Backend Used with dynamically provisioned Quobyte volumes, value is set by the plugin"', args=[d.arg(name='tenant', type=d.T.string)]), + '#withTenant':: d.fn(help='"tenant owning the given Quobyte volume in the Backend Used with dynamically provisioned Quobyte volumes, value is set by the plugin"', args=[d.arg(name='tenant', type=d.T.string)]), withTenant(tenant): { spec+: { quobyte+: { tenant: tenant } } }, - '#withUser':: d.fn(help='"User to map volume access to Defaults to serivceaccount user"', args=[d.arg(name='user', type=d.T.string)]), + '#withUser':: d.fn(help='"user to map volume access to Defaults to serivceaccount user"', args=[d.arg(name='user', type=d.T.string)]), withUser(user): { spec+: { quobyte+: { user: user } } }, - '#withVolume':: d.fn(help='"Volume is a string that references an already created Quobyte volume by name."', args=[d.arg(name='volume', type=d.T.string)]), + '#withVolume':: d.fn(help='"volume is a string that references an already created Quobyte volume by name."', args=[d.arg(name='volume', type=d.T.string)]), withVolume(volume): { spec+: { quobyte+: { volume: volume } } }, }, '#rbd':: d.obj(help='"Represents a Rados Block Device mount that lasts the lifetime of a pod. RBD volumes support ownership management and SELinux relabeling."'), rbd: { '#secretRef':: d.obj(help='"SecretReference represents a Secret Reference. 
It has enough information to retrieve secret in any namespace"'), secretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), withName(name): { spec+: { rbd+: { secretRef+: { name: name } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { spec+: { rbd+: { secretRef+: { namespace: namespace } } } }, }, - '#withFsType':: d.fn(help='"Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd"', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType is the filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd"', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { spec+: { rbd+: { fsType: fsType } } }, - '#withImage':: d.fn(help='"The rados image name. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='image', type=d.T.string)]), + '#withImage':: d.fn(help='"image is the rados image name. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='image', type=d.T.string)]), withImage(image): { spec+: { rbd+: { image: image } } }, - '#withKeyring':: d.fn(help='"Keyring is the path to key ring for RBDUser. Default is /etc/ceph/keyring. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='keyring', type=d.T.string)]), + '#withKeyring':: d.fn(help='"keyring is the path to key ring for RBDUser. Default is /etc/ceph/keyring. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='keyring', type=d.T.string)]), withKeyring(keyring): { spec+: { rbd+: { keyring: keyring } } }, - '#withMonitors':: d.fn(help='"A collection of Ceph monitors. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='monitors', type=d.T.array)]), + '#withMonitors':: d.fn(help='"monitors is a collection of Ceph monitors. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='monitors', type=d.T.array)]), withMonitors(monitors): { spec+: { rbd+: { monitors: if std.isArray(v=monitors) then monitors else [monitors] } } }, - '#withMonitorsMixin':: d.fn(help='"A collection of Ceph monitors. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='monitors', type=d.T.array)]), + '#withMonitorsMixin':: d.fn(help='"monitors is a collection of Ceph monitors. 
More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='monitors', type=d.T.array)]), withMonitorsMixin(monitors): { spec+: { rbd+: { monitors+: if std.isArray(v=monitors) then monitors else [monitors] } } }, - '#withPool':: d.fn(help='"The rados pool name. Default is rbd. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='pool', type=d.T.string)]), + '#withPool':: d.fn(help='"pool is the rados pool name. Default is rbd. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='pool', type=d.T.string)]), withPool(pool): { spec+: { rbd+: { pool: pool } } }, - '#withReadOnly':: d.fn(help='"ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { spec+: { rbd+: { readOnly: readOnly } } }, - '#withUser':: d.fn(help='"The rados user name. Default is admin. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='user', type=d.T.string)]), + '#withUser':: d.fn(help='"user is the rados user name. Default is admin. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='user', type=d.T.string)]), withUser(user): { spec+: { rbd+: { user: user } } }, }, '#scaleIO':: d.obj(help='"ScaleIOPersistentVolumeSource represents a persistent ScaleIO volume"'), scaleIO: { '#secretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), secretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), withName(name): { spec+: { scaleIO+: { secretRef+: { name: name } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { spec+: { scaleIO+: { secretRef+: { namespace: namespace } } } }, }, - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Default is \\"xfs\\', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". 
Default is \\"xfs\\', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { spec+: { scaleIO+: { fsType: fsType } } }, - '#withGateway':: d.fn(help='"The host address of the ScaleIO API Gateway."', args=[d.arg(name='gateway', type=d.T.string)]), + '#withGateway':: d.fn(help='"gateway is the host address of the ScaleIO API Gateway."', args=[d.arg(name='gateway', type=d.T.string)]), withGateway(gateway): { spec+: { scaleIO+: { gateway: gateway } } }, - '#withProtectionDomain':: d.fn(help='"The name of the ScaleIO Protection Domain for the configured storage."', args=[d.arg(name='protectionDomain', type=d.T.string)]), + '#withProtectionDomain':: d.fn(help='"protectionDomain is the name of the ScaleIO Protection Domain for the configured storage."', args=[d.arg(name='protectionDomain', type=d.T.string)]), withProtectionDomain(protectionDomain): { spec+: { scaleIO+: { protectionDomain: protectionDomain } } }, - '#withReadOnly':: d.fn(help='"Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { spec+: { scaleIO+: { readOnly: readOnly } } }, - '#withSslEnabled':: d.fn(help='"Flag to enable/disable SSL communication with Gateway, default false"', args=[d.arg(name='sslEnabled', type=d.T.boolean)]), + '#withSslEnabled':: d.fn(help='"sslEnabled is the flag to enable/disable SSL communication with Gateway, default false"', args=[d.arg(name='sslEnabled', type=d.T.boolean)]), withSslEnabled(sslEnabled): { spec+: { scaleIO+: { sslEnabled: sslEnabled } } }, - '#withStorageMode':: d.fn(help='"Indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. Default is ThinProvisioned."', args=[d.arg(name='storageMode', type=d.T.string)]), + '#withStorageMode':: d.fn(help='"storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. 
Default is ThinProvisioned."', args=[d.arg(name='storageMode', type=d.T.string)]), withStorageMode(storageMode): { spec+: { scaleIO+: { storageMode: storageMode } } }, - '#withStoragePool':: d.fn(help='"The ScaleIO Storage Pool associated with the protection domain."', args=[d.arg(name='storagePool', type=d.T.string)]), + '#withStoragePool':: d.fn(help='"storagePool is the ScaleIO Storage Pool associated with the protection domain."', args=[d.arg(name='storagePool', type=d.T.string)]), withStoragePool(storagePool): { spec+: { scaleIO+: { storagePool: storagePool } } }, - '#withSystem':: d.fn(help='"The name of the storage system as configured in ScaleIO."', args=[d.arg(name='system', type=d.T.string)]), + '#withSystem':: d.fn(help='"system is the name of the storage system as configured in ScaleIO."', args=[d.arg(name='system', type=d.T.string)]), withSystem(system): { spec+: { scaleIO+: { system: system } } }, - '#withVolumeName':: d.fn(help='"The name of a volume already created in the ScaleIO system that is associated with this volume source."', args=[d.arg(name='volumeName', type=d.T.string)]), + '#withVolumeName':: d.fn(help='"volumeName is the name of a volume already created in the ScaleIO system that is associated with this volume source."', args=[d.arg(name='volumeName', type=d.T.string)]), withVolumeName(volumeName): { spec+: { scaleIO+: { volumeName: volumeName } } }, }, '#storageos':: d.obj(help='"Represents a StorageOS persistent volume resource."'), @@ -423,42 +428,44 @@ '#withUid':: d.fn(help='"UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { spec+: { storageos+: { secretRef+: { uid: uid } } } }, }, - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { spec+: { storageos+: { fsType: fsType } } }, - '#withReadOnly':: d.fn(help='"Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { spec+: { storageos+: { readOnly: readOnly } } }, - '#withVolumeName':: d.fn(help='"VolumeName is the human-readable name of the StorageOS volume. Volume names are only unique within a namespace."', args=[d.arg(name='volumeName', type=d.T.string)]), + '#withVolumeName':: d.fn(help='"volumeName is the human-readable name of the StorageOS volume. Volume names are only unique within a namespace."', args=[d.arg(name='volumeName', type=d.T.string)]), withVolumeName(volumeName): { spec+: { storageos+: { volumeName: volumeName } } }, - '#withVolumeNamespace':: d.fn(help="\"VolumeNamespace specifies the scope of the volume within StorageOS. If no namespace is specified then the Pod's namespace will be used. 
This allows the Kubernetes name scoping to be mirrored within StorageOS for tighter integration. Set VolumeName to any name to override the default behaviour. Set to \\\"default\\\" if you are not using namespaces within StorageOS. Namespaces that do not pre-exist within StorageOS will be created.\"", args=[d.arg(name='volumeNamespace', type=d.T.string)]), + '#withVolumeNamespace':: d.fn(help="\"volumeNamespace specifies the scope of the volume within StorageOS. If no namespace is specified then the Pod's namespace will be used. This allows the Kubernetes name scoping to be mirrored within StorageOS for tighter integration. Set VolumeName to any name to override the default behaviour. Set to \\\"default\\\" if you are not using namespaces within StorageOS. Namespaces that do not pre-exist within StorageOS will be created.\"", args=[d.arg(name='volumeNamespace', type=d.T.string)]), withVolumeNamespace(volumeNamespace): { spec+: { storageos+: { volumeNamespace: volumeNamespace } } }, }, '#vsphereVolume':: d.obj(help='"Represents a vSphere volume resource."'), vsphereVolume: { - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType is filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { spec+: { vsphereVolume+: { fsType: fsType } } }, - '#withStoragePolicyID':: d.fn(help='"Storage Policy Based Management (SPBM) profile ID associated with the StoragePolicyName."', args=[d.arg(name='storagePolicyID', type=d.T.string)]), + '#withStoragePolicyID':: d.fn(help='"storagePolicyID is the storage Policy Based Management (SPBM) profile ID associated with the StoragePolicyName."', args=[d.arg(name='storagePolicyID', type=d.T.string)]), withStoragePolicyID(storagePolicyID): { spec+: { vsphereVolume+: { storagePolicyID: storagePolicyID } } }, - '#withStoragePolicyName':: d.fn(help='"Storage Policy Based Management (SPBM) profile name."', args=[d.arg(name='storagePolicyName', type=d.T.string)]), + '#withStoragePolicyName':: d.fn(help='"storagePolicyName is the storage Policy Based Management (SPBM) profile name."', args=[d.arg(name='storagePolicyName', type=d.T.string)]), withStoragePolicyName(storagePolicyName): { spec+: { vsphereVolume+: { storagePolicyName: storagePolicyName } } }, - '#withVolumePath':: d.fn(help='"Path that identifies vSphere volume vmdk"', args=[d.arg(name='volumePath', type=d.T.string)]), + '#withVolumePath':: d.fn(help='"volumePath is the path that identifies vSphere volume vmdk"', args=[d.arg(name='volumePath', type=d.T.string)]), withVolumePath(volumePath): { spec+: { vsphereVolume+: { volumePath: volumePath } } }, }, - '#withAccessModes':: d.fn(help='"AccessModes contains all ways the volume can be mounted. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes"', args=[d.arg(name='accessModes', type=d.T.array)]), + '#withAccessModes':: d.fn(help='"accessModes contains all ways the volume can be mounted. 
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes"', args=[d.arg(name='accessModes', type=d.T.array)]), withAccessModes(accessModes): { spec+: { accessModes: if std.isArray(v=accessModes) then accessModes else [accessModes] } }, - '#withAccessModesMixin':: d.fn(help='"AccessModes contains all ways the volume can be mounted. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='accessModes', type=d.T.array)]), + '#withAccessModesMixin':: d.fn(help='"accessModes contains all ways the volume can be mounted. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='accessModes', type=d.T.array)]), withAccessModesMixin(accessModes): { spec+: { accessModes+: if std.isArray(v=accessModes) then accessModes else [accessModes] } }, - '#withCapacity':: d.fn(help="\"A description of the persistent volume's resources and capacity. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity\"", args=[d.arg(name='capacity', type=d.T.object)]), + '#withCapacity':: d.fn(help="\"capacity is the description of the persistent volume's resources and capacity. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity\"", args=[d.arg(name='capacity', type=d.T.object)]), withCapacity(capacity): { spec+: { capacity: capacity } }, - '#withCapacityMixin':: d.fn(help="\"A description of the persistent volume's resources and capacity. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='capacity', type=d.T.object)]), + '#withCapacityMixin':: d.fn(help="\"capacity is the description of the persistent volume's resources and capacity. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='capacity', type=d.T.object)]), withCapacityMixin(capacity): { spec+: { capacity+: capacity } }, - '#withMountOptions':: d.fn(help='"A list of mount options, e.g. [\\"ro\\", \\"soft\\"]. Not validated - mount will simply fail if one is invalid. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#mount-options"', args=[d.arg(name='mountOptions', type=d.T.array)]), + '#withMountOptions':: d.fn(help='"mountOptions is the list of mount options, e.g. [\\"ro\\", \\"soft\\"]. Not validated - mount will simply fail if one is invalid. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#mount-options"', args=[d.arg(name='mountOptions', type=d.T.array)]), withMountOptions(mountOptions): { spec+: { mountOptions: if std.isArray(v=mountOptions) then mountOptions else [mountOptions] } }, - '#withMountOptionsMixin':: d.fn(help='"A list of mount options, e.g. [\\"ro\\", \\"soft\\"]. Not validated - mount will simply fail if one is invalid. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#mount-options"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='mountOptions', type=d.T.array)]), + '#withMountOptionsMixin':: d.fn(help='"mountOptions is the list of mount options, e.g. [\\"ro\\", \\"soft\\"]. Not validated - mount will simply fail if one is invalid. 
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#mount-options"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='mountOptions', type=d.T.array)]), withMountOptionsMixin(mountOptions): { spec+: { mountOptions+: if std.isArray(v=mountOptions) then mountOptions else [mountOptions] } }, - '#withPersistentVolumeReclaimPolicy':: d.fn(help='"What happens to a persistent volume when released from its claim. Valid options are Retain (default for manually created PersistentVolumes), Delete (default for dynamically provisioned PersistentVolumes), and Recycle (deprecated). Recycle must be supported by the volume plugin underlying this PersistentVolume. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#reclaiming"', args=[d.arg(name='persistentVolumeReclaimPolicy', type=d.T.string)]), + '#withPersistentVolumeReclaimPolicy':: d.fn(help='"persistentVolumeReclaimPolicy defines what happens to a persistent volume when released from its claim. Valid options are Retain (default for manually created PersistentVolumes), Delete (default for dynamically provisioned PersistentVolumes), and Recycle (deprecated). Recycle must be supported by the volume plugin underlying this PersistentVolume. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#reclaiming"', args=[d.arg(name='persistentVolumeReclaimPolicy', type=d.T.string)]), withPersistentVolumeReclaimPolicy(persistentVolumeReclaimPolicy): { spec+: { persistentVolumeReclaimPolicy: persistentVolumeReclaimPolicy } }, - '#withStorageClassName':: d.fn(help='"Name of StorageClass to which this persistent volume belongs. Empty value means that this volume does not belong to any StorageClass."', args=[d.arg(name='storageClassName', type=d.T.string)]), + '#withStorageClassName':: d.fn(help='"storageClassName is the name of StorageClass to which this persistent volume belongs. Empty value means that this volume does not belong to any StorageClass."', args=[d.arg(name='storageClassName', type=d.T.string)]), withStorageClassName(storageClassName): { spec+: { storageClassName: storageClassName } }, + '#withVolumeAttributesClassName':: d.fn(help='"Name of VolumeAttributesClass to which this persistent volume belongs. Empty value is not allowed. When this field is not set, it indicates that this volume does not belong to any VolumeAttributesClass. This field is mutable and can be changed by the CSI driver after a volume has been updated successfully to a new class. For an unbound PersistentVolume, the volumeAttributesClassName will be matched with unbound PersistentVolumeClaims during the binding process. This is an alpha field and requires enabling VolumeAttributesClass feature."', args=[d.arg(name='volumeAttributesClassName', type=d.T.string)]), + withVolumeAttributesClassName(volumeAttributesClassName): { spec+: { volumeAttributesClassName: volumeAttributesClassName } }, '#withVolumeMode':: d.fn(help='"volumeMode defines if a volume is intended to be used with a formatted filesystem or to remain in raw block state. 
Value of Filesystem is implied when not included in spec."', args=[d.arg(name='volumeMode', type=d.T.string)]), withVolumeMode(volumeMode): { spec+: { volumeMode: volumeMode } }, }, diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/persistentVolumeClaim.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/persistentVolumeClaim.libsonnet similarity index 75% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/persistentVolumeClaim.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/persistentVolumeClaim.libsonnet index 43fb102e610..ea146d73c7e 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/persistentVolumeClaim.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/persistentVolumeClaim.libsonnet @@ -3,12 +3,10 @@ '#':: d.pkg(name='persistentVolumeClaim', url='', help="\"PersistentVolumeClaim is a user's request for and claim to a persistent volume\""), '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. 
This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -19,21 +17,21 @@ withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. 
This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { metadata+: { generateName: generateName } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { metadata+: { labels+: labels } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". 
The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { metadata+: { namespace: namespace } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, @@ -41,9 +39,9 @@ withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. 
They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { metadata+: { uid: uid } }, }, '#new':: d.fn(help='new returns an instance of PersistentVolumeClaim', args=[d.arg(name='name', type=d.T.string)]), @@ -62,15 +60,26 @@ '#withName':: d.fn(help='"Name is the name of resource being referenced"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { spec+: { dataSource+: { name: name } } }, }, - '#resources':: d.obj(help='"ResourceRequirements describes the compute resource requirements."'), + '#dataSourceRef':: d.obj(help=''), + dataSourceRef: { + '#withApiGroup':: d.fn(help='"APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required."', args=[d.arg(name='apiGroup', type=d.T.string)]), + withApiGroup(apiGroup): { spec+: { dataSourceRef+: { apiGroup: apiGroup } } }, + '#withKind':: d.fn(help='"Kind is the type of resource being referenced"', args=[d.arg(name='kind', type=d.T.string)]), + withKind(kind): { spec+: { dataSourceRef+: { kind: kind } } }, + '#withName':: d.fn(help='"Name is the name of resource being referenced"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { spec+: { dataSourceRef+: { name: name } } }, + '#withNamespace':: d.fn(help="\"Namespace is the namespace of resource being referenced Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. 
(Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled.\"", args=[d.arg(name='namespace', type=d.T.string)]), + withNamespace(namespace): { spec+: { dataSourceRef+: { namespace: namespace } } }, + }, + '#resources':: d.obj(help='"VolumeResourceRequirements describes the storage resource requirements for a volume."'), resources: { '#withLimits':: d.fn(help='"Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/"', args=[d.arg(name='limits', type=d.T.object)]), withLimits(limits): { spec+: { resources+: { limits: limits } } }, '#withLimitsMixin':: d.fn(help='"Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='limits', type=d.T.object)]), withLimitsMixin(limits): { spec+: { resources+: { limits+: limits } } }, - '#withRequests':: d.fn(help='"Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/"', args=[d.arg(name='requests', type=d.T.object)]), + '#withRequests':: d.fn(help='"Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/"', args=[d.arg(name='requests', type=d.T.object)]), withRequests(requests): { spec+: { resources+: { requests: requests } } }, - '#withRequestsMixin':: d.fn(help='"Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='requests', type=d.T.object)]), + '#withRequestsMixin':: d.fn(help='"Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='requests', type=d.T.object)]), withRequestsMixin(requests): { spec+: { resources+: { requests+: requests } } }, }, '#selector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), @@ -84,15 +93,17 @@ '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". 
The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), withMatchLabelsMixin(matchLabels): { spec+: { selector+: { matchLabels+: matchLabels } } }, }, - '#withAccessModes':: d.fn(help='"AccessModes contains the desired access modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1"', args=[d.arg(name='accessModes', type=d.T.array)]), + '#withAccessModes':: d.fn(help='"accessModes contains the desired access modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1"', args=[d.arg(name='accessModes', type=d.T.array)]), withAccessModes(accessModes): { spec+: { accessModes: if std.isArray(v=accessModes) then accessModes else [accessModes] } }, - '#withAccessModesMixin':: d.fn(help='"AccessModes contains the desired access modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='accessModes', type=d.T.array)]), + '#withAccessModesMixin':: d.fn(help='"accessModes contains the desired access modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='accessModes', type=d.T.array)]), withAccessModesMixin(accessModes): { spec+: { accessModes+: if std.isArray(v=accessModes) then accessModes else [accessModes] } }, - '#withStorageClassName':: d.fn(help='"Name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1"', args=[d.arg(name='storageClassName', type=d.T.string)]), + '#withStorageClassName':: d.fn(help='"storageClassName is the name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1"', args=[d.arg(name='storageClassName', type=d.T.string)]), withStorageClassName(storageClassName): { spec+: { storageClassName: storageClassName } }, + '#withVolumeAttributesClassName':: d.fn(help="\"volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. If specified, the CSI driver will create or update the volume with the attributes defined in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass will be applied to the claim but it's not allowed to reset this field to empty string once it is set. If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass will be set by the persistentvolume controller if it exists. If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource exists. 
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#volumeattributesclass (Alpha) Using this field requires the VolumeAttributesClass feature gate to be enabled.\"", args=[d.arg(name='volumeAttributesClassName', type=d.T.string)]), + withVolumeAttributesClassName(volumeAttributesClassName): { spec+: { volumeAttributesClassName: volumeAttributesClassName } }, '#withVolumeMode':: d.fn(help='"volumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec."', args=[d.arg(name='volumeMode', type=d.T.string)]), withVolumeMode(volumeMode): { spec+: { volumeMode: volumeMode } }, - '#withVolumeName':: d.fn(help='"VolumeName is the binding reference to the PersistentVolume backing this claim."', args=[d.arg(name='volumeName', type=d.T.string)]), + '#withVolumeName':: d.fn(help='"volumeName is the binding reference to the PersistentVolume backing this claim."', args=[d.arg(name='volumeName', type=d.T.string)]), withVolumeName(volumeName): { spec+: { volumeName: volumeName } }, }, '#mixin': 'ignore', diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/persistentVolumeClaimCondition.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/persistentVolumeClaimCondition.libsonnet similarity index 66% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/persistentVolumeClaimCondition.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/persistentVolumeClaimCondition.libsonnet index 80bd3eb5b7b..d4dd24ade92 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/persistentVolumeClaimCondition.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/persistentVolumeClaimCondition.libsonnet @@ -1,13 +1,13 @@ { local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='persistentVolumeClaimCondition', url='', help='"PersistentVolumeClaimCondition contails details about state of pvc"'), + '#':: d.pkg(name='persistentVolumeClaimCondition', url='', help='"PersistentVolumeClaimCondition contains details about state of pvc"'), '#withLastProbeTime':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='lastProbeTime', type=d.T.string)]), withLastProbeTime(lastProbeTime): { lastProbeTime: lastProbeTime }, '#withLastTransitionTime':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. 
Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='lastTransitionTime', type=d.T.string)]), withLastTransitionTime(lastTransitionTime): { lastTransitionTime: lastTransitionTime }, - '#withMessage':: d.fn(help='"Human-readable message indicating details about last transition."', args=[d.arg(name='message', type=d.T.string)]), + '#withMessage':: d.fn(help='"message is the human-readable message indicating details about last transition."', args=[d.arg(name='message', type=d.T.string)]), withMessage(message): { message: message }, - '#withReason':: d.fn(help="\"Unique, this should be a short, machine understandable string that gives the reason for condition's last transition. If it reports \\\"ResizeStarted\\\" that means the underlying persistent volume is being resized.\"", args=[d.arg(name='reason', type=d.T.string)]), + '#withReason':: d.fn(help="\"reason is a unique, this should be a short, machine understandable string that gives the reason for condition's last transition. If it reports \\\"ResizeStarted\\\" that means the underlying persistent volume is being resized.\"", args=[d.arg(name='reason', type=d.T.string)]), withReason(reason): { reason: reason }, '#withType':: d.fn(help='', args=[d.arg(name='type', type=d.T.string)]), withType(type): { type: type }, diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/persistentVolumeClaimSpec.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/persistentVolumeClaimSpec.libsonnet similarity index 60% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/persistentVolumeClaimSpec.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/persistentVolumeClaimSpec.libsonnet index 37a653e22c1..811c0cf70a9 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/persistentVolumeClaimSpec.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/persistentVolumeClaimSpec.libsonnet @@ -10,15 +10,26 @@ '#withName':: d.fn(help='"Name is the name of resource being referenced"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { dataSource+: { name: name } }, }, - '#resources':: d.obj(help='"ResourceRequirements describes the compute resource requirements."'), + '#dataSourceRef':: d.obj(help=''), + dataSourceRef: { + '#withApiGroup':: d.fn(help='"APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. 
For any other third-party types, APIGroup is required."', args=[d.arg(name='apiGroup', type=d.T.string)]), + withApiGroup(apiGroup): { dataSourceRef+: { apiGroup: apiGroup } }, + '#withKind':: d.fn(help='"Kind is the type of resource being referenced"', args=[d.arg(name='kind', type=d.T.string)]), + withKind(kind): { dataSourceRef+: { kind: kind } }, + '#withName':: d.fn(help='"Name is the name of resource being referenced"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { dataSourceRef+: { name: name } }, + '#withNamespace':: d.fn(help="\"Namespace is the namespace of resource being referenced Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled.\"", args=[d.arg(name='namespace', type=d.T.string)]), + withNamespace(namespace): { dataSourceRef+: { namespace: namespace } }, + }, + '#resources':: d.obj(help='"VolumeResourceRequirements describes the storage resource requirements for a volume."'), resources: { '#withLimits':: d.fn(help='"Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/"', args=[d.arg(name='limits', type=d.T.object)]), withLimits(limits): { resources+: { limits: limits } }, '#withLimitsMixin':: d.fn(help='"Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='limits', type=d.T.object)]), withLimitsMixin(limits): { resources+: { limits+: limits } }, - '#withRequests':: d.fn(help='"Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/"', args=[d.arg(name='requests', type=d.T.object)]), + '#withRequests':: d.fn(help='"Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/"', args=[d.arg(name='requests', type=d.T.object)]), withRequests(requests): { resources+: { requests: requests } }, - '#withRequestsMixin':: d.fn(help='"Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='requests', type=d.T.object)]), + '#withRequestsMixin':: d.fn(help='"Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='requests', type=d.T.object)]), withRequestsMixin(requests): { resources+: { requests+: requests } }, }, '#selector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), @@ -32,15 +43,17 @@ '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), withMatchLabelsMixin(matchLabels): { selector+: { matchLabels+: matchLabels } }, }, - '#withAccessModes':: d.fn(help='"AccessModes contains the desired access modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1"', args=[d.arg(name='accessModes', type=d.T.array)]), + '#withAccessModes':: d.fn(help='"accessModes contains the desired access modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1"', args=[d.arg(name='accessModes', type=d.T.array)]), withAccessModes(accessModes): { accessModes: if std.isArray(v=accessModes) then accessModes else [accessModes] }, - '#withAccessModesMixin':: d.fn(help='"AccessModes contains the desired access modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='accessModes', type=d.T.array)]), + '#withAccessModesMixin':: d.fn(help='"accessModes contains the desired access modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='accessModes', type=d.T.array)]), withAccessModesMixin(accessModes): { accessModes+: if std.isArray(v=accessModes) then accessModes else [accessModes] }, - '#withStorageClassName':: d.fn(help='"Name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1"', args=[d.arg(name='storageClassName', type=d.T.string)]), + '#withStorageClassName':: d.fn(help='"storageClassName is the name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1"', args=[d.arg(name='storageClassName', type=d.T.string)]), withStorageClassName(storageClassName): { storageClassName: storageClassName }, + '#withVolumeAttributesClassName':: d.fn(help="\"volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. If specified, the CSI driver will create or update the volume with the attributes defined in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass will be applied to the claim but it's not allowed to reset this field to empty string once it is set. 
If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass will be set by the persistentvolume controller if it exists. If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource exists. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#volumeattributesclass (Alpha) Using this field requires the VolumeAttributesClass feature gate to be enabled.\"", args=[d.arg(name='volumeAttributesClassName', type=d.T.string)]), + withVolumeAttributesClassName(volumeAttributesClassName): { volumeAttributesClassName: volumeAttributesClassName }, '#withVolumeMode':: d.fn(help='"volumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec."', args=[d.arg(name='volumeMode', type=d.T.string)]), withVolumeMode(volumeMode): { volumeMode: volumeMode }, - '#withVolumeName':: d.fn(help='"VolumeName is the binding reference to the PersistentVolume backing this claim."', args=[d.arg(name='volumeName', type=d.T.string)]), + '#withVolumeName':: d.fn(help='"volumeName is the binding reference to the PersistentVolume backing this claim."', args=[d.arg(name='volumeName', type=d.T.string)]), withVolumeName(volumeName): { volumeName: volumeName }, '#mixin': 'ignore', mixin: self, diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/persistentVolumeClaimStatus.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/persistentVolumeClaimStatus.libsonnet new file mode 100644 index 00000000000..371bca51a82 --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/persistentVolumeClaimStatus.libsonnet @@ -0,0 +1,35 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='persistentVolumeClaimStatus', url='', help='"PersistentVolumeClaimStatus is the current status of a persistent volume claim."'), + '#modifyVolumeStatus':: d.obj(help='"ModifyVolumeStatus represents the status object of ControllerModifyVolume operation"'), + modifyVolumeStatus: { + '#withTargetVolumeAttributesClassName':: d.fn(help='"targetVolumeAttributesClassName is the name of the VolumeAttributesClass the PVC currently being reconciled"', args=[d.arg(name='targetVolumeAttributesClassName', type=d.T.string)]), + withTargetVolumeAttributesClassName(targetVolumeAttributesClassName): { modifyVolumeStatus+: { targetVolumeAttributesClassName: targetVolumeAttributesClassName } }, + }, + '#withAccessModes':: d.fn(help='"accessModes contains the actual access modes the volume backing the PVC has. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1"', args=[d.arg(name='accessModes', type=d.T.array)]), + withAccessModes(accessModes): { accessModes: if std.isArray(v=accessModes) then accessModes else [accessModes] }, + '#withAccessModesMixin':: d.fn(help='"accessModes contains the actual access modes the volume backing the PVC has. 
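Every helper in these generated files follows one pattern: `withX` returns a small object fragment that sets a field outright, `withXMixin` uses `+:` so repeated calls append instead of replace, and callers combine fragments with Jsonnet's object-merge operator `+`. A minimal self-contained sketch of that pattern, using local stand-ins rather than the vendored library itself (the field names mirror the PersistentVolumeClaimSpec helpers above; the concrete values are invented):

// Local stand-ins that mirror the generated PersistentVolumeClaimSpec helpers.
local pvcSpec = {
  withAccessModes(accessModes): { accessModes: if std.isArray(v=accessModes) then accessModes else [accessModes] },
  withStorageClassName(storageClassName): { storageClassName: storageClassName },
  resources: {
    withRequests(requests): { resources+: { requests: requests } },
    withLimits(limits): { resources+: { limits: limits } },
  },
};

// Fragments merge with `+`; the `resources+:` fields deep-merge, so requests and
// limits set in separate calls land in the same resources object.
{
  spec:
    pvcSpec.withAccessModes('ReadWriteOnce')
    + pvcSpec.withStorageClassName('fast-ssd')  // hypothetical StorageClass name
    + pvcSpec.resources.withRequests({ storage: '10Gi' })
    + pvcSpec.resources.withLimits({ storage: '20Gi' }),
}

The 1.29 additions in this hunk (`dataSourceRef`, `withVolumeAttributesClassName`) plug into exactly the same composition model.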
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='accessModes', type=d.T.array)]), + withAccessModesMixin(accessModes): { accessModes+: if std.isArray(v=accessModes) then accessModes else [accessModes] }, + '#withAllocatedResourceStatuses':: d.fn(help="\"allocatedResourceStatuses stores status of resource being resized for the given PVC. Key names follow standard Kubernetes label syntax. Valid values are either:\\n\\t* Un-prefixed keys:\\n\\t\\t- storage - the capacity of the volume.\\n\\t* Custom resources must use implementation-defined prefixed names such as \\\"example.com/my-custom-resource\\\"\\nApart from above values - keys that are unprefixed or have kubernetes.io prefix are considered reserved and hence may not be used.\\n\\nClaimResourceStatus can be in any of following states:\\n\\t- ControllerResizeInProgress:\\n\\t\\tState set when resize controller starts resizing the volume in control-plane.\\n\\t- ControllerResizeFailed:\\n\\t\\tState set when resize has failed in resize controller with a terminal error.\\n\\t- NodeResizePending:\\n\\t\\tState set when resize controller has finished resizing the volume but further resizing of\\n\\t\\tvolume is needed on the node.\\n\\t- NodeResizeInProgress:\\n\\t\\tState set when kubelet starts resizing the volume.\\n\\t- NodeResizeFailed:\\n\\t\\tState set when resizing has failed in kubelet with a terminal error. Transient errors don't set\\n\\t\\tNodeResizeFailed.\\nFor example: if expanding a PVC for more capacity - this field can be one of the following states:\\n\\t- pvc.status.allocatedResourceStatus['storage'] = \\\"ControllerResizeInProgress\\\"\\n - pvc.status.allocatedResourceStatus['storage'] = \\\"ControllerResizeFailed\\\"\\n - pvc.status.allocatedResourceStatus['storage'] = \\\"NodeResizePending\\\"\\n - pvc.status.allocatedResourceStatus['storage'] = \\\"NodeResizeInProgress\\\"\\n - pvc.status.allocatedResourceStatus['storage'] = \\\"NodeResizeFailed\\\"\\nWhen this field is not set, it means that no resize operation is in progress for the given PVC.\\n\\nA controller that receives PVC update with previously unknown resourceName or ClaimResourceStatus should ignore the update for the purpose it was designed. For example - a controller that only is responsible for resizing capacity of the volume, should ignore PVC updates that change other valid resources associated with PVC.\\n\\nThis is an alpha field and requires enabling RecoverVolumeExpansionFailure feature.\"", args=[d.arg(name='allocatedResourceStatuses', type=d.T.object)]), + withAllocatedResourceStatuses(allocatedResourceStatuses): { allocatedResourceStatuses: allocatedResourceStatuses }, + '#withAllocatedResourceStatusesMixin':: d.fn(help="\"allocatedResourceStatuses stores status of resource being resized for the given PVC. Key names follow standard Kubernetes label syntax. 
Valid values are either:\\n\\t* Un-prefixed keys:\\n\\t\\t- storage - the capacity of the volume.\\n\\t* Custom resources must use implementation-defined prefixed names such as \\\"example.com/my-custom-resource\\\"\\nApart from above values - keys that are unprefixed or have kubernetes.io prefix are considered reserved and hence may not be used.\\n\\nClaimResourceStatus can be in any of following states:\\n\\t- ControllerResizeInProgress:\\n\\t\\tState set when resize controller starts resizing the volume in control-plane.\\n\\t- ControllerResizeFailed:\\n\\t\\tState set when resize has failed in resize controller with a terminal error.\\n\\t- NodeResizePending:\\n\\t\\tState set when resize controller has finished resizing the volume but further resizing of\\n\\t\\tvolume is needed on the node.\\n\\t- NodeResizeInProgress:\\n\\t\\tState set when kubelet starts resizing the volume.\\n\\t- NodeResizeFailed:\\n\\t\\tState set when resizing has failed in kubelet with a terminal error. Transient errors don't set\\n\\t\\tNodeResizeFailed.\\nFor example: if expanding a PVC for more capacity - this field can be one of the following states:\\n\\t- pvc.status.allocatedResourceStatus['storage'] = \\\"ControllerResizeInProgress\\\"\\n - pvc.status.allocatedResourceStatus['storage'] = \\\"ControllerResizeFailed\\\"\\n - pvc.status.allocatedResourceStatus['storage'] = \\\"NodeResizePending\\\"\\n - pvc.status.allocatedResourceStatus['storage'] = \\\"NodeResizeInProgress\\\"\\n - pvc.status.allocatedResourceStatus['storage'] = \\\"NodeResizeFailed\\\"\\nWhen this field is not set, it means that no resize operation is in progress for the given PVC.\\n\\nA controller that receives PVC update with previously unknown resourceName or ClaimResourceStatus should ignore the update for the purpose it was designed. For example - a controller that only is responsible for resizing capacity of the volume, should ignore PVC updates that change other valid resources associated with PVC.\\n\\nThis is an alpha field and requires enabling RecoverVolumeExpansionFailure feature.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='allocatedResourceStatuses', type=d.T.object)]), + withAllocatedResourceStatusesMixin(allocatedResourceStatuses): { allocatedResourceStatuses+: allocatedResourceStatuses }, + '#withAllocatedResources':: d.fn(help='"allocatedResources tracks the resources allocated to a PVC including its capacity. Key names follow standard Kubernetes label syntax. Valid values are either:\\n\\t* Un-prefixed keys:\\n\\t\\t- storage - the capacity of the volume.\\n\\t* Custom resources must use implementation-defined prefixed names such as \\"example.com/my-custom-resource\\"\\nApart from above values - keys that are unprefixed or have kubernetes.io prefix are considered reserved and hence may not be used.\\n\\nCapacity reported here may be larger than the actual capacity when a volume expansion operation is requested. For storage quota, the larger value from allocatedResources and PVC.spec.resources is used. If allocatedResources is not set, PVC.spec.resources alone is used for quota calculation. If a volume expansion capacity request is lowered, allocatedResources is only lowered if there are no expansion operations in progress and if the actual volume capacity is equal or lower than the requested capacity.\\n\\nA controller that receives PVC update with previously unknown resourceName should ignore the update for the purpose it was designed. 
For example - a controller that only is responsible for resizing capacity of the volume, should ignore PVC updates that change other valid resources associated with PVC.\\n\\nThis is an alpha field and requires enabling RecoverVolumeExpansionFailure feature."', args=[d.arg(name='allocatedResources', type=d.T.object)]), + withAllocatedResources(allocatedResources): { allocatedResources: allocatedResources }, + '#withAllocatedResourcesMixin':: d.fn(help='"allocatedResources tracks the resources allocated to a PVC including its capacity. Key names follow standard Kubernetes label syntax. Valid values are either:\\n\\t* Un-prefixed keys:\\n\\t\\t- storage - the capacity of the volume.\\n\\t* Custom resources must use implementation-defined prefixed names such as \\"example.com/my-custom-resource\\"\\nApart from above values - keys that are unprefixed or have kubernetes.io prefix are considered reserved and hence may not be used.\\n\\nCapacity reported here may be larger than the actual capacity when a volume expansion operation is requested. For storage quota, the larger value from allocatedResources and PVC.spec.resources is used. If allocatedResources is not set, PVC.spec.resources alone is used for quota calculation. If a volume expansion capacity request is lowered, allocatedResources is only lowered if there are no expansion operations in progress and if the actual volume capacity is equal or lower than the requested capacity.\\n\\nA controller that receives PVC update with previously unknown resourceName should ignore the update for the purpose it was designed. For example - a controller that only is responsible for resizing capacity of the volume, should ignore PVC updates that change other valid resources associated with PVC.\\n\\nThis is an alpha field and requires enabling RecoverVolumeExpansionFailure feature."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='allocatedResources', type=d.T.object)]), + withAllocatedResourcesMixin(allocatedResources): { allocatedResources+: allocatedResources }, + '#withCapacity':: d.fn(help='"capacity represents the actual resources of the underlying volume."', args=[d.arg(name='capacity', type=d.T.object)]), + withCapacity(capacity): { capacity: capacity }, + '#withCapacityMixin':: d.fn(help='"capacity represents the actual resources of the underlying volume."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='capacity', type=d.T.object)]), + withCapacityMixin(capacity): { capacity+: capacity }, + '#withConditions':: d.fn(help="\"conditions is the current Condition of persistent volume claim. If underlying persistent volume is being resized then the Condition will be set to 'ResizeStarted'.\"", args=[d.arg(name='conditions', type=d.T.array)]), + withConditions(conditions): { conditions: if std.isArray(v=conditions) then conditions else [conditions] }, + '#withConditionsMixin':: d.fn(help="\"conditions is the current Condition of persistent volume claim. If underlying persistent volume is being resized then the Condition will be set to 'ResizeStarted'.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='conditions', type=d.T.array)]), + withConditionsMixin(conditions): { conditions+: if std.isArray(v=conditions) then conditions else [conditions] }, + '#withCurrentVolumeAttributesClassName':: d.fn(help='"currentVolumeAttributesClassName is the current name of the VolumeAttributesClass the PVC is using. 
When unset, there is no VolumeAttributeClass applied to this PersistentVolumeClaim This is an alpha field and requires enabling VolumeAttributesClass feature."', args=[d.arg(name='currentVolumeAttributesClassName', type=d.T.string)]), + withCurrentVolumeAttributesClassName(currentVolumeAttributesClassName): { currentVolumeAttributesClassName: currentVolumeAttributesClassName }, + '#withPhase':: d.fn(help='"phase represents the current phase of PersistentVolumeClaim."', args=[d.arg(name='phase', type=d.T.string)]), + withPhase(phase): { phase: phase }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/persistentVolumeClaimTemplate.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/persistentVolumeClaimTemplate.libsonnet similarity index 75% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/persistentVolumeClaimTemplate.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/persistentVolumeClaimTemplate.libsonnet index 3e0ace78d50..92d58d84b99 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/persistentVolumeClaimTemplate.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/persistentVolumeClaimTemplate.libsonnet @@ -3,12 +3,10 @@ '#':: d.pkg(name='persistentVolumeClaimTemplate', url='', help='"PersistentVolumeClaimTemplate is used to produce PersistentVolumeClaim objects as part of an EphemeralVolumeSource."'), '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -19,21 +17,21 @@ withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. 
The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { metadata+: { generateName: generateName } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { metadata+: { labels+: labels } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. 
This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { metadata+: { namespace: namespace } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. 
There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, @@ -41,9 +39,9 @@ withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { metadata+: { uid: uid } }, }, '#spec':: d.obj(help='"PersistentVolumeClaimSpec describes the common attributes of storage devices and allows a Source for provider-specific attributes"'), @@ -57,15 +55,26 @@ '#withName':: d.fn(help='"Name is the name of resource being referenced"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { spec+: { dataSource+: { name: name } } }, }, - '#resources':: d.obj(help='"ResourceRequirements describes the compute resource requirements."'), + '#dataSourceRef':: d.obj(help=''), + dataSourceRef: { + '#withApiGroup':: d.fn(help='"APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. 
For any other third-party types, APIGroup is required."', args=[d.arg(name='apiGroup', type=d.T.string)]), + withApiGroup(apiGroup): { spec+: { dataSourceRef+: { apiGroup: apiGroup } } }, + '#withKind':: d.fn(help='"Kind is the type of resource being referenced"', args=[d.arg(name='kind', type=d.T.string)]), + withKind(kind): { spec+: { dataSourceRef+: { kind: kind } } }, + '#withName':: d.fn(help='"Name is the name of resource being referenced"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { spec+: { dataSourceRef+: { name: name } } }, + '#withNamespace':: d.fn(help="\"Namespace is the namespace of resource being referenced Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled.\"", args=[d.arg(name='namespace', type=d.T.string)]), + withNamespace(namespace): { spec+: { dataSourceRef+: { namespace: namespace } } }, + }, + '#resources':: d.obj(help='"VolumeResourceRequirements describes the storage resource requirements for a volume."'), resources: { '#withLimits':: d.fn(help='"Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/"', args=[d.arg(name='limits', type=d.T.object)]), withLimits(limits): { spec+: { resources+: { limits: limits } } }, '#withLimitsMixin':: d.fn(help='"Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='limits', type=d.T.object)]), withLimitsMixin(limits): { spec+: { resources+: { limits+: limits } } }, - '#withRequests':: d.fn(help='"Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/"', args=[d.arg(name='requests', type=d.T.object)]), + '#withRequests':: d.fn(help='"Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/"', args=[d.arg(name='requests', type=d.T.object)]), withRequests(requests): { spec+: { resources+: { requests: requests } } }, - '#withRequestsMixin':: d.fn(help='"Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='requests', type=d.T.object)]), + '#withRequestsMixin':: d.fn(help='"Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='requests', type=d.T.object)]), withRequestsMixin(requests): { spec+: { resources+: { requests+: requests } } }, }, '#selector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), @@ -79,15 +88,17 @@ '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), withMatchLabelsMixin(matchLabels): { spec+: { selector+: { matchLabels+: matchLabels } } }, }, - '#withAccessModes':: d.fn(help='"AccessModes contains the desired access modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1"', args=[d.arg(name='accessModes', type=d.T.array)]), + '#withAccessModes':: d.fn(help='"accessModes contains the desired access modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1"', args=[d.arg(name='accessModes', type=d.T.array)]), withAccessModes(accessModes): { spec+: { accessModes: if std.isArray(v=accessModes) then accessModes else [accessModes] } }, - '#withAccessModesMixin':: d.fn(help='"AccessModes contains the desired access modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='accessModes', type=d.T.array)]), + '#withAccessModesMixin':: d.fn(help='"accessModes contains the desired access modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='accessModes', type=d.T.array)]), withAccessModesMixin(accessModes): { spec+: { accessModes+: if std.isArray(v=accessModes) then accessModes else [accessModes] } }, - '#withStorageClassName':: d.fn(help='"Name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1"', args=[d.arg(name='storageClassName', type=d.T.string)]), + '#withStorageClassName':: d.fn(help='"storageClassName is the name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1"', args=[d.arg(name='storageClassName', type=d.T.string)]), withStorageClassName(storageClassName): { spec+: { storageClassName: storageClassName } }, + '#withVolumeAttributesClassName':: d.fn(help="\"volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. If specified, the CSI driver will create or update the volume with the attributes defined in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, it can be changed after the claim is created. 
An empty string value means that no VolumeAttributesClass will be applied to the claim but it's not allowed to reset this field to empty string once it is set. If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass will be set by the persistentvolume controller if it exists. If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource exists. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#volumeattributesclass (Alpha) Using this field requires the VolumeAttributesClass feature gate to be enabled.\"", args=[d.arg(name='volumeAttributesClassName', type=d.T.string)]), + withVolumeAttributesClassName(volumeAttributesClassName): { spec+: { volumeAttributesClassName: volumeAttributesClassName } }, '#withVolumeMode':: d.fn(help='"volumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec."', args=[d.arg(name='volumeMode', type=d.T.string)]), withVolumeMode(volumeMode): { spec+: { volumeMode: volumeMode } }, - '#withVolumeName':: d.fn(help='"VolumeName is the binding reference to the PersistentVolume backing this claim."', args=[d.arg(name='volumeName', type=d.T.string)]), + '#withVolumeName':: d.fn(help='"volumeName is the binding reference to the PersistentVolume backing this claim."', args=[d.arg(name='volumeName', type=d.T.string)]), withVolumeName(volumeName): { spec+: { volumeName: volumeName } }, }, '#mixin': 'ignore', diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/persistentVolumeClaimVolumeSource.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/persistentVolumeClaimVolumeSource.libsonnet similarity index 77% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/persistentVolumeClaimVolumeSource.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/persistentVolumeClaimVolumeSource.libsonnet index 5f434704f60..41a0af8d37d 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/persistentVolumeClaimVolumeSource.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/persistentVolumeClaimVolumeSource.libsonnet @@ -1,9 +1,9 @@ { local d = (import 'doc-util/main.libsonnet'), '#':: d.pkg(name='persistentVolumeClaimVolumeSource', url='', help="\"PersistentVolumeClaimVolumeSource references the user's PVC in the same namespace. This volume finds the bound PV and mounts that volume for the pod. A PersistentVolumeClaimVolumeSource is, essentially, a wrapper around another type of volume that is owned by someone else (the system).\""), - '#withClaimName':: d.fn(help='"ClaimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims"', args=[d.arg(name='claimName', type=d.T.string)]), + '#withClaimName':: d.fn(help='"claimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. 
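The `'#withX'::` entries alongside each function are docsonnet metadata: declared with `::`, they stay hidden and never reach rendered manifests, while the visible function of the same name returns the actual fragment. A small sketch of the idea, with the metadata reduced to a placeholder string (the real files build it with doc-util's `d.fn`, and the VolumeSnapshot values below are invented):

local dataSourceRef = {
  // Hidden (`::`) docsonnet field: documentation only, never manifested.
  '#withKind':: 'placeholder for d.fn(help=..., args=...) metadata',
  withKind(kind): { dataSourceRef+: { kind: kind } },
  withName(name): { dataSourceRef+: { name: name } },
};

// Calling the visible helpers yields a plain fragment ready to merge into a PVC spec:
dataSourceRef.withKind('VolumeSnapshot') + dataSourceRef.withName('db-snapshot')

Evaluating this prints { "dataSourceRef": { "kind": "VolumeSnapshot", "name": "db-snapshot" } }; hidden `::` fields are skipped during manifestation, which is why the doc strings in these vendored files never leak into generated manifests.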
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims"', args=[d.arg(name='claimName', type=d.T.string)]), withClaimName(claimName): { claimName: claimName }, - '#withReadOnly':: d.fn(help='"Will force the ReadOnly setting in VolumeMounts. Default false."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly Will force the ReadOnly setting in VolumeMounts. Default false."', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { readOnly: readOnly }, '#mixin': 'ignore', mixin: self, diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/persistentVolumeSpec.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/persistentVolumeSpec.libsonnet similarity index 54% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/persistentVolumeSpec.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/persistentVolumeSpec.libsonnet index 129be6a596e..e20d809ce6b 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/persistentVolumeSpec.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/persistentVolumeSpec.libsonnet @@ -3,77 +3,77 @@ '#':: d.pkg(name='persistentVolumeSpec', url='', help='"PersistentVolumeSpec is the specification of a persistent volume."'), '#awsElasticBlockStore':: d.obj(help='"Represents a Persistent Disk resource in AWS.\\n\\nAn AWS EBS disk must exist before mounting to a container. The disk must also be in the same AWS zone as the kubelet. An AWS EBS disk can only be mounted as read/write once. AWS EBS volumes support ownership management and SELinux relabeling."'), awsElasticBlockStore: { - '#withFsType':: d.fn(help='"Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore"', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType is the filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore"', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { awsElasticBlockStore+: { fsType: fsType } }, - '#withPartition':: d.fn(help='"The partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \\"1\\". Similarly, the volume partition for /dev/sda is \\"0\\" (or you can leave the property empty)."', args=[d.arg(name='partition', type=d.T.integer)]), + '#withPartition':: d.fn(help='"partition is the partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \\"1\\". 
Similarly, the volume partition for /dev/sda is \\"0\\" (or you can leave the property empty)."', args=[d.arg(name='partition', type=d.T.integer)]), withPartition(partition): { awsElasticBlockStore+: { partition: partition } }, - '#withReadOnly':: d.fn(help='"Specify \\"true\\" to force and set the ReadOnly property in VolumeMounts to \\"true\\". If omitted, the default is \\"false\\". More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore"', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly value true will force the readOnly setting in VolumeMounts. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore"', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { awsElasticBlockStore+: { readOnly: readOnly } }, - '#withVolumeID':: d.fn(help='"Unique ID of the persistent disk resource in AWS (Amazon EBS volume). More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore"', args=[d.arg(name='volumeID', type=d.T.string)]), + '#withVolumeID':: d.fn(help='"volumeID is unique ID of the persistent disk resource in AWS (Amazon EBS volume). More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore"', args=[d.arg(name='volumeID', type=d.T.string)]), withVolumeID(volumeID): { awsElasticBlockStore+: { volumeID: volumeID } }, }, '#azureDisk':: d.obj(help='"AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod."'), azureDisk: { - '#withCachingMode':: d.fn(help='"Host Caching mode: None, Read Only, Read Write."', args=[d.arg(name='cachingMode', type=d.T.string)]), + '#withCachingMode':: d.fn(help='"cachingMode is the Host Caching mode: None, Read Only, Read Write."', args=[d.arg(name='cachingMode', type=d.T.string)]), withCachingMode(cachingMode): { azureDisk+: { cachingMode: cachingMode } }, - '#withDiskName':: d.fn(help='"The Name of the data disk in the blob storage"', args=[d.arg(name='diskName', type=d.T.string)]), + '#withDiskName':: d.fn(help='"diskName is the Name of the data disk in the blob storage"', args=[d.arg(name='diskName', type=d.T.string)]), withDiskName(diskName): { azureDisk+: { diskName: diskName } }, - '#withDiskURI':: d.fn(help='"The URI the data disk in the blob storage"', args=[d.arg(name='diskURI', type=d.T.string)]), + '#withDiskURI':: d.fn(help='"diskURI is the URI of data disk in the blob storage"', args=[d.arg(name='diskURI', type=d.T.string)]), withDiskURI(diskURI): { azureDisk+: { diskURI: diskURI } }, - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType is Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { azureDisk+: { fsType: fsType } }, - '#withKind':: d.fn(help='"Expected values Shared: multiple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). 
defaults to shared"', args=[d.arg(name='kind', type=d.T.string)]), + '#withKind':: d.fn(help='"kind expected values are Shared: multiple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared"', args=[d.arg(name='kind', type=d.T.string)]), withKind(kind): { azureDisk+: { kind: kind } }, - '#withReadOnly':: d.fn(help='"Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { azureDisk+: { readOnly: readOnly } }, }, '#azureFile':: d.obj(help='"AzureFile represents an Azure File Service mount on the host and bind mount to the pod."'), azureFile: { - '#withReadOnly':: d.fn(help='"Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { azureFile+: { readOnly: readOnly } }, - '#withSecretName':: d.fn(help='"the name of secret that contains Azure Storage Account Name and Key"', args=[d.arg(name='secretName', type=d.T.string)]), + '#withSecretName':: d.fn(help='"secretName is the name of secret that contains Azure Storage Account Name and Key"', args=[d.arg(name='secretName', type=d.T.string)]), withSecretName(secretName): { azureFile+: { secretName: secretName } }, - '#withSecretNamespace':: d.fn(help='"the namespace of the secret that contains Azure Storage Account Name and Key default is the same as the Pod"', args=[d.arg(name='secretNamespace', type=d.T.string)]), + '#withSecretNamespace':: d.fn(help='"secretNamespace is the namespace of the secret that contains Azure Storage Account Name and Key default is the same as the Pod"', args=[d.arg(name='secretNamespace', type=d.T.string)]), withSecretNamespace(secretNamespace): { azureFile+: { secretNamespace: secretNamespace } }, - '#withShareName':: d.fn(help='"Share Name"', args=[d.arg(name='shareName', type=d.T.string)]), + '#withShareName':: d.fn(help='"shareName is the azure Share Name"', args=[d.arg(name='shareName', type=d.T.string)]), withShareName(shareName): { azureFile+: { shareName: shareName } }, }, '#cephfs':: d.obj(help='"Represents a Ceph Filesystem mount that lasts the lifetime of a pod Cephfs volumes do not support ownership management or SELinux relabeling."'), cephfs: { '#secretRef':: d.obj(help='"SecretReference represents a Secret Reference. 
It has enough information to retrieve secret in any namespace"'), secretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), withName(name): { cephfs+: { secretRef+: { name: name } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { cephfs+: { secretRef+: { namespace: namespace } } }, }, - '#withMonitors':: d.fn(help='"Required: Monitors is a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='monitors', type=d.T.array)]), + '#withMonitors':: d.fn(help='"monitors is Required: Monitors is a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='monitors', type=d.T.array)]), withMonitors(monitors): { cephfs+: { monitors: if std.isArray(v=monitors) then monitors else [monitors] } }, - '#withMonitorsMixin':: d.fn(help='"Required: Monitors is a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='monitors', type=d.T.array)]), + '#withMonitorsMixin':: d.fn(help='"monitors is Required: Monitors is a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='monitors', type=d.T.array)]), withMonitorsMixin(monitors): { cephfs+: { monitors+: if std.isArray(v=monitors) then monitors else [monitors] } }, - '#withPath':: d.fn(help='"Optional: Used as the mounted root, rather than the full Ceph tree, default is /"', args=[d.arg(name='path', type=d.T.string)]), + '#withPath':: d.fn(help='"path is Optional: Used as the mounted root, rather than the full Ceph tree, default is /"', args=[d.arg(name='path', type=d.T.string)]), withPath(path): { cephfs+: { path: path } }, - '#withReadOnly':: d.fn(help='"Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly is Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. 
More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { cephfs+: { readOnly: readOnly } }, - '#withSecretFile':: d.fn(help='"Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='secretFile', type=d.T.string)]), + '#withSecretFile':: d.fn(help='"secretFile is Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='secretFile', type=d.T.string)]), withSecretFile(secretFile): { cephfs+: { secretFile: secretFile } }, - '#withUser':: d.fn(help='"Optional: User is the rados user name, default is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='user', type=d.T.string)]), + '#withUser':: d.fn(help='"user is Optional: User is the rados user name, default is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='user', type=d.T.string)]), withUser(user): { cephfs+: { user: user } }, }, '#cinder':: d.obj(help='"Represents a cinder volume resource in Openstack. A Cinder volume must exist before mounting to a container. The volume must also be in the same region as the kubelet. Cinder volumes support ownership management and SELinux relabeling."'), cinder: { '#secretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), secretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), withName(name): { cinder+: { secretRef+: { name: name } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { cinder+: { secretRef+: { namespace: namespace } } }, }, - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://examples.k8s.io/mysql-cinder-pd/README.md"', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType Filesystem type to mount. Must be a filesystem type supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://examples.k8s.io/mysql-cinder-pd/README.md"', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { cinder+: { fsType: fsType } }, - '#withReadOnly':: d.fn(help='"Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/mysql-cinder-pd/README.md"', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly is Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. 
More info: https://examples.k8s.io/mysql-cinder-pd/README.md"', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { cinder+: { readOnly: readOnly } }, - '#withVolumeID':: d.fn(help='"volume id used to identify the volume in cinder. More info: https://examples.k8s.io/mysql-cinder-pd/README.md"', args=[d.arg(name='volumeID', type=d.T.string)]), + '#withVolumeID':: d.fn(help='"volumeID used to identify the volume in cinder. More info: https://examples.k8s.io/mysql-cinder-pd/README.md"', args=[d.arg(name='volumeID', type=d.T.string)]), withVolumeID(volumeID): { cinder+: { volumeID: volumeID } }, }, '#claimRef':: d.obj(help='"ObjectReference contains enough information to let you inspect or modify the referred object."'), @@ -97,164 +97,171 @@ csi: { '#controllerExpandSecretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), controllerExpandSecretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), withName(name): { csi+: { controllerExpandSecretRef+: { name: name } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { csi+: { controllerExpandSecretRef+: { namespace: namespace } } }, }, '#controllerPublishSecretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), controllerPublishSecretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), withName(name): { csi+: { controllerPublishSecretRef+: { name: name } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { csi+: { controllerPublishSecretRef+: { namespace: namespace } } }, }, + '#nodeExpandSecretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), + nodeExpandSecretRef: { + '#withName':: d.fn(help='"name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { csi+: { nodeExpandSecretRef+: { name: name } } }, + '#withNamespace':: d.fn(help='"namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), + withNamespace(namespace): { csi+: { nodeExpandSecretRef+: { namespace: namespace } } }, + }, '#nodePublishSecretRef':: d.obj(help='"SecretReference represents a Secret Reference. 
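`nodeExpandSecretRef` is one of the groups new in the 1.29 surface shown here; like the other CSI secret references it nests one level deeper under `csi+:`. A hedged sketch of how the helpers in this hunk would compose into a CSI persistent-volume source, again with local stand-ins (driver name, volume handle, and secret names are invented examples):

// Stand-ins mirroring the csi helpers in this hunk.
local csi = {
  withDriver(driver): { csi+: { driver: driver } },
  withVolumeHandle(volumeHandle): { csi+: { volumeHandle: volumeHandle } },
  nodeExpandSecretRef: {
    withName(name): { csi+: { nodeExpandSecretRef+: { name: name } } },
    withNamespace(namespace): { csi+: { nodeExpandSecretRef+: { namespace: namespace } } },
  },
};

// All fragments share `csi+:`, so driver, volumeHandle and the secret reference
// deep-merge into a single csi block.
csi.withDriver('ebs.csi.aws.com')
+ csi.withVolumeHandle('vol-0123456789abcdef0')
+ csi.nodeExpandSecretRef.withName('csi-expand-secret')
+ csi.nodeExpandSecretRef.withNamespace('kube-system')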
It has enough information to retrieve secret in any namespace"'), nodePublishSecretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), withName(name): { csi+: { nodePublishSecretRef+: { name: name } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { csi+: { nodePublishSecretRef+: { namespace: namespace } } }, }, '#nodeStageSecretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), nodeStageSecretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), withName(name): { csi+: { nodeStageSecretRef+: { name: name } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { csi+: { nodeStageSecretRef+: { namespace: namespace } } }, }, - '#withDriver':: d.fn(help='"Driver is the name of the driver to use for this volume. Required."', args=[d.arg(name='driver', type=d.T.string)]), + '#withDriver':: d.fn(help='"driver is the name of the driver to use for this volume. Required."', args=[d.arg(name='driver', type=d.T.string)]), withDriver(driver): { csi+: { driver: driver } }, - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\"."', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\"."', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { csi+: { fsType: fsType } }, - '#withReadOnly':: d.fn(help='"Optional: The value to pass to ControllerPublishVolumeRequest. Defaults to false (read/write)."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly value to pass to ControllerPublishVolumeRequest. 
Defaults to false (read/write)."', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { csi+: { readOnly: readOnly } }, - '#withVolumeAttributes':: d.fn(help='"Attributes of the volume to publish."', args=[d.arg(name='volumeAttributes', type=d.T.object)]), + '#withVolumeAttributes':: d.fn(help='"volumeAttributes of the volume to publish."', args=[d.arg(name='volumeAttributes', type=d.T.object)]), withVolumeAttributes(volumeAttributes): { csi+: { volumeAttributes: volumeAttributes } }, - '#withVolumeAttributesMixin':: d.fn(help='"Attributes of the volume to publish."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='volumeAttributes', type=d.T.object)]), + '#withVolumeAttributesMixin':: d.fn(help='"volumeAttributes of the volume to publish."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='volumeAttributes', type=d.T.object)]), withVolumeAttributesMixin(volumeAttributes): { csi+: { volumeAttributes+: volumeAttributes } }, - '#withVolumeHandle':: d.fn(help='"VolumeHandle is the unique volume name returned by the CSI volume plugin’s CreateVolume to refer to the volume on all subsequent calls. Required."', args=[d.arg(name='volumeHandle', type=d.T.string)]), + '#withVolumeHandle':: d.fn(help='"volumeHandle is the unique volume name returned by the CSI volume plugin’s CreateVolume to refer to the volume on all subsequent calls. Required."', args=[d.arg(name='volumeHandle', type=d.T.string)]), withVolumeHandle(volumeHandle): { csi+: { volumeHandle: volumeHandle } }, }, '#fc':: d.obj(help='"Represents a Fibre Channel volume. Fibre Channel volumes can only be mounted as read/write once. Fibre Channel volumes support ownership management and SELinux relabeling."'), fc: { - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { fc+: { fsType: fsType } }, - '#withLun':: d.fn(help='"Optional: FC target lun number"', args=[d.arg(name='lun', type=d.T.integer)]), + '#withLun':: d.fn(help='"lun is Optional: FC target lun number"', args=[d.arg(name='lun', type=d.T.integer)]), withLun(lun): { fc+: { lun: lun } }, - '#withReadOnly':: d.fn(help='"Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly is Optional: Defaults to false (read/write). 
ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { fc+: { readOnly: readOnly } }, - '#withTargetWWNs':: d.fn(help='"Optional: FC target worldwide names (WWNs)"', args=[d.arg(name='targetWWNs', type=d.T.array)]), + '#withTargetWWNs':: d.fn(help='"targetWWNs is Optional: FC target worldwide names (WWNs)"', args=[d.arg(name='targetWWNs', type=d.T.array)]), withTargetWWNs(targetWWNs): { fc+: { targetWWNs: if std.isArray(v=targetWWNs) then targetWWNs else [targetWWNs] } }, - '#withTargetWWNsMixin':: d.fn(help='"Optional: FC target worldwide names (WWNs)"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='targetWWNs', type=d.T.array)]), + '#withTargetWWNsMixin':: d.fn(help='"targetWWNs is Optional: FC target worldwide names (WWNs)"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='targetWWNs', type=d.T.array)]), withTargetWWNsMixin(targetWWNs): { fc+: { targetWWNs+: if std.isArray(v=targetWWNs) then targetWWNs else [targetWWNs] } }, - '#withWwids':: d.fn(help='"Optional: FC volume world wide identifiers (wwids) Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously."', args=[d.arg(name='wwids', type=d.T.array)]), + '#withWwids':: d.fn(help='"wwids Optional: FC volume world wide identifiers (wwids) Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously."', args=[d.arg(name='wwids', type=d.T.array)]), withWwids(wwids): { fc+: { wwids: if std.isArray(v=wwids) then wwids else [wwids] } }, - '#withWwidsMixin':: d.fn(help='"Optional: FC volume world wide identifiers (wwids) Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='wwids', type=d.T.array)]), + '#withWwidsMixin':: d.fn(help='"wwids Optional: FC volume world wide identifiers (wwids) Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='wwids', type=d.T.array)]), withWwidsMixin(wwids): { fc+: { wwids+: if std.isArray(v=wwids) then wwids else [wwids] } }, }, '#flexVolume':: d.obj(help='"FlexPersistentVolumeSource represents a generic persistent volume resource that is provisioned/attached using an exec based plugin."'), flexVolume: { '#secretRef':: d.obj(help='"SecretReference represents a Secret Reference. 
It has enough information to retrieve secret in any namespace"'), secretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), withName(name): { flexVolume+: { secretRef+: { name: name } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { flexVolume+: { secretRef+: { namespace: namespace } } }, }, - '#withDriver':: d.fn(help='"Driver is the name of the driver to use for this volume."', args=[d.arg(name='driver', type=d.T.string)]), + '#withDriver':: d.fn(help='"driver is the name of the driver to use for this volume."', args=[d.arg(name='driver', type=d.T.string)]), withDriver(driver): { flexVolume+: { driver: driver } }, - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". The default filesystem depends on FlexVolume script."', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType is the Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". The default filesystem depends on FlexVolume script."', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { flexVolume+: { fsType: fsType } }, - '#withOptions':: d.fn(help='"Optional: Extra command options if any."', args=[d.arg(name='options', type=d.T.object)]), + '#withOptions':: d.fn(help='"options is Optional: this field holds extra command options if any."', args=[d.arg(name='options', type=d.T.object)]), withOptions(options): { flexVolume+: { options: options } }, - '#withOptionsMixin':: d.fn(help='"Optional: Extra command options if any."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='options', type=d.T.object)]), + '#withOptionsMixin':: d.fn(help='"options is Optional: this field holds extra command options if any."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='options', type=d.T.object)]), withOptionsMixin(options): { flexVolume+: { options+: options } }, - '#withReadOnly':: d.fn(help='"Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly is Optional: defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { flexVolume+: { readOnly: readOnly } }, }, '#flocker':: d.obj(help='"Represents a Flocker volume mounted by the Flocker agent. One and only one of datasetName and datasetUUID should be set. 
Flocker volumes do not support ownership management or SELinux relabeling."'), flocker: { - '#withDatasetName':: d.fn(help='"Name of the dataset stored as metadata -> name on the dataset for Flocker should be considered as deprecated"', args=[d.arg(name='datasetName', type=d.T.string)]), + '#withDatasetName':: d.fn(help='"datasetName is Name of the dataset stored as metadata -> name on the dataset for Flocker should be considered as deprecated"', args=[d.arg(name='datasetName', type=d.T.string)]), withDatasetName(datasetName): { flocker+: { datasetName: datasetName } }, - '#withDatasetUUID':: d.fn(help='"UUID of the dataset. This is unique identifier of a Flocker dataset"', args=[d.arg(name='datasetUUID', type=d.T.string)]), + '#withDatasetUUID':: d.fn(help='"datasetUUID is the UUID of the dataset. This is unique identifier of a Flocker dataset"', args=[d.arg(name='datasetUUID', type=d.T.string)]), withDatasetUUID(datasetUUID): { flocker+: { datasetUUID: datasetUUID } }, }, '#gcePersistentDisk':: d.obj(help='"Represents a Persistent Disk resource in Google Compute Engine.\\n\\nA GCE PD must exist before mounting to a container. The disk must also be in the same GCE project and zone as the kubelet. A GCE PD can only be mounted as read/write once or read-only many times. GCE PDs support ownership management and SELinux relabeling."'), gcePersistentDisk: { - '#withFsType':: d.fn(help='"Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType is filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { gcePersistentDisk+: { fsType: fsType } }, - '#withPartition':: d.fn(help='"The partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \\"1\\". Similarly, the volume partition for /dev/sda is \\"0\\" (or you can leave the property empty). More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='partition', type=d.T.integer)]), + '#withPartition':: d.fn(help='"partition is the partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \\"1\\". Similarly, the volume partition for /dev/sda is \\"0\\" (or you can leave the property empty). More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='partition', type=d.T.integer)]), withPartition(partition): { gcePersistentDisk+: { partition: partition } }, - '#withPdName':: d.fn(help='"Unique name of the PD resource in GCE. Used to identify the disk in GCE. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='pdName', type=d.T.string)]), + '#withPdName':: d.fn(help='"pdName is unique name of the PD resource in GCE. Used to identify the disk in GCE. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='pdName', type=d.T.string)]), withPdName(pdName): { gcePersistentDisk+: { pdName: pdName } }, - '#withReadOnly':: d.fn(help='"ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { gcePersistentDisk+: { readOnly: readOnly } }, }, '#glusterfs':: d.obj(help='"Represents a Glusterfs mount that lasts the lifetime of a pod. Glusterfs volumes do not support ownership management or SELinux relabeling."'), glusterfs: { - '#withEndpoints':: d.fn(help='"EndpointsName is the endpoint name that details Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='endpoints', type=d.T.string)]), + '#withEndpoints':: d.fn(help='"endpoints is the endpoint name that details Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='endpoints', type=d.T.string)]), withEndpoints(endpoints): { glusterfs+: { endpoints: endpoints } }, - '#withEndpointsNamespace':: d.fn(help='"EndpointsNamespace is the namespace that contains Glusterfs endpoint. If this field is empty, the EndpointNamespace defaults to the same namespace as the bound PVC. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='endpointsNamespace', type=d.T.string)]), + '#withEndpointsNamespace':: d.fn(help='"endpointsNamespace is the namespace that contains Glusterfs endpoint. If this field is empty, the EndpointNamespace defaults to the same namespace as the bound PVC. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='endpointsNamespace', type=d.T.string)]), withEndpointsNamespace(endpointsNamespace): { glusterfs+: { endpointsNamespace: endpointsNamespace } }, - '#withPath':: d.fn(help='"Path is the Glusterfs volume path. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='path', type=d.T.string)]), + '#withPath':: d.fn(help='"path is the Glusterfs volume path. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='path', type=d.T.string)]), withPath(path): { glusterfs+: { path: path } }, - '#withReadOnly':: d.fn(help='"ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions. Defaults to false. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly here will force the Glusterfs volume to be mounted with read-only permissions. Defaults to false. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { glusterfs+: { readOnly: readOnly } }, }, '#hostPath':: d.obj(help='"Represents a host path mapped into a pod. Host path volumes do not support ownership management or SELinux relabeling."'), hostPath: { - '#withPath':: d.fn(help='"Path of the directory on the host. 
If the path is a symlink, it will follow the link to the real path. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath"', args=[d.arg(name='path', type=d.T.string)]), + '#withPath':: d.fn(help='"path of the directory on the host. If the path is a symlink, it will follow the link to the real path. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath"', args=[d.arg(name='path', type=d.T.string)]), withPath(path): { hostPath+: { path: path } }, - '#withType':: d.fn(help='"Type for HostPath Volume Defaults to \\"\\" More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath"', args=[d.arg(name='type', type=d.T.string)]), + '#withType':: d.fn(help='"type for HostPath Volume Defaults to \\"\\" More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath"', args=[d.arg(name='type', type=d.T.string)]), withType(type): { hostPath+: { type: type } }, }, '#iscsi':: d.obj(help='"ISCSIPersistentVolumeSource represents an ISCSI disk. ISCSI volumes can only be mounted as read/write once. ISCSI volumes support ownership management and SELinux relabeling."'), iscsi: { '#secretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), secretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), withName(name): { iscsi+: { secretRef+: { name: name } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { iscsi+: { secretRef+: { namespace: namespace } } }, }, - '#withChapAuthDiscovery':: d.fn(help='"whether support iSCSI Discovery CHAP authentication"', args=[d.arg(name='chapAuthDiscovery', type=d.T.boolean)]), + '#withChapAuthDiscovery':: d.fn(help='"chapAuthDiscovery defines whether support iSCSI Discovery CHAP authentication"', args=[d.arg(name='chapAuthDiscovery', type=d.T.boolean)]), withChapAuthDiscovery(chapAuthDiscovery): { iscsi+: { chapAuthDiscovery: chapAuthDiscovery } }, - '#withChapAuthSession':: d.fn(help='"whether support iSCSI Session CHAP authentication"', args=[d.arg(name='chapAuthSession', type=d.T.boolean)]), + '#withChapAuthSession':: d.fn(help='"chapAuthSession defines whether support iSCSI Session CHAP authentication"', args=[d.arg(name='chapAuthSession', type=d.T.boolean)]), withChapAuthSession(chapAuthSession): { iscsi+: { chapAuthSession: chapAuthSession } }, - '#withFsType':: d.fn(help='"Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi"', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType is the filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi"', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { iscsi+: { fsType: fsType } }, - '#withInitiatorName':: d.fn(help='"Custom iSCSI Initiator Name. If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface : will be created for the connection."', args=[d.arg(name='initiatorName', type=d.T.string)]), + '#withInitiatorName':: d.fn(help='"initiatorName is the custom iSCSI Initiator Name. If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface : will be created for the connection."', args=[d.arg(name='initiatorName', type=d.T.string)]), withInitiatorName(initiatorName): { iscsi+: { initiatorName: initiatorName } }, - '#withIqn':: d.fn(help='"Target iSCSI Qualified Name."', args=[d.arg(name='iqn', type=d.T.string)]), + '#withIqn':: d.fn(help='"iqn is Target iSCSI Qualified Name."', args=[d.arg(name='iqn', type=d.T.string)]), withIqn(iqn): { iscsi+: { iqn: iqn } }, - '#withIscsiInterface':: d.fn(help="\"iSCSI Interface Name that uses an iSCSI transport. Defaults to 'default' (tcp).\"", args=[d.arg(name='iscsiInterface', type=d.T.string)]), + '#withIscsiInterface':: d.fn(help="\"iscsiInterface is the interface Name that uses an iSCSI transport. Defaults to 'default' (tcp).\"", args=[d.arg(name='iscsiInterface', type=d.T.string)]), withIscsiInterface(iscsiInterface): { iscsi+: { iscsiInterface: iscsiInterface } }, - '#withLun':: d.fn(help='"iSCSI Target Lun number."', args=[d.arg(name='lun', type=d.T.integer)]), + '#withLun':: d.fn(help='"lun is iSCSI Target Lun number."', args=[d.arg(name='lun', type=d.T.integer)]), withLun(lun): { iscsi+: { lun: lun } }, - '#withPortals':: d.fn(help='"iSCSI Target Portal List. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260)."', args=[d.arg(name='portals', type=d.T.array)]), + '#withPortals':: d.fn(help='"portals is the iSCSI Target Portal List. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260)."', args=[d.arg(name='portals', type=d.T.array)]), withPortals(portals): { iscsi+: { portals: if std.isArray(v=portals) then portals else [portals] } }, - '#withPortalsMixin':: d.fn(help='"iSCSI Target Portal List. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260)."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='portals', type=d.T.array)]), + '#withPortalsMixin':: d.fn(help='"portals is the iSCSI Target Portal List. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260)."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='portals', type=d.T.array)]), withPortalsMixin(portals): { iscsi+: { portals+: if std.isArray(v=portals) then portals else [portals] } }, - '#withReadOnly':: d.fn(help='"ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false."', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { iscsi+: { readOnly: readOnly } }, - '#withTargetPortal':: d.fn(help='"iSCSI Target Portal. 
The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260)."', args=[d.arg(name='targetPortal', type=d.T.string)]), + '#withTargetPortal':: d.fn(help='"targetPortal is iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260)."', args=[d.arg(name='targetPortal', type=d.T.string)]), withTargetPortal(targetPortal): { iscsi+: { targetPortal: targetPortal } }, }, '#local':: d.obj(help='"Local represents directly-attached storage with node affinity (Beta feature)"'), 'local': { - '#withFsType':: d.fn(help='"Filesystem type to mount. It applies only when the Path is a block device. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". The default value is to auto-select a fileystem if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType is the filesystem type to mount. It applies only when the Path is a block device. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". The default value is to auto-select a filesystem if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { 'local'+: { fsType: fsType } }, - '#withPath':: d.fn(help='"The full path to the volume on the node. It can be either a directory or block device (disk, partition, ...)."', args=[d.arg(name='path', type=d.T.string)]), + '#withPath':: d.fn(help='"path of the full path to the volume on the node. It can be either a directory or block device (disk, partition, ...)."', args=[d.arg(name='path', type=d.T.string)]), withPath(path): { 'local'+: { path: path } }, }, '#nfs':: d.obj(help='"Represents an NFS mount that lasts the lifetime of a pod. NFS volumes do not support ownership management or SELinux relabeling."'), nfs: { - '#withPath':: d.fn(help='"Path that is exported by the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs"', args=[d.arg(name='path', type=d.T.string)]), + '#withPath':: d.fn(help='"path that is exported by the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs"', args=[d.arg(name='path', type=d.T.string)]), withPath(path): { nfs+: { path: path } }, - '#withReadOnly':: d.fn(help='"ReadOnly here will force the NFS export to be mounted with read-only permissions. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs"', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly here will force the NFS export to be mounted with read-only permissions. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs"', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { nfs+: { readOnly: readOnly } }, - '#withServer':: d.fn(help='"Server is the hostname or IP address of the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs"', args=[d.arg(name='server', type=d.T.string)]), + '#withServer':: d.fn(help='"server is the hostname or IP address of the NFS server. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs"', args=[d.arg(name='server', type=d.T.string)]), withServer(server): { nfs+: { server: server } }, }, '#nodeAffinity':: d.obj(help='"VolumeNodeAffinity defines constraints that limit what nodes this volume can be accessed from."'), @@ -269,87 +276,87 @@ }, '#photonPersistentDisk':: d.obj(help='"Represents a Photon Controller persistent disk resource."'), photonPersistentDisk: { - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { photonPersistentDisk+: { fsType: fsType } }, - '#withPdID':: d.fn(help='"ID that identifies Photon Controller persistent disk"', args=[d.arg(name='pdID', type=d.T.string)]), + '#withPdID':: d.fn(help='"pdID is the ID that identifies Photon Controller persistent disk"', args=[d.arg(name='pdID', type=d.T.string)]), withPdID(pdID): { photonPersistentDisk+: { pdID: pdID } }, }, '#portworxVolume':: d.obj(help='"PortworxVolumeSource represents a Portworx volume resource."'), portworxVolume: { - '#withFsType':: d.fn(help='"FSType represents the filesystem type to mount Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fSType represents the filesystem type to mount Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { portworxVolume+: { fsType: fsType } }, - '#withReadOnly':: d.fn(help='"Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { portworxVolume+: { readOnly: readOnly } }, - '#withVolumeID':: d.fn(help='"VolumeID uniquely identifies a Portworx volume"', args=[d.arg(name='volumeID', type=d.T.string)]), + '#withVolumeID':: d.fn(help='"volumeID uniquely identifies a Portworx volume"', args=[d.arg(name='volumeID', type=d.T.string)]), withVolumeID(volumeID): { portworxVolume+: { volumeID: volumeID } }, }, '#quobyte':: d.obj(help='"Represents a Quobyte mount that lasts the lifetime of a pod. Quobyte volumes do not support ownership management or SELinux relabeling."'), quobyte: { - '#withGroup':: d.fn(help='"Group to map volume access to Default is no group"', args=[d.arg(name='group', type=d.T.string)]), + '#withGroup':: d.fn(help='"group to map volume access to Default is no group"', args=[d.arg(name='group', type=d.T.string)]), withGroup(group): { quobyte+: { group: group } }, - '#withReadOnly':: d.fn(help='"ReadOnly here will force the Quobyte volume to be mounted with read-only permissions. 
Defaults to false."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly here will force the Quobyte volume to be mounted with read-only permissions. Defaults to false."', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { quobyte+: { readOnly: readOnly } }, - '#withRegistry':: d.fn(help='"Registry represents a single or multiple Quobyte Registry services specified as a string as host:port pair (multiple entries are separated with commas) which acts as the central registry for volumes"', args=[d.arg(name='registry', type=d.T.string)]), + '#withRegistry':: d.fn(help='"registry represents a single or multiple Quobyte Registry services specified as a string as host:port pair (multiple entries are separated with commas) which acts as the central registry for volumes"', args=[d.arg(name='registry', type=d.T.string)]), withRegistry(registry): { quobyte+: { registry: registry } }, - '#withTenant':: d.fn(help='"Tenant owning the given Quobyte volume in the Backend Used with dynamically provisioned Quobyte volumes, value is set by the plugin"', args=[d.arg(name='tenant', type=d.T.string)]), + '#withTenant':: d.fn(help='"tenant owning the given Quobyte volume in the Backend Used with dynamically provisioned Quobyte volumes, value is set by the plugin"', args=[d.arg(name='tenant', type=d.T.string)]), withTenant(tenant): { quobyte+: { tenant: tenant } }, - '#withUser':: d.fn(help='"User to map volume access to Defaults to serivceaccount user"', args=[d.arg(name='user', type=d.T.string)]), + '#withUser':: d.fn(help='"user to map volume access to Defaults to serivceaccount user"', args=[d.arg(name='user', type=d.T.string)]), withUser(user): { quobyte+: { user: user } }, - '#withVolume':: d.fn(help='"Volume is a string that references an already created Quobyte volume by name."', args=[d.arg(name='volume', type=d.T.string)]), + '#withVolume':: d.fn(help='"volume is a string that references an already created Quobyte volume by name."', args=[d.arg(name='volume', type=d.T.string)]), withVolume(volume): { quobyte+: { volume: volume } }, }, '#rbd':: d.obj(help='"Represents a Rados Block Device mount that lasts the lifetime of a pod. RBD volumes support ownership management and SELinux relabeling."'), rbd: { '#secretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), secretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), withName(name): { rbd+: { secretRef+: { name: name } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { rbd+: { secretRef+: { namespace: namespace } } }, }, - '#withFsType':: d.fn(help='"Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd"', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType is the filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd"', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { rbd+: { fsType: fsType } }, - '#withImage':: d.fn(help='"The rados image name. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='image', type=d.T.string)]), + '#withImage':: d.fn(help='"image is the rados image name. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='image', type=d.T.string)]), withImage(image): { rbd+: { image: image } }, - '#withKeyring':: d.fn(help='"Keyring is the path to key ring for RBDUser. Default is /etc/ceph/keyring. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='keyring', type=d.T.string)]), + '#withKeyring':: d.fn(help='"keyring is the path to key ring for RBDUser. Default is /etc/ceph/keyring. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='keyring', type=d.T.string)]), withKeyring(keyring): { rbd+: { keyring: keyring } }, - '#withMonitors':: d.fn(help='"A collection of Ceph monitors. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='monitors', type=d.T.array)]), + '#withMonitors':: d.fn(help='"monitors is a collection of Ceph monitors. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='monitors', type=d.T.array)]), withMonitors(monitors): { rbd+: { monitors: if std.isArray(v=monitors) then monitors else [monitors] } }, - '#withMonitorsMixin':: d.fn(help='"A collection of Ceph monitors. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='monitors', type=d.T.array)]), + '#withMonitorsMixin':: d.fn(help='"monitors is a collection of Ceph monitors. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='monitors', type=d.T.array)]), withMonitorsMixin(monitors): { rbd+: { monitors+: if std.isArray(v=monitors) then monitors else [monitors] } }, - '#withPool':: d.fn(help='"The rados pool name. Default is rbd. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='pool', type=d.T.string)]), + '#withPool':: d.fn(help='"pool is the rados pool name. Default is rbd. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='pool', type=d.T.string)]), withPool(pool): { rbd+: { pool: pool } }, - '#withReadOnly':: d.fn(help='"ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. 
More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { rbd+: { readOnly: readOnly } }, - '#withUser':: d.fn(help='"The rados user name. Default is admin. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='user', type=d.T.string)]), + '#withUser':: d.fn(help='"user is the rados user name. Default is admin. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='user', type=d.T.string)]), withUser(user): { rbd+: { user: user } }, }, '#scaleIO':: d.obj(help='"ScaleIOPersistentVolumeSource represents a persistent ScaleIO volume"'), scaleIO: { '#secretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), secretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), withName(name): { scaleIO+: { secretRef+: { name: name } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { scaleIO+: { secretRef+: { namespace: namespace } } }, }, - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Default is \\"xfs\\', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Default is \\"xfs\\', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { scaleIO+: { fsType: fsType } }, - '#withGateway':: d.fn(help='"The host address of the ScaleIO API Gateway."', args=[d.arg(name='gateway', type=d.T.string)]), + '#withGateway':: d.fn(help='"gateway is the host address of the ScaleIO API Gateway."', args=[d.arg(name='gateway', type=d.T.string)]), withGateway(gateway): { scaleIO+: { gateway: gateway } }, - '#withProtectionDomain':: d.fn(help='"The name of the ScaleIO Protection Domain for the configured storage."', args=[d.arg(name='protectionDomain', type=d.T.string)]), + '#withProtectionDomain':: d.fn(help='"protectionDomain is the name of the ScaleIO Protection Domain for the configured storage."', args=[d.arg(name='protectionDomain', type=d.T.string)]), withProtectionDomain(protectionDomain): { scaleIO+: { protectionDomain: protectionDomain } }, - '#withReadOnly':: d.fn(help='"Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly defaults to false (read/write). 
ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { scaleIO+: { readOnly: readOnly } }, - '#withSslEnabled':: d.fn(help='"Flag to enable/disable SSL communication with Gateway, default false"', args=[d.arg(name='sslEnabled', type=d.T.boolean)]), + '#withSslEnabled':: d.fn(help='"sslEnabled is the flag to enable/disable SSL communication with Gateway, default false"', args=[d.arg(name='sslEnabled', type=d.T.boolean)]), withSslEnabled(sslEnabled): { scaleIO+: { sslEnabled: sslEnabled } }, - '#withStorageMode':: d.fn(help='"Indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. Default is ThinProvisioned."', args=[d.arg(name='storageMode', type=d.T.string)]), + '#withStorageMode':: d.fn(help='"storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. Default is ThinProvisioned."', args=[d.arg(name='storageMode', type=d.T.string)]), withStorageMode(storageMode): { scaleIO+: { storageMode: storageMode } }, - '#withStoragePool':: d.fn(help='"The ScaleIO Storage Pool associated with the protection domain."', args=[d.arg(name='storagePool', type=d.T.string)]), + '#withStoragePool':: d.fn(help='"storagePool is the ScaleIO Storage Pool associated with the protection domain."', args=[d.arg(name='storagePool', type=d.T.string)]), withStoragePool(storagePool): { scaleIO+: { storagePool: storagePool } }, - '#withSystem':: d.fn(help='"The name of the storage system as configured in ScaleIO."', args=[d.arg(name='system', type=d.T.string)]), + '#withSystem':: d.fn(help='"system is the name of the storage system as configured in ScaleIO."', args=[d.arg(name='system', type=d.T.string)]), withSystem(system): { scaleIO+: { system: system } }, - '#withVolumeName':: d.fn(help='"The name of a volume already created in the ScaleIO system that is associated with this volume source."', args=[d.arg(name='volumeName', type=d.T.string)]), + '#withVolumeName':: d.fn(help='"volumeName is the name of a volume already created in the ScaleIO system that is associated with this volume source."', args=[d.arg(name='volumeName', type=d.T.string)]), withVolumeName(volumeName): { scaleIO+: { volumeName: volumeName } }, }, '#storageos':: d.obj(help='"Represents a StorageOS persistent volume resource."'), @@ -371,42 +378,44 @@ '#withUid':: d.fn(help='"UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { storageos+: { secretRef+: { uid: uid } } }, }, - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { storageos+: { fsType: fsType } }, - '#withReadOnly':: d.fn(help='"Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly defaults to false (read/write). 
ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { storageos+: { readOnly: readOnly } }, - '#withVolumeName':: d.fn(help='"VolumeName is the human-readable name of the StorageOS volume. Volume names are only unique within a namespace."', args=[d.arg(name='volumeName', type=d.T.string)]), + '#withVolumeName':: d.fn(help='"volumeName is the human-readable name of the StorageOS volume. Volume names are only unique within a namespace."', args=[d.arg(name='volumeName', type=d.T.string)]), withVolumeName(volumeName): { storageos+: { volumeName: volumeName } }, - '#withVolumeNamespace':: d.fn(help="\"VolumeNamespace specifies the scope of the volume within StorageOS. If no namespace is specified then the Pod's namespace will be used. This allows the Kubernetes name scoping to be mirrored within StorageOS for tighter integration. Set VolumeName to any name to override the default behaviour. Set to \\\"default\\\" if you are not using namespaces within StorageOS. Namespaces that do not pre-exist within StorageOS will be created.\"", args=[d.arg(name='volumeNamespace', type=d.T.string)]), + '#withVolumeNamespace':: d.fn(help="\"volumeNamespace specifies the scope of the volume within StorageOS. If no namespace is specified then the Pod's namespace will be used. This allows the Kubernetes name scoping to be mirrored within StorageOS for tighter integration. Set VolumeName to any name to override the default behaviour. Set to \\\"default\\\" if you are not using namespaces within StorageOS. Namespaces that do not pre-exist within StorageOS will be created.\"", args=[d.arg(name='volumeNamespace', type=d.T.string)]), withVolumeNamespace(volumeNamespace): { storageos+: { volumeNamespace: volumeNamespace } }, }, '#vsphereVolume':: d.obj(help='"Represents a vSphere volume resource."'), vsphereVolume: { - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType is filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". 
Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { vsphereVolume+: { fsType: fsType } }, - '#withStoragePolicyID':: d.fn(help='"Storage Policy Based Management (SPBM) profile ID associated with the StoragePolicyName."', args=[d.arg(name='storagePolicyID', type=d.T.string)]), + '#withStoragePolicyID':: d.fn(help='"storagePolicyID is the storage Policy Based Management (SPBM) profile ID associated with the StoragePolicyName."', args=[d.arg(name='storagePolicyID', type=d.T.string)]), withStoragePolicyID(storagePolicyID): { vsphereVolume+: { storagePolicyID: storagePolicyID } }, - '#withStoragePolicyName':: d.fn(help='"Storage Policy Based Management (SPBM) profile name."', args=[d.arg(name='storagePolicyName', type=d.T.string)]), + '#withStoragePolicyName':: d.fn(help='"storagePolicyName is the storage Policy Based Management (SPBM) profile name."', args=[d.arg(name='storagePolicyName', type=d.T.string)]), withStoragePolicyName(storagePolicyName): { vsphereVolume+: { storagePolicyName: storagePolicyName } }, - '#withVolumePath':: d.fn(help='"Path that identifies vSphere volume vmdk"', args=[d.arg(name='volumePath', type=d.T.string)]), + '#withVolumePath':: d.fn(help='"volumePath is the path that identifies vSphere volume vmdk"', args=[d.arg(name='volumePath', type=d.T.string)]), withVolumePath(volumePath): { vsphereVolume+: { volumePath: volumePath } }, }, - '#withAccessModes':: d.fn(help='"AccessModes contains all ways the volume can be mounted. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes"', args=[d.arg(name='accessModes', type=d.T.array)]), + '#withAccessModes':: d.fn(help='"accessModes contains all ways the volume can be mounted. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes"', args=[d.arg(name='accessModes', type=d.T.array)]), withAccessModes(accessModes): { accessModes: if std.isArray(v=accessModes) then accessModes else [accessModes] }, - '#withAccessModesMixin':: d.fn(help='"AccessModes contains all ways the volume can be mounted. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='accessModes', type=d.T.array)]), + '#withAccessModesMixin':: d.fn(help='"accessModes contains all ways the volume can be mounted. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='accessModes', type=d.T.array)]), withAccessModesMixin(accessModes): { accessModes+: if std.isArray(v=accessModes) then accessModes else [accessModes] }, - '#withCapacity':: d.fn(help="\"A description of the persistent volume's resources and capacity. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity\"", args=[d.arg(name='capacity', type=d.T.object)]), + '#withCapacity':: d.fn(help="\"capacity is the description of the persistent volume's resources and capacity. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity\"", args=[d.arg(name='capacity', type=d.T.object)]), withCapacity(capacity): { capacity: capacity }, - '#withCapacityMixin':: d.fn(help="\"A description of the persistent volume's resources and capacity. 
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='capacity', type=d.T.object)]), + '#withCapacityMixin':: d.fn(help="\"capacity is the description of the persistent volume's resources and capacity. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='capacity', type=d.T.object)]), withCapacityMixin(capacity): { capacity+: capacity }, - '#withMountOptions':: d.fn(help='"A list of mount options, e.g. [\\"ro\\", \\"soft\\"]. Not validated - mount will simply fail if one is invalid. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#mount-options"', args=[d.arg(name='mountOptions', type=d.T.array)]), + '#withMountOptions':: d.fn(help='"mountOptions is the list of mount options, e.g. [\\"ro\\", \\"soft\\"]. Not validated - mount will simply fail if one is invalid. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#mount-options"', args=[d.arg(name='mountOptions', type=d.T.array)]), withMountOptions(mountOptions): { mountOptions: if std.isArray(v=mountOptions) then mountOptions else [mountOptions] }, - '#withMountOptionsMixin':: d.fn(help='"A list of mount options, e.g. [\\"ro\\", \\"soft\\"]. Not validated - mount will simply fail if one is invalid. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#mount-options"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='mountOptions', type=d.T.array)]), + '#withMountOptionsMixin':: d.fn(help='"mountOptions is the list of mount options, e.g. [\\"ro\\", \\"soft\\"]. Not validated - mount will simply fail if one is invalid. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#mount-options"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='mountOptions', type=d.T.array)]), withMountOptionsMixin(mountOptions): { mountOptions+: if std.isArray(v=mountOptions) then mountOptions else [mountOptions] }, - '#withPersistentVolumeReclaimPolicy':: d.fn(help='"What happens to a persistent volume when released from its claim. Valid options are Retain (default for manually created PersistentVolumes), Delete (default for dynamically provisioned PersistentVolumes), and Recycle (deprecated). Recycle must be supported by the volume plugin underlying this PersistentVolume. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#reclaiming"', args=[d.arg(name='persistentVolumeReclaimPolicy', type=d.T.string)]), + '#withPersistentVolumeReclaimPolicy':: d.fn(help='"persistentVolumeReclaimPolicy defines what happens to a persistent volume when released from its claim. Valid options are Retain (default for manually created PersistentVolumes), Delete (default for dynamically provisioned PersistentVolumes), and Recycle (deprecated). Recycle must be supported by the volume plugin underlying this PersistentVolume. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#reclaiming"', args=[d.arg(name='persistentVolumeReclaimPolicy', type=d.T.string)]), withPersistentVolumeReclaimPolicy(persistentVolumeReclaimPolicy): { persistentVolumeReclaimPolicy: persistentVolumeReclaimPolicy }, - '#withStorageClassName':: d.fn(help='"Name of StorageClass to which this persistent volume belongs. 
Empty value means that this volume does not belong to any StorageClass."', args=[d.arg(name='storageClassName', type=d.T.string)]), + '#withStorageClassName':: d.fn(help='"storageClassName is the name of StorageClass to which this persistent volume belongs. Empty value means that this volume does not belong to any StorageClass."', args=[d.arg(name='storageClassName', type=d.T.string)]), withStorageClassName(storageClassName): { storageClassName: storageClassName }, + '#withVolumeAttributesClassName':: d.fn(help='"Name of VolumeAttributesClass to which this persistent volume belongs. Empty value is not allowed. When this field is not set, it indicates that this volume does not belong to any VolumeAttributesClass. This field is mutable and can be changed by the CSI driver after a volume has been updated successfully to a new class. For an unbound PersistentVolume, the volumeAttributesClassName will be matched with unbound PersistentVolumeClaims during the binding process. This is an alpha field and requires enabling VolumeAttributesClass feature."', args=[d.arg(name='volumeAttributesClassName', type=d.T.string)]), + withVolumeAttributesClassName(volumeAttributesClassName): { volumeAttributesClassName: volumeAttributesClassName }, '#withVolumeMode':: d.fn(help='"volumeMode defines if a volume is intended to be used with a formatted filesystem or to remain in raw block state. Value of Filesystem is implied when not included in spec."', args=[d.arg(name='volumeMode', type=d.T.string)]), withVolumeMode(volumeMode): { volumeMode: volumeMode }, '#mixin': 'ignore', diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/persistentVolumeStatus.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/persistentVolumeStatus.libsonnet new file mode 100644 index 00000000000..e855006c2ae --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/persistentVolumeStatus.libsonnet @@ -0,0 +1,14 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='persistentVolumeStatus', url='', help='"PersistentVolumeStatus is the current status of a persistent volume."'), + '#withLastPhaseTransitionTime':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='lastPhaseTransitionTime', type=d.T.string)]), + withLastPhaseTransitionTime(lastPhaseTransitionTime): { lastPhaseTransitionTime: lastPhaseTransitionTime }, + '#withMessage':: d.fn(help='"message is a human-readable message indicating details about why the volume is in this state."', args=[d.arg(name='message', type=d.T.string)]), + withMessage(message): { message: message }, + '#withPhase':: d.fn(help='"phase indicates if a volume is available, bound to a claim, or released by a claim. 
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#phase"', args=[d.arg(name='phase', type=d.T.string)]), + withPhase(phase): { phase: phase }, + '#withReason':: d.fn(help='"reason is a brief CamelCase string that describes any failure and is meant for machine parsing and tidy display in the CLI."', args=[d.arg(name='reason', type=d.T.string)]), + withReason(reason): { reason: reason }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/photonPersistentDiskVolumeSource.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/photonPersistentDiskVolumeSource.libsonnet new file mode 100644 index 00000000000..15b92e04340 --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/photonPersistentDiskVolumeSource.libsonnet @@ -0,0 +1,10 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='photonPersistentDiskVolumeSource', url='', help='"Represents a Photon Controller persistent disk resource."'), + '#withFsType':: d.fn(help='"fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), + withFsType(fsType): { fsType: fsType }, + '#withPdID':: d.fn(help='"pdID is the ID that identifies Photon Controller persistent disk"', args=[d.arg(name='pdID', type=d.T.string)]), + withPdID(pdID): { pdID: pdID }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/pod.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/pod.libsonnet similarity index 84% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/pod.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/pod.libsonnet index abfdcb6e4df..b663c0d888e 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/pod.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/pod.libsonnet @@ -3,12 +3,10 @@ '#':: d.pkg(name='pod', url='', help='"Pod is a collection of containers that can run on a host. This resource is created by clients and scheduled onto hosts."'), '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -19,21 +17,21 @@ withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. 
Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { metadata+: { generateName: generateName } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. 
More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { metadata+: { labels+: labels } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. 
An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { metadata+: { namespace: namespace } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, @@ -41,9 +39,9 @@ withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { metadata+: { uid: uid } }, }, '#new':: d.fn(help='new returns an instance of Pod', args=[d.arg(name='name', type=d.T.string)]), @@ -107,6 +105,11 @@ '#withSearchesMixin':: d.fn(help='"A list of DNS search domains for host-name lookup. This will be appended to the base search paths generated from DNSPolicy. Duplicated search paths will be removed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='searches', type=d.T.array)]), withSearchesMixin(searches): { spec+: { dnsConfig+: { searches+: if std.isArray(v=searches) then searches else [searches] } } }, }, + '#os':: d.obj(help='"PodOS defines the OS parameters of a pod."'), + os: { + '#withName':: d.fn(help='"Name is the name of the operating system. The currently supported values are linux and windows. Additional value may be defined in future and can be one of: https://github.com/opencontainers/runtime-spec/blob/master/config.md#platform-specific-configuration Clients should expect to handle additional values and treat unrecognized values in this field as os: null"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { spec+: { os+: { name: name } } }, + }, '#securityContext':: d.obj(help='"PodSecurityContext holds pod-level security attributes and common container settings. Some fields are also present in container.securityContext. Field values of container.securityContext take precedence over field values of PodSecurityContext."'), securityContext: { '#seLinuxOptions':: d.obj(help='"SELinuxOptions are the labels to be applied to the container"'), @@ -122,7 +125,7 @@ }, '#seccompProfile':: d.obj(help="\"SeccompProfile defines a pod/container's seccomp profile settings. Only one profile source may be set.\""), seccompProfile: { - '#withLocalhostProfile':: d.fn(help="\"localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must only be set if type is \\\"Localhost\\\".\"", args=[d.arg(name='localhostProfile', type=d.T.string)]), + '#withLocalhostProfile':: d.fn(help="\"localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must be set if type is \\\"Localhost\\\". Must NOT be set for any other type.\"", args=[d.arg(name='localhostProfile', type=d.T.string)]), withLocalhostProfile(localhostProfile): { spec+: { securityContext+: { seccompProfile+: { localhostProfile: localhostProfile } } } }, '#withType':: d.fn(help='"type indicates which kind of seccomp profile will be applied. Valid options are:\\n\\nLocalhost - a profile defined in a file on the node should be used. RuntimeDefault - the container runtime default profile should be used. 
Unconfined - no profile should be applied."', args=[d.arg(name='type', type=d.T.string)]), withType(type): { spec+: { securityContext+: { seccompProfile+: { type: type } } } }, @@ -133,26 +136,28 @@ withGmsaCredentialSpec(gmsaCredentialSpec): { spec+: { securityContext+: { windowsOptions+: { gmsaCredentialSpec: gmsaCredentialSpec } } } }, '#withGmsaCredentialSpecName':: d.fn(help='"GMSACredentialSpecName is the name of the GMSA credential spec to use."', args=[d.arg(name='gmsaCredentialSpecName', type=d.T.string)]), withGmsaCredentialSpecName(gmsaCredentialSpecName): { spec+: { securityContext+: { windowsOptions+: { gmsaCredentialSpecName: gmsaCredentialSpecName } } } }, + '#withHostProcess':: d.fn(help="\"HostProcess determines if a container should be run as a 'Host Process' container. All of a Pod's containers must have the same effective HostProcess value (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). In addition, if HostProcess is true then HostNetwork must also be set to true.\"", args=[d.arg(name='hostProcess', type=d.T.boolean)]), + withHostProcess(hostProcess): { spec+: { securityContext+: { windowsOptions+: { hostProcess: hostProcess } } } }, '#withRunAsUserName':: d.fn(help='"The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence."', args=[d.arg(name='runAsUserName', type=d.T.string)]), withRunAsUserName(runAsUserName): { spec+: { securityContext+: { windowsOptions+: { runAsUserName: runAsUserName } } } }, }, - '#withFsGroup':: d.fn(help="\"A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod:\\n\\n1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw----\\n\\nIf unset, the Kubelet will not modify the ownership and permissions of any volume.\"", args=[d.arg(name='fsGroup', type=d.T.integer)]), + '#withFsGroup':: d.fn(help="\"A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod:\\n\\n1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw----\\n\\nIf unset, the Kubelet will not modify the ownership and permissions of any volume. Note that this field cannot be set when spec.os.name is windows.\"", args=[d.arg(name='fsGroup', type=d.T.integer)]), withFsGroup(fsGroup): { spec+: { securityContext+: { fsGroup: fsGroup } } }, - '#withFsGroupChangePolicy':: d.fn(help='"fsGroupChangePolicy defines behavior of changing ownership and permission of the volume before being exposed inside Pod. This field will only apply to volume types which support fsGroup based ownership(and permissions). It will have no effect on ephemeral volume types such as: secret, configmaps and emptydir. Valid values are \\"OnRootMismatch\\" and \\"Always\\". 
If not specified, \\"Always\\" is used."', args=[d.arg(name='fsGroupChangePolicy', type=d.T.string)]), + '#withFsGroupChangePolicy':: d.fn(help='"fsGroupChangePolicy defines behavior of changing ownership and permission of the volume before being exposed inside Pod. This field will only apply to volume types which support fsGroup based ownership(and permissions). It will have no effect on ephemeral volume types such as: secret, configmaps and emptydir. Valid values are \\"OnRootMismatch\\" and \\"Always\\". If not specified, \\"Always\\" is used. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='fsGroupChangePolicy', type=d.T.string)]), withFsGroupChangePolicy(fsGroupChangePolicy): { spec+: { securityContext+: { fsGroupChangePolicy: fsGroupChangePolicy } } }, - '#withRunAsGroup':: d.fn(help='"The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container."', args=[d.arg(name='runAsGroup', type=d.T.integer)]), + '#withRunAsGroup':: d.fn(help='"The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='runAsGroup', type=d.T.integer)]), withRunAsGroup(runAsGroup): { spec+: { securityContext+: { runAsGroup: runAsGroup } } }, '#withRunAsNonRoot':: d.fn(help='"Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence."', args=[d.arg(name='runAsNonRoot', type=d.T.boolean)]), withRunAsNonRoot(runAsNonRoot): { spec+: { securityContext+: { runAsNonRoot: runAsNonRoot } } }, - '#withRunAsUser':: d.fn(help='"The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container."', args=[d.arg(name='runAsUser', type=d.T.integer)]), + '#withRunAsUser':: d.fn(help='"The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='runAsUser', type=d.T.integer)]), withRunAsUser(runAsUser): { spec+: { securityContext+: { runAsUser: runAsUser } } }, - '#withSupplementalGroups':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID. 
If unspecified, no groups will be added to any container.\"", args=[d.arg(name='supplementalGroups', type=d.T.array)]), + '#withSupplementalGroups':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID, the fsGroup (if specified), and group memberships defined in the container image for the uid of the container process. If unspecified, no additional groups are added to any container. Note that group memberships defined in the container image for the uid of the container process are still effective, even if they are not included in this list. Note that this field cannot be set when spec.os.name is windows.\"", args=[d.arg(name='supplementalGroups', type=d.T.array)]), withSupplementalGroups(supplementalGroups): { spec+: { securityContext+: { supplementalGroups: if std.isArray(v=supplementalGroups) then supplementalGroups else [supplementalGroups] } } }, - '#withSupplementalGroupsMixin':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID. If unspecified, no groups will be added to any container.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='supplementalGroups', type=d.T.array)]), + '#withSupplementalGroupsMixin':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID, the fsGroup (if specified), and group memberships defined in the container image for the uid of the container process. If unspecified, no additional groups are added to any container. Note that group memberships defined in the container image for the uid of the container process are still effective, even if they are not included in this list. Note that this field cannot be set when spec.os.name is windows.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='supplementalGroups', type=d.T.array)]), withSupplementalGroupsMixin(supplementalGroups): { spec+: { securityContext+: { supplementalGroups+: if std.isArray(v=supplementalGroups) then supplementalGroups else [supplementalGroups] } } }, - '#withSysctls':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch."', args=[d.arg(name='sysctls', type=d.T.array)]), + '#withSysctls':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='sysctls', type=d.T.array)]), withSysctls(sysctls): { spec+: { securityContext+: { sysctls: if std.isArray(v=sysctls) then sysctls else [sysctls] } } }, - '#withSysctlsMixin':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='sysctls', type=d.T.array)]), + '#withSysctlsMixin':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch. 
Note that this field cannot be set when spec.os.name is windows."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='sysctls', type=d.T.array)]), withSysctlsMixin(sysctls): { spec+: { securityContext+: { sysctls+: if std.isArray(v=sysctls) then sysctls else [sysctls] } } }, }, '#withActiveDeadlineSeconds':: d.fn(help='"Optional duration in seconds the pod may be active on the node relative to StartTime before the system will actively try to mark it failed and kill associated containers. Value must be a positive integer."', args=[d.arg(name='activeDeadlineSeconds', type=d.T.integer)]), @@ -167,9 +172,9 @@ withDnsPolicy(dnsPolicy): { spec+: { dnsPolicy: dnsPolicy } }, '#withEnableServiceLinks':: d.fn(help="\"EnableServiceLinks indicates whether information about services should be injected into pod's environment variables, matching the syntax of Docker links. Optional: Defaults to true.\"", args=[d.arg(name='enableServiceLinks', type=d.T.boolean)]), withEnableServiceLinks(enableServiceLinks): { spec+: { enableServiceLinks: enableServiceLinks } }, - '#withEphemeralContainers':: d.fn(help="\"List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. This field is alpha-level and is only honored by servers that enable the EphemeralContainers feature.\"", args=[d.arg(name='ephemeralContainers', type=d.T.array)]), + '#withEphemeralContainers':: d.fn(help="\"List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource.\"", args=[d.arg(name='ephemeralContainers', type=d.T.array)]), withEphemeralContainers(ephemeralContainers): { spec+: { ephemeralContainers: if std.isArray(v=ephemeralContainers) then ephemeralContainers else [ephemeralContainers] } }, - '#withEphemeralContainersMixin':: d.fn(help="\"List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. This field is alpha-level and is only honored by servers that enable the EphemeralContainers feature.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='ephemeralContainers', type=d.T.array)]), + '#withEphemeralContainersMixin':: d.fn(help="\"List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. 
In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='ephemeralContainers', type=d.T.array)]), withEphemeralContainersMixin(ephemeralContainers): { spec+: { ephemeralContainers+: if std.isArray(v=ephemeralContainers) then ephemeralContainers else [ephemeralContainers] } }, '#withHostAliases':: d.fn(help="\"HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts file if specified. This is only valid for non-hostNetwork pods.\"", args=[d.arg(name='hostAliases', type=d.T.array)]), withHostAliases(hostAliases): { spec+: { hostAliases: if std.isArray(v=hostAliases) then hostAliases else [hostAliases] } }, @@ -181,11 +186,13 @@ withHostNetwork(hostNetwork): { spec+: { hostNetwork: hostNetwork } }, '#withHostPID':: d.fn(help="\"Use the host's pid namespace. Optional: Default to false.\"", args=[d.arg(name='hostPID', type=d.T.boolean)]), withHostPID(hostPID): { spec+: { hostPID: hostPID } }, + '#withHostUsers':: d.fn(help="\"Use the host's user namespace. Optional: Default to true. If set to true or not present, the pod will be run in the host user namespace, useful for when the pod needs a feature only available to the host user namespace, such as loading a kernel module with CAP_SYS_MODULE. When set to false, a new userns is created for the pod. Setting false is useful for mitigating container breakout vulnerabilities even allowing users to run their containers as root without actually having root privileges on the host. This field is alpha-level and is only honored by servers that enable the UserNamespacesSupport feature.\"", args=[d.arg(name='hostUsers', type=d.T.boolean)]), + withHostUsers(hostUsers): { spec+: { hostUsers: hostUsers } }, '#withHostname':: d.fn(help="\"Specifies the hostname of the Pod If not specified, the pod's hostname will be set to a system-defined value.\"", args=[d.arg(name='hostname', type=d.T.string)]), withHostname(hostname): { spec+: { hostname: hostname } }, - '#withImagePullSecrets':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), + '#withImagePullSecrets':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), withImagePullSecrets(imagePullSecrets): { spec+: { imagePullSecrets: if std.isArray(v=imagePullSecrets) then imagePullSecrets else [imagePullSecrets] } }, - '#withImagePullSecretsMixin':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. 
For example, in the case of docker, only DockerConfig type secrets are honored. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), + '#withImagePullSecretsMixin':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), withImagePullSecretsMixin(imagePullSecrets): { spec+: { imagePullSecrets+: if std.isArray(v=imagePullSecrets) then imagePullSecrets else [imagePullSecrets] } }, '#withInitContainers':: d.fn(help='"List of initialization containers belonging to the pod. Init containers are executed in order prior to containers being started. If any init container fails, the pod is considered to have failed and is handled according to its restartPolicy. The name for an init container or normal container must be unique among all containers. Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes. The resourceRequirements of an init container are taken into account during scheduling by finding the highest request/limit for each resource type, and then using the max of of that value or the sum of the normal containers. Limits are applied to init containers in a similar fashion. Init containers cannot currently be added or removed. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/"', args=[d.arg(name='initContainers', type=d.T.array)]), withInitContainers(initContainers): { spec+: { initContainers: if std.isArray(v=initContainers) then initContainers else [initContainers] } }, @@ -197,26 +204,34 @@ withNodeSelector(nodeSelector): { spec+: { nodeSelector: nodeSelector } }, '#withNodeSelectorMixin':: d.fn(help="\"NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='nodeSelector', type=d.T.object)]), withNodeSelectorMixin(nodeSelector): { spec+: { nodeSelector+: nodeSelector } }, - '#withOverhead':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. 
More info: https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md This field is alpha-level as of Kubernetes v1.16, and is only honored by servers that enable the PodOverhead feature."', args=[d.arg(name='overhead', type=d.T.object)]), + '#withOverhead':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md"', args=[d.arg(name='overhead', type=d.T.object)]), withOverhead(overhead): { spec+: { overhead: overhead } }, - '#withOverheadMixin':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md This field is alpha-level as of Kubernetes v1.16, and is only honored by servers that enable the PodOverhead feature."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='overhead', type=d.T.object)]), + '#withOverheadMixin':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='overhead', type=d.T.object)]), withOverheadMixin(overhead): { spec+: { overhead+: overhead } }, - '#withPreemptionPolicy':: d.fn(help='"PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset. This field is beta-level, gated by the NonPreemptingPriority feature-gate."', args=[d.arg(name='preemptionPolicy', type=d.T.string)]), + '#withPreemptionPolicy':: d.fn(help='"PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. 
Defaults to PreemptLowerPriority if unset."', args=[d.arg(name='preemptionPolicy', type=d.T.string)]), withPreemptionPolicy(preemptionPolicy): { spec+: { preemptionPolicy: preemptionPolicy } }, '#withPriority':: d.fn(help='"The priority value. Various system components use this field to find the priority of the pod. When Priority Admission Controller is enabled, it prevents users from setting this field. The admission controller populates this field from PriorityClassName. The higher the value, the higher the priority."', args=[d.arg(name='priority', type=d.T.integer)]), withPriority(priority): { spec+: { priority: priority } }, '#withPriorityClassName':: d.fn(help="\"If specified, indicates the pod's priority. \\\"system-node-critical\\\" and \\\"system-cluster-critical\\\" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default.\"", args=[d.arg(name='priorityClassName', type=d.T.string)]), withPriorityClassName(priorityClassName): { spec+: { priorityClassName: priorityClassName } }, - '#withReadinessGates':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/0007-pod-ready%2B%2B.md"', args=[d.arg(name='readinessGates', type=d.T.array)]), + '#withReadinessGates':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates"', args=[d.arg(name='readinessGates', type=d.T.array)]), withReadinessGates(readinessGates): { spec+: { readinessGates: if std.isArray(v=readinessGates) then readinessGates else [readinessGates] } }, - '#withReadinessGatesMixin':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/0007-pod-ready%2B%2B.md"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='readinessGates', type=d.T.array)]), + '#withReadinessGatesMixin':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='readinessGates', type=d.T.array)]), withReadinessGatesMixin(readinessGates): { spec+: { readinessGates+: if std.isArray(v=readinessGates) then readinessGates else [readinessGates] } }, - '#withRestartPolicy':: d.fn(help='"Restart policy for all containers within the pod. One of Always, OnFailure, Never. Default to Always. 
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy"', args=[d.arg(name='restartPolicy', type=d.T.string)]), + '#withResourceClaims':: d.fn(help='"ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. The resources will be made available to those containers which consume them by name.\\n\\nThis is an alpha field and requires enabling the DynamicResourceAllocation feature gate.\\n\\nThis field is immutable."', args=[d.arg(name='resourceClaims', type=d.T.array)]), + withResourceClaims(resourceClaims): { spec+: { resourceClaims: if std.isArray(v=resourceClaims) then resourceClaims else [resourceClaims] } }, + '#withResourceClaimsMixin':: d.fn(help='"ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. The resources will be made available to those containers which consume them by name.\\n\\nThis is an alpha field and requires enabling the DynamicResourceAllocation feature gate.\\n\\nThis field is immutable."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='resourceClaims', type=d.T.array)]), + withResourceClaimsMixin(resourceClaims): { spec+: { resourceClaims+: if std.isArray(v=resourceClaims) then resourceClaims else [resourceClaims] } }, + '#withRestartPolicy':: d.fn(help='"Restart policy for all containers within the pod. One of Always, OnFailure, Never. In some contexts, only a subset of those values may be permitted. Default to Always. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy"', args=[d.arg(name='restartPolicy', type=d.T.string)]), withRestartPolicy(restartPolicy): { spec+: { restartPolicy: restartPolicy } }, - '#withRuntimeClassName':: d.fn(help='"RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \\"legacy\\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md This is a beta feature as of Kubernetes v1.14."', args=[d.arg(name='runtimeClassName', type=d.T.string)]), + '#withRuntimeClassName':: d.fn(help='"RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \\"legacy\\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class"', args=[d.arg(name='runtimeClassName', type=d.T.string)]), withRuntimeClassName(runtimeClassName): { spec+: { runtimeClassName: runtimeClassName } }, '#withSchedulerName':: d.fn(help='"If specified, the pod will be dispatched by specified scheduler. If not specified, the pod will be dispatched by default scheduler."', args=[d.arg(name='schedulerName', type=d.T.string)]), withSchedulerName(schedulerName): { spec+: { schedulerName: schedulerName } }, + '#withSchedulingGates':: d.fn(help='"SchedulingGates is an opaque list of values that if specified will block scheduling the pod. 
If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the scheduler will not attempt to schedule the pod.\\n\\nSchedulingGates can only be set at pod creation time, and be removed only afterwards.\\n\\nThis is a beta feature enabled by the PodSchedulingReadiness feature gate."', args=[d.arg(name='schedulingGates', type=d.T.array)]), + withSchedulingGates(schedulingGates): { spec+: { schedulingGates: if std.isArray(v=schedulingGates) then schedulingGates else [schedulingGates] } }, + '#withSchedulingGatesMixin':: d.fn(help='"SchedulingGates is an opaque list of values that if specified will block scheduling the pod. If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the scheduler will not attempt to schedule the pod.\\n\\nSchedulingGates can only be set at pod creation time, and be removed only afterwards.\\n\\nThis is a beta feature enabled by the PodSchedulingReadiness feature gate."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='schedulingGates', type=d.T.array)]), + withSchedulingGatesMixin(schedulingGates): { spec+: { schedulingGates+: if std.isArray(v=schedulingGates) then schedulingGates else [schedulingGates] } }, '#withServiceAccount':: d.fn(help='"DeprecatedServiceAccount is a depreciated alias for ServiceAccountName. Deprecated: Use serviceAccountName instead."', args=[d.arg(name='serviceAccount', type=d.T.string)]), withServiceAccount(serviceAccount): { spec+: { serviceAccount: serviceAccount } }, '#withServiceAccountName':: d.fn(help='"ServiceAccountName is the name of the ServiceAccount to use to run this pod. More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/"', args=[d.arg(name='serviceAccountName', type=d.T.string)]), diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/podAffinity.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/podAffinity.libsonnet similarity index 100% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/podAffinity.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/podAffinity.libsonnet diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/podAffinityTerm.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/podAffinityTerm.libsonnet similarity index 59% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/podAffinityTerm.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/podAffinityTerm.libsonnet index e3dd627ab9e..453a9af8e1f 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/podAffinityTerm.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/podAffinityTerm.libsonnet @@ -23,9 +23,17 @@ '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". 
The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), withMatchLabelsMixin(matchLabels): { namespaceSelector+: { matchLabels+: matchLabels } }, }, - '#withNamespaces':: d.fn(help="\"namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means \\\"this pod's namespace\\", args=[d.arg(name='namespaces', type=d.T.array)]), + '#withMatchLabelKeys':: d.fn(help="\"MatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. Also, MatchLabelKeys cannot be set when LabelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.\"", args=[d.arg(name='matchLabelKeys', type=d.T.array)]), + withMatchLabelKeys(matchLabelKeys): { matchLabelKeys: if std.isArray(v=matchLabelKeys) then matchLabelKeys else [matchLabelKeys] }, + '#withMatchLabelKeysMixin':: d.fn(help="\"MatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. Also, MatchLabelKeys cannot be set when LabelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='matchLabelKeys', type=d.T.array)]), + withMatchLabelKeysMixin(matchLabelKeys): { matchLabelKeys+: if std.isArray(v=matchLabelKeys) then matchLabelKeys else [matchLabelKeys] }, + '#withMismatchLabelKeys':: d.fn(help="\"MismatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. 
This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.\"", args=[d.arg(name='mismatchLabelKeys', type=d.T.array)]), + withMismatchLabelKeys(mismatchLabelKeys): { mismatchLabelKeys: if std.isArray(v=mismatchLabelKeys) then mismatchLabelKeys else [mismatchLabelKeys] }, + '#withMismatchLabelKeysMixin':: d.fn(help="\"MismatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='mismatchLabelKeys', type=d.T.array)]), + withMismatchLabelKeysMixin(mismatchLabelKeys): { mismatchLabelKeys+: if std.isArray(v=mismatchLabelKeys) then mismatchLabelKeys else [mismatchLabelKeys] }, + '#withNamespaces':: d.fn(help="\"namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means \\\"this pod's namespace\\\".\"", args=[d.arg(name='namespaces', type=d.T.array)]), withNamespaces(namespaces): { namespaces: if std.isArray(v=namespaces) then namespaces else [namespaces] }, - '#withNamespacesMixin':: d.fn(help="\"namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means \\\"this pod's namespace\\\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='namespaces', type=d.T.array)]), + '#withNamespacesMixin':: d.fn(help="\"namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means \\\"this pod's namespace\\\".\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='namespaces', type=d.T.array)]), withNamespacesMixin(namespaces): { namespaces+: if std.isArray(v=namespaces) then namespaces else [namespaces] }, '#withTopologyKey':: d.fn(help='"This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. 
Empty topologyKey is not allowed."', args=[d.arg(name='topologyKey', type=d.T.string)]), withTopologyKey(topologyKey): { topologyKey: topologyKey }, diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/podAntiAffinity.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/podAntiAffinity.libsonnet similarity index 100% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/podAntiAffinity.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/podAntiAffinity.libsonnet diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/podCondition.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/podCondition.libsonnet similarity index 100% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/podCondition.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/podCondition.libsonnet diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/podDNSConfig.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/podDNSConfig.libsonnet similarity index 100% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/podDNSConfig.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/podDNSConfig.libsonnet diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/podDNSConfigOption.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/podDNSConfigOption.libsonnet similarity index 100% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/podDNSConfigOption.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/podDNSConfigOption.libsonnet diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/podIP.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/podIP.libsonnet new file mode 100644 index 00000000000..08951dc0c7d --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/podIP.libsonnet @@ -0,0 +1,8 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='podIP', url='', help='"PodIP represents a single IP address allocated to the pod."'), + '#withIp':: d.fn(help='"IP is the IP address assigned to the pod"', args=[d.arg(name='ip', type=d.T.string)]), + withIp(ip): { ip: ip }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/podOS.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/podOS.libsonnet new file mode 100644 index 00000000000..fd6b94965f2 --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/podOS.libsonnet @@ -0,0 +1,8 @@ +{ + local d = 
(import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='podOS', url='', help='"PodOS defines the OS parameters of a pod."'), + '#withName':: d.fn(help='"Name is the name of the operating system. The currently supported values are linux and windows. Additional value may be defined in future and can be one of: https://github.com/opencontainers/runtime-spec/blob/master/config.md#platform-specific-configuration Clients should expect to handle additional values and treat unrecognized values in this field as os: null"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { name: name }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/podReadinessGate.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/podReadinessGate.libsonnet similarity index 100% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/podReadinessGate.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/podReadinessGate.libsonnet diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/podResourceClaim.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/podResourceClaim.libsonnet new file mode 100644 index 00000000000..bbe0a5b228c --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/podResourceClaim.libsonnet @@ -0,0 +1,15 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='podResourceClaim', url='', help='"PodResourceClaim references exactly one ResourceClaim through a ClaimSource. It adds a name to it that uniquely identifies the ResourceClaim inside the Pod. Containers that need access to the ResourceClaim reference it with this name."'), + '#source':: d.obj(help='"ClaimSource describes a reference to a ResourceClaim.\\n\\nExactly one of these fields should be set. Consumers of this type must treat an empty object as if it has an unknown value."'), + source: { + '#withResourceClaimName':: d.fn(help='"ResourceClaimName is the name of a ResourceClaim object in the same namespace as this pod."', args=[d.arg(name='resourceClaimName', type=d.T.string)]), + withResourceClaimName(resourceClaimName): { source+: { resourceClaimName: resourceClaimName } }, + '#withResourceClaimTemplateName':: d.fn(help='"ResourceClaimTemplateName is the name of a ResourceClaimTemplate object in the same namespace as this pod.\\n\\nThe template will be used to create a new ResourceClaim, which will be bound to this pod. When this pod is deleted, the ResourceClaim will also be deleted. The pod name and resource name, along with a generated component, will be used to form a unique name for the ResourceClaim, which will be recorded in pod.status.resourceClaimStatuses.\\n\\nThis field is immutable and no changes will be made to the corresponding ResourceClaim by the control plane after creating the ResourceClaim."', args=[d.arg(name='resourceClaimTemplateName', type=d.T.string)]), + withResourceClaimTemplateName(resourceClaimTemplateName): { source+: { resourceClaimTemplateName: resourceClaimTemplateName } }, + }, + '#withName':: d.fn(help='"Name uniquely identifies this resource claim inside the pod. 
This must be a DNS_LABEL."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { name: name }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/podResourceClaimStatus.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/podResourceClaimStatus.libsonnet new file mode 100644 index 00000000000..11fa0c7833b --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/podResourceClaimStatus.libsonnet @@ -0,0 +1,10 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='podResourceClaimStatus', url='', help='"PodResourceClaimStatus is stored in the PodStatus for each PodResourceClaim which references a ResourceClaimTemplate. It stores the generated name for the corresponding ResourceClaim."'), + '#withName':: d.fn(help='"Name uniquely identifies this resource claim inside the pod. This must match the name of an entry in pod.spec.resourceClaims, which implies that the string must be a DNS_LABEL."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { name: name }, + '#withResourceClaimName':: d.fn(help='"ResourceClaimName is the name of the ResourceClaim that was generated for the Pod in the namespace of the Pod. It this is unset, then generating a ResourceClaim was not necessary. The pod.spec.resourceClaims entry can be ignored in this case."', args=[d.arg(name='resourceClaimName', type=d.T.string)]), + withResourceClaimName(resourceClaimName): { resourceClaimName: resourceClaimName }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/podSchedulingGate.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/podSchedulingGate.libsonnet new file mode 100644 index 00000000000..072a4eb6562 --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/podSchedulingGate.libsonnet @@ -0,0 +1,8 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='podSchedulingGate', url='', help='"PodSchedulingGate is associated to a Pod to guard its scheduling."'), + '#withName':: d.fn(help='"Name of the scheduling gate. Each scheduling gate must have a unique name field."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { name: name }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/podSecurityContext.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/podSecurityContext.libsonnet similarity index 71% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/podSecurityContext.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/podSecurityContext.libsonnet index a918355cad0..3515afc7ec1 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/podSecurityContext.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/podSecurityContext.libsonnet @@ -14,7 +14,7 @@ }, '#seccompProfile':: d.obj(help="\"SeccompProfile defines a pod/container's seccomp profile settings. 
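// Illustrative only: constructing the new 1.29 pod-level objects added above.
// The import path assumes the jsonnet-bundler vendor layout used in this repo,
// and the claim/gate names are placeholders.
local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';
local claim = k.core.v1.podResourceClaim;
local gate = k.core.v1.podSchedulingGate;

{
  // -> { name: 'gpu', source: { resourceClaimTemplateName: 'gpu-claim-template' } }
  resourceClaim: claim.withName('gpu') + claim.source.withResourceClaimTemplateName('gpu-claim-template'),
  // -> { name: 'example.com/quota-check' }
  schedulingGate: gate.withName('example.com/quota-check'),
}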
Only one profile source may be set.\""), seccompProfile: { - '#withLocalhostProfile':: d.fn(help="\"localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must only be set if type is \\\"Localhost\\\".\"", args=[d.arg(name='localhostProfile', type=d.T.string)]), + '#withLocalhostProfile':: d.fn(help="\"localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must be set if type is \\\"Localhost\\\". Must NOT be set for any other type.\"", args=[d.arg(name='localhostProfile', type=d.T.string)]), withLocalhostProfile(localhostProfile): { seccompProfile+: { localhostProfile: localhostProfile } }, '#withType':: d.fn(help='"type indicates which kind of seccomp profile will be applied. Valid options are:\\n\\nLocalhost - a profile defined in a file on the node should be used. RuntimeDefault - the container runtime default profile should be used. Unconfined - no profile should be applied."', args=[d.arg(name='type', type=d.T.string)]), withType(type): { seccompProfile+: { type: type } }, @@ -25,26 +25,28 @@ withGmsaCredentialSpec(gmsaCredentialSpec): { windowsOptions+: { gmsaCredentialSpec: gmsaCredentialSpec } }, '#withGmsaCredentialSpecName':: d.fn(help='"GMSACredentialSpecName is the name of the GMSA credential spec to use."', args=[d.arg(name='gmsaCredentialSpecName', type=d.T.string)]), withGmsaCredentialSpecName(gmsaCredentialSpecName): { windowsOptions+: { gmsaCredentialSpecName: gmsaCredentialSpecName } }, + '#withHostProcess':: d.fn(help="\"HostProcess determines if a container should be run as a 'Host Process' container. All of a Pod's containers must have the same effective HostProcess value (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). In addition, if HostProcess is true then HostNetwork must also be set to true.\"", args=[d.arg(name='hostProcess', type=d.T.boolean)]), + withHostProcess(hostProcess): { windowsOptions+: { hostProcess: hostProcess } }, '#withRunAsUserName':: d.fn(help='"The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence."', args=[d.arg(name='runAsUserName', type=d.T.string)]), withRunAsUserName(runAsUserName): { windowsOptions+: { runAsUserName: runAsUserName } }, }, - '#withFsGroup':: d.fn(help="\"A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod:\\n\\n1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw----\\n\\nIf unset, the Kubelet will not modify the ownership and permissions of any volume.\"", args=[d.arg(name='fsGroup', type=d.T.integer)]), + '#withFsGroup':: d.fn(help="\"A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod:\\n\\n1. The owning GID will be the FSGroup 2. 
The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw----\\n\\nIf unset, the Kubelet will not modify the ownership and permissions of any volume. Note that this field cannot be set when spec.os.name is windows.\"", args=[d.arg(name='fsGroup', type=d.T.integer)]), withFsGroup(fsGroup): { fsGroup: fsGroup }, - '#withFsGroupChangePolicy':: d.fn(help='"fsGroupChangePolicy defines behavior of changing ownership and permission of the volume before being exposed inside Pod. This field will only apply to volume types which support fsGroup based ownership(and permissions). It will have no effect on ephemeral volume types such as: secret, configmaps and emptydir. Valid values are \\"OnRootMismatch\\" and \\"Always\\". If not specified, \\"Always\\" is used."', args=[d.arg(name='fsGroupChangePolicy', type=d.T.string)]), + '#withFsGroupChangePolicy':: d.fn(help='"fsGroupChangePolicy defines behavior of changing ownership and permission of the volume before being exposed inside Pod. This field will only apply to volume types which support fsGroup based ownership(and permissions). It will have no effect on ephemeral volume types such as: secret, configmaps and emptydir. Valid values are \\"OnRootMismatch\\" and \\"Always\\". If not specified, \\"Always\\" is used. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='fsGroupChangePolicy', type=d.T.string)]), withFsGroupChangePolicy(fsGroupChangePolicy): { fsGroupChangePolicy: fsGroupChangePolicy }, - '#withRunAsGroup':: d.fn(help='"The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container."', args=[d.arg(name='runAsGroup', type=d.T.integer)]), + '#withRunAsGroup':: d.fn(help='"The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='runAsGroup', type=d.T.integer)]), withRunAsGroup(runAsGroup): { runAsGroup: runAsGroup }, '#withRunAsNonRoot':: d.fn(help='"Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence."', args=[d.arg(name='runAsNonRoot', type=d.T.boolean)]), withRunAsNonRoot(runAsNonRoot): { runAsNonRoot: runAsNonRoot }, - '#withRunAsUser':: d.fn(help='"The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container."', args=[d.arg(name='runAsUser', type=d.T.integer)]), + '#withRunAsUser':: d.fn(help='"The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. 
If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='runAsUser', type=d.T.integer)]), withRunAsUser(runAsUser): { runAsUser: runAsUser }, - '#withSupplementalGroups':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID. If unspecified, no groups will be added to any container.\"", args=[d.arg(name='supplementalGroups', type=d.T.array)]), + '#withSupplementalGroups':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID, the fsGroup (if specified), and group memberships defined in the container image for the uid of the container process. If unspecified, no additional groups are added to any container. Note that group memberships defined in the container image for the uid of the container process are still effective, even if they are not included in this list. Note that this field cannot be set when spec.os.name is windows.\"", args=[d.arg(name='supplementalGroups', type=d.T.array)]), withSupplementalGroups(supplementalGroups): { supplementalGroups: if std.isArray(v=supplementalGroups) then supplementalGroups else [supplementalGroups] }, - '#withSupplementalGroupsMixin':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID. If unspecified, no groups will be added to any container.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='supplementalGroups', type=d.T.array)]), + '#withSupplementalGroupsMixin':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID, the fsGroup (if specified), and group memberships defined in the container image for the uid of the container process. If unspecified, no additional groups are added to any container. Note that group memberships defined in the container image for the uid of the container process are still effective, even if they are not included in this list. Note that this field cannot be set when spec.os.name is windows.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='supplementalGroups', type=d.T.array)]), withSupplementalGroupsMixin(supplementalGroups): { supplementalGroups+: if std.isArray(v=supplementalGroups) then supplementalGroups else [supplementalGroups] }, - '#withSysctls':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch."', args=[d.arg(name='sysctls', type=d.T.array)]), + '#withSysctls':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='sysctls', type=d.T.array)]), withSysctls(sysctls): { sysctls: if std.isArray(v=sysctls) then sysctls else [sysctls] }, - '#withSysctlsMixin':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='sysctls', type=d.T.array)]), + '#withSysctlsMixin':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. 
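// Illustrative only: the new hostProcess toggle on podSecurityContext's
// windowsOptions. Values are placeholders; the import path assumes the
// jsonnet-bundler vendor layout used in this repo.
local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';
local psc = k.core.v1.podSecurityContext;

// Renders { windowsOptions: { gmsaCredentialSpecName: 'web-app', hostProcess: true } }
psc.windowsOptions.withGmsaCredentialSpecName('web-app')
+ psc.windowsOptions.withHostProcess(true)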
Pods with unsupported sysctls (by the container runtime) might fail to launch. Note that this field cannot be set when spec.os.name is windows."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='sysctls', type=d.T.array)]), withSysctlsMixin(sysctls): { sysctls+: if std.isArray(v=sysctls) then sysctls else [sysctls] }, '#mixin': 'ignore', mixin: self, diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/podSpec.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/podSpec.libsonnet similarity index 83% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/podSpec.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/podSpec.libsonnet index fb01a200601..6312789a7c3 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/podSpec.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/podSpec.libsonnet @@ -55,6 +55,11 @@ '#withSearchesMixin':: d.fn(help='"A list of DNS search domains for host-name lookup. This will be appended to the base search paths generated from DNSPolicy. Duplicated search paths will be removed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='searches', type=d.T.array)]), withSearchesMixin(searches): { dnsConfig+: { searches+: if std.isArray(v=searches) then searches else [searches] } }, }, + '#os':: d.obj(help='"PodOS defines the OS parameters of a pod."'), + os: { + '#withName':: d.fn(help='"Name is the name of the operating system. The currently supported values are linux and windows. Additional value may be defined in future and can be one of: https://github.com/opencontainers/runtime-spec/blob/master/config.md#platform-specific-configuration Clients should expect to handle additional values and treat unrecognized values in this field as os: null"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { os+: { name: name } }, + }, '#securityContext':: d.obj(help='"PodSecurityContext holds pod-level security attributes and common container settings. Some fields are also present in container.securityContext. Field values of container.securityContext take precedence over field values of PodSecurityContext."'), securityContext: { '#seLinuxOptions':: d.obj(help='"SELinuxOptions are the labels to be applied to the container"'), @@ -70,7 +75,7 @@ }, '#seccompProfile':: d.obj(help="\"SeccompProfile defines a pod/container's seccomp profile settings. Only one profile source may be set.\""), seccompProfile: { - '#withLocalhostProfile':: d.fn(help="\"localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must only be set if type is \\\"Localhost\\\".\"", args=[d.arg(name='localhostProfile', type=d.T.string)]), + '#withLocalhostProfile':: d.fn(help="\"localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must be set if type is \\\"Localhost\\\". 
Must NOT be set for any other type.\"", args=[d.arg(name='localhostProfile', type=d.T.string)]), withLocalhostProfile(localhostProfile): { securityContext+: { seccompProfile+: { localhostProfile: localhostProfile } } }, '#withType':: d.fn(help='"type indicates which kind of seccomp profile will be applied. Valid options are:\\n\\nLocalhost - a profile defined in a file on the node should be used. RuntimeDefault - the container runtime default profile should be used. Unconfined - no profile should be applied."', args=[d.arg(name='type', type=d.T.string)]), withType(type): { securityContext+: { seccompProfile+: { type: type } } }, @@ -81,26 +86,28 @@ withGmsaCredentialSpec(gmsaCredentialSpec): { securityContext+: { windowsOptions+: { gmsaCredentialSpec: gmsaCredentialSpec } } }, '#withGmsaCredentialSpecName':: d.fn(help='"GMSACredentialSpecName is the name of the GMSA credential spec to use."', args=[d.arg(name='gmsaCredentialSpecName', type=d.T.string)]), withGmsaCredentialSpecName(gmsaCredentialSpecName): { securityContext+: { windowsOptions+: { gmsaCredentialSpecName: gmsaCredentialSpecName } } }, + '#withHostProcess':: d.fn(help="\"HostProcess determines if a container should be run as a 'Host Process' container. All of a Pod's containers must have the same effective HostProcess value (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). In addition, if HostProcess is true then HostNetwork must also be set to true.\"", args=[d.arg(name='hostProcess', type=d.T.boolean)]), + withHostProcess(hostProcess): { securityContext+: { windowsOptions+: { hostProcess: hostProcess } } }, '#withRunAsUserName':: d.fn(help='"The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence."', args=[d.arg(name='runAsUserName', type=d.T.string)]), withRunAsUserName(runAsUserName): { securityContext+: { windowsOptions+: { runAsUserName: runAsUserName } } }, }, - '#withFsGroup':: d.fn(help="\"A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod:\\n\\n1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw----\\n\\nIf unset, the Kubelet will not modify the ownership and permissions of any volume.\"", args=[d.arg(name='fsGroup', type=d.T.integer)]), + '#withFsGroup':: d.fn(help="\"A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod:\\n\\n1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw----\\n\\nIf unset, the Kubelet will not modify the ownership and permissions of any volume. Note that this field cannot be set when spec.os.name is windows.\"", args=[d.arg(name='fsGroup', type=d.T.integer)]), withFsGroup(fsGroup): { securityContext+: { fsGroup: fsGroup } }, - '#withFsGroupChangePolicy':: d.fn(help='"fsGroupChangePolicy defines behavior of changing ownership and permission of the volume before being exposed inside Pod. 
This field will only apply to volume types which support fsGroup based ownership(and permissions). It will have no effect on ephemeral volume types such as: secret, configmaps and emptydir. Valid values are \\"OnRootMismatch\\" and \\"Always\\". If not specified, \\"Always\\" is used."', args=[d.arg(name='fsGroupChangePolicy', type=d.T.string)]), + '#withFsGroupChangePolicy':: d.fn(help='"fsGroupChangePolicy defines behavior of changing ownership and permission of the volume before being exposed inside Pod. This field will only apply to volume types which support fsGroup based ownership(and permissions). It will have no effect on ephemeral volume types such as: secret, configmaps and emptydir. Valid values are \\"OnRootMismatch\\" and \\"Always\\". If not specified, \\"Always\\" is used. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='fsGroupChangePolicy', type=d.T.string)]), withFsGroupChangePolicy(fsGroupChangePolicy): { securityContext+: { fsGroupChangePolicy: fsGroupChangePolicy } }, - '#withRunAsGroup':: d.fn(help='"The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container."', args=[d.arg(name='runAsGroup', type=d.T.integer)]), + '#withRunAsGroup':: d.fn(help='"The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='runAsGroup', type=d.T.integer)]), withRunAsGroup(runAsGroup): { securityContext+: { runAsGroup: runAsGroup } }, '#withRunAsNonRoot':: d.fn(help='"Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence."', args=[d.arg(name='runAsNonRoot', type=d.T.boolean)]), withRunAsNonRoot(runAsNonRoot): { securityContext+: { runAsNonRoot: runAsNonRoot } }, - '#withRunAsUser':: d.fn(help='"The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container."', args=[d.arg(name='runAsUser', type=d.T.integer)]), + '#withRunAsUser':: d.fn(help='"The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='runAsUser', type=d.T.integer)]), withRunAsUser(runAsUser): { securityContext+: { runAsUser: runAsUser } }, - '#withSupplementalGroups':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID. 
If unspecified, no groups will be added to any container.\"", args=[d.arg(name='supplementalGroups', type=d.T.array)]), + '#withSupplementalGroups':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID, the fsGroup (if specified), and group memberships defined in the container image for the uid of the container process. If unspecified, no additional groups are added to any container. Note that group memberships defined in the container image for the uid of the container process are still effective, even if they are not included in this list. Note that this field cannot be set when spec.os.name is windows.\"", args=[d.arg(name='supplementalGroups', type=d.T.array)]), withSupplementalGroups(supplementalGroups): { securityContext+: { supplementalGroups: if std.isArray(v=supplementalGroups) then supplementalGroups else [supplementalGroups] } }, - '#withSupplementalGroupsMixin':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID. If unspecified, no groups will be added to any container.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='supplementalGroups', type=d.T.array)]), + '#withSupplementalGroupsMixin':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID, the fsGroup (if specified), and group memberships defined in the container image for the uid of the container process. If unspecified, no additional groups are added to any container. Note that group memberships defined in the container image for the uid of the container process are still effective, even if they are not included in this list. Note that this field cannot be set when spec.os.name is windows.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='supplementalGroups', type=d.T.array)]), withSupplementalGroupsMixin(supplementalGroups): { securityContext+: { supplementalGroups+: if std.isArray(v=supplementalGroups) then supplementalGroups else [supplementalGroups] } }, - '#withSysctls':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch."', args=[d.arg(name='sysctls', type=d.T.array)]), + '#withSysctls':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='sysctls', type=d.T.array)]), withSysctls(sysctls): { securityContext+: { sysctls: if std.isArray(v=sysctls) then sysctls else [sysctls] } }, - '#withSysctlsMixin':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='sysctls', type=d.T.array)]), + '#withSysctlsMixin':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch. 
Note that this field cannot be set when spec.os.name is windows."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='sysctls', type=d.T.array)]), withSysctlsMixin(sysctls): { securityContext+: { sysctls+: if std.isArray(v=sysctls) then sysctls else [sysctls] } }, }, '#withActiveDeadlineSeconds':: d.fn(help='"Optional duration in seconds the pod may be active on the node relative to StartTime before the system will actively try to mark it failed and kill associated containers. Value must be a positive integer."', args=[d.arg(name='activeDeadlineSeconds', type=d.T.integer)]), @@ -115,9 +122,9 @@ withDnsPolicy(dnsPolicy): { dnsPolicy: dnsPolicy }, '#withEnableServiceLinks':: d.fn(help="\"EnableServiceLinks indicates whether information about services should be injected into pod's environment variables, matching the syntax of Docker links. Optional: Defaults to true.\"", args=[d.arg(name='enableServiceLinks', type=d.T.boolean)]), withEnableServiceLinks(enableServiceLinks): { enableServiceLinks: enableServiceLinks }, - '#withEphemeralContainers':: d.fn(help="\"List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. This field is alpha-level and is only honored by servers that enable the EphemeralContainers feature.\"", args=[d.arg(name='ephemeralContainers', type=d.T.array)]), + '#withEphemeralContainers':: d.fn(help="\"List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource.\"", args=[d.arg(name='ephemeralContainers', type=d.T.array)]), withEphemeralContainers(ephemeralContainers): { ephemeralContainers: if std.isArray(v=ephemeralContainers) then ephemeralContainers else [ephemeralContainers] }, - '#withEphemeralContainersMixin':: d.fn(help="\"List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. This field is alpha-level and is only honored by servers that enable the EphemeralContainers feature.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='ephemeralContainers', type=d.T.array)]), + '#withEphemeralContainersMixin':: d.fn(help="\"List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. 
In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='ephemeralContainers', type=d.T.array)]), withEphemeralContainersMixin(ephemeralContainers): { ephemeralContainers+: if std.isArray(v=ephemeralContainers) then ephemeralContainers else [ephemeralContainers] }, '#withHostAliases':: d.fn(help="\"HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts file if specified. This is only valid for non-hostNetwork pods.\"", args=[d.arg(name='hostAliases', type=d.T.array)]), withHostAliases(hostAliases): { hostAliases: if std.isArray(v=hostAliases) then hostAliases else [hostAliases] }, @@ -129,11 +136,13 @@ withHostNetwork(hostNetwork): { hostNetwork: hostNetwork }, '#withHostPID':: d.fn(help="\"Use the host's pid namespace. Optional: Default to false.\"", args=[d.arg(name='hostPID', type=d.T.boolean)]), withHostPID(hostPID): { hostPID: hostPID }, + '#withHostUsers':: d.fn(help="\"Use the host's user namespace. Optional: Default to true. If set to true or not present, the pod will be run in the host user namespace, useful for when the pod needs a feature only available to the host user namespace, such as loading a kernel module with CAP_SYS_MODULE. When set to false, a new userns is created for the pod. Setting false is useful for mitigating container breakout vulnerabilities even allowing users to run their containers as root without actually having root privileges on the host. This field is alpha-level and is only honored by servers that enable the UserNamespacesSupport feature.\"", args=[d.arg(name='hostUsers', type=d.T.boolean)]), + withHostUsers(hostUsers): { hostUsers: hostUsers }, '#withHostname':: d.fn(help="\"Specifies the hostname of the Pod If not specified, the pod's hostname will be set to a system-defined value.\"", args=[d.arg(name='hostname', type=d.T.string)]), withHostname(hostname): { hostname: hostname }, - '#withImagePullSecrets':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), + '#withImagePullSecrets':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), withImagePullSecrets(imagePullSecrets): { imagePullSecrets: if std.isArray(v=imagePullSecrets) then imagePullSecrets else [imagePullSecrets] }, - '#withImagePullSecretsMixin':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. 
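// Illustrative only: a few of the podSpec fields added above, combined the
// usual way. Values are placeholders; the import path assumes the
// jsonnet-bundler vendor layout used in this repo.
local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';
local podSpec = k.core.v1.podSpec;

// Renders { os: { name: 'linux' }, hostUsers: false,
//           securityContext: { seccompProfile: { type: 'RuntimeDefault' } } }
podSpec.os.withName('linux')
+ podSpec.withHostUsers(false)
+ podSpec.securityContext.seccompProfile.withType('RuntimeDefault')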
More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), + '#withImagePullSecretsMixin':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), withImagePullSecretsMixin(imagePullSecrets): { imagePullSecrets+: if std.isArray(v=imagePullSecrets) then imagePullSecrets else [imagePullSecrets] }, '#withInitContainers':: d.fn(help='"List of initialization containers belonging to the pod. Init containers are executed in order prior to containers being started. If any init container fails, the pod is considered to have failed and is handled according to its restartPolicy. The name for an init container or normal container must be unique among all containers. Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes. The resourceRequirements of an init container are taken into account during scheduling by finding the highest request/limit for each resource type, and then using the max of of that value or the sum of the normal containers. Limits are applied to init containers in a similar fashion. Init containers cannot currently be added or removed. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/"', args=[d.arg(name='initContainers', type=d.T.array)]), withInitContainers(initContainers): { initContainers: if std.isArray(v=initContainers) then initContainers else [initContainers] }, @@ -145,26 +154,34 @@ withNodeSelector(nodeSelector): { nodeSelector: nodeSelector }, '#withNodeSelectorMixin':: d.fn(help="\"NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='nodeSelector', type=d.T.object)]), withNodeSelectorMixin(nodeSelector): { nodeSelector+: nodeSelector }, - '#withOverhead':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md This field is alpha-level as of Kubernetes v1.16, and is only honored by servers that enable the PodOverhead feature."', args=[d.arg(name='overhead', type=d.T.object)]), + '#withOverhead':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. 
This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md"', args=[d.arg(name='overhead', type=d.T.object)]), withOverhead(overhead): { overhead: overhead }, - '#withOverheadMixin':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md This field is alpha-level as of Kubernetes v1.16, and is only honored by servers that enable the PodOverhead feature."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='overhead', type=d.T.object)]), + '#withOverheadMixin':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='overhead', type=d.T.object)]), withOverheadMixin(overhead): { overhead+: overhead }, - '#withPreemptionPolicy':: d.fn(help='"PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset. This field is beta-level, gated by the NonPreemptingPriority feature-gate."', args=[d.arg(name='preemptionPolicy', type=d.T.string)]), + '#withPreemptionPolicy':: d.fn(help='"PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset."', args=[d.arg(name='preemptionPolicy', type=d.T.string)]), withPreemptionPolicy(preemptionPolicy): { preemptionPolicy: preemptionPolicy }, '#withPriority':: d.fn(help='"The priority value. Various system components use this field to find the priority of the pod. When Priority Admission Controller is enabled, it prevents users from setting this field. The admission controller populates this field from PriorityClassName. 
The higher the value, the higher the priority."', args=[d.arg(name='priority', type=d.T.integer)]), withPriority(priority): { priority: priority }, '#withPriorityClassName':: d.fn(help="\"If specified, indicates the pod's priority. \\\"system-node-critical\\\" and \\\"system-cluster-critical\\\" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default.\"", args=[d.arg(name='priorityClassName', type=d.T.string)]), withPriorityClassName(priorityClassName): { priorityClassName: priorityClassName }, - '#withReadinessGates':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/0007-pod-ready%2B%2B.md"', args=[d.arg(name='readinessGates', type=d.T.array)]), + '#withReadinessGates':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates"', args=[d.arg(name='readinessGates', type=d.T.array)]), withReadinessGates(readinessGates): { readinessGates: if std.isArray(v=readinessGates) then readinessGates else [readinessGates] }, - '#withReadinessGatesMixin':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/0007-pod-ready%2B%2B.md"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='readinessGates', type=d.T.array)]), + '#withReadinessGatesMixin':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='readinessGates', type=d.T.array)]), withReadinessGatesMixin(readinessGates): { readinessGates+: if std.isArray(v=readinessGates) then readinessGates else [readinessGates] }, - '#withRestartPolicy':: d.fn(help='"Restart policy for all containers within the pod. One of Always, OnFailure, Never. Default to Always. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy"', args=[d.arg(name='restartPolicy', type=d.T.string)]), + '#withResourceClaims':: d.fn(help='"ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. 
The resources will be made available to those containers which consume them by name.\\n\\nThis is an alpha field and requires enabling the DynamicResourceAllocation feature gate.\\n\\nThis field is immutable."', args=[d.arg(name='resourceClaims', type=d.T.array)]), + withResourceClaims(resourceClaims): { resourceClaims: if std.isArray(v=resourceClaims) then resourceClaims else [resourceClaims] }, + '#withResourceClaimsMixin':: d.fn(help='"ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. The resources will be made available to those containers which consume them by name.\\n\\nThis is an alpha field and requires enabling the DynamicResourceAllocation feature gate.\\n\\nThis field is immutable."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='resourceClaims', type=d.T.array)]), + withResourceClaimsMixin(resourceClaims): { resourceClaims+: if std.isArray(v=resourceClaims) then resourceClaims else [resourceClaims] }, + '#withRestartPolicy':: d.fn(help='"Restart policy for all containers within the pod. One of Always, OnFailure, Never. In some contexts, only a subset of those values may be permitted. Default to Always. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy"', args=[d.arg(name='restartPolicy', type=d.T.string)]), withRestartPolicy(restartPolicy): { restartPolicy: restartPolicy }, - '#withRuntimeClassName':: d.fn(help='"RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \\"legacy\\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md This is a beta feature as of Kubernetes v1.14."', args=[d.arg(name='runtimeClassName', type=d.T.string)]), + '#withRuntimeClassName':: d.fn(help='"RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \\"legacy\\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class"', args=[d.arg(name='runtimeClassName', type=d.T.string)]), withRuntimeClassName(runtimeClassName): { runtimeClassName: runtimeClassName }, '#withSchedulerName':: d.fn(help='"If specified, the pod will be dispatched by specified scheduler. If not specified, the pod will be dispatched by default scheduler."', args=[d.arg(name='schedulerName', type=d.T.string)]), withSchedulerName(schedulerName): { schedulerName: schedulerName }, + '#withSchedulingGates':: d.fn(help='"SchedulingGates is an opaque list of values that if specified will block scheduling the pod. 
If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the scheduler will not attempt to schedule the pod.\\n\\nSchedulingGates can only be set at pod creation time, and be removed only afterwards.\\n\\nThis is a beta feature enabled by the PodSchedulingReadiness feature gate."', args=[d.arg(name='schedulingGates', type=d.T.array)]), + withSchedulingGates(schedulingGates): { schedulingGates: if std.isArray(v=schedulingGates) then schedulingGates else [schedulingGates] }, + '#withSchedulingGatesMixin':: d.fn(help='"SchedulingGates is an opaque list of values that if specified will block scheduling the pod. If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the scheduler will not attempt to schedule the pod.\\n\\nSchedulingGates can only be set at pod creation time, and be removed only afterwards.\\n\\nThis is a beta feature enabled by the PodSchedulingReadiness feature gate."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='schedulingGates', type=d.T.array)]), + withSchedulingGatesMixin(schedulingGates): { schedulingGates+: if std.isArray(v=schedulingGates) then schedulingGates else [schedulingGates] }, '#withServiceAccount':: d.fn(help='"DeprecatedServiceAccount is a depreciated alias for ServiceAccountName. Deprecated: Use serviceAccountName instead."', args=[d.arg(name='serviceAccount', type=d.T.string)]), withServiceAccount(serviceAccount): { serviceAccount: serviceAccount }, '#withServiceAccountName':: d.fn(help='"ServiceAccountName is the name of the ServiceAccount to use to run this pod. More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/"', args=[d.arg(name='serviceAccountName', type=d.T.string)]), diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/podStatus.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/podStatus.libsonnet similarity index 67% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/podStatus.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/podStatus.libsonnet index 9601a60537a..e76c07d1286 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/podStatus.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/podStatus.libsonnet @@ -5,16 +5,20 @@ withConditions(conditions): { conditions: if std.isArray(v=conditions) then conditions else [conditions] }, '#withConditionsMixin':: d.fn(help='"Current service state of pod. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='conditions', type=d.T.array)]), withConditionsMixin(conditions): { conditions+: if std.isArray(v=conditions) then conditions else [conditions] }, - '#withContainerStatuses':: d.fn(help='"The list has one entry per container in the manifest. Each entry is currently the output of `docker inspect`. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status"', args=[d.arg(name='containerStatuses', type=d.T.array)]), + '#withContainerStatuses':: d.fn(help='"The list has one entry per container in the manifest. 
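// Illustrative only: wiring the new resourceClaims and schedulingGates setters
// together. Claim and gate names are placeholders; the import path assumes the
// jsonnet-bundler vendor layout used in this repo.
local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';
local podSpec = k.core.v1.podSpec;
local claim = k.core.v1.podResourceClaim;
local gate = k.core.v1.podSchedulingGate;

podSpec.withResourceClaims([
  claim.withName('gpu') + claim.source.withResourceClaimName('shared-gpu'),
])
+ podSpec.withSchedulingGates([gate.withName('example.com/quota-check')])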
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status"', args=[d.arg(name='containerStatuses', type=d.T.array)]), withContainerStatuses(containerStatuses): { containerStatuses: if std.isArray(v=containerStatuses) then containerStatuses else [containerStatuses] }, - '#withContainerStatusesMixin':: d.fn(help='"The list has one entry per container in the manifest. Each entry is currently the output of `docker inspect`. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='containerStatuses', type=d.T.array)]), + '#withContainerStatusesMixin':: d.fn(help='"The list has one entry per container in the manifest. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='containerStatuses', type=d.T.array)]), withContainerStatusesMixin(containerStatuses): { containerStatuses+: if std.isArray(v=containerStatuses) then containerStatuses else [containerStatuses] }, - '#withEphemeralContainerStatuses':: d.fn(help='"Status for any ephemeral containers that have run in this pod. This field is alpha-level and is only populated by servers that enable the EphemeralContainers feature."', args=[d.arg(name='ephemeralContainerStatuses', type=d.T.array)]), + '#withEphemeralContainerStatuses':: d.fn(help='"Status for any ephemeral containers that have run in this pod."', args=[d.arg(name='ephemeralContainerStatuses', type=d.T.array)]), withEphemeralContainerStatuses(ephemeralContainerStatuses): { ephemeralContainerStatuses: if std.isArray(v=ephemeralContainerStatuses) then ephemeralContainerStatuses else [ephemeralContainerStatuses] }, - '#withEphemeralContainerStatusesMixin':: d.fn(help='"Status for any ephemeral containers that have run in this pod. This field is alpha-level and is only populated by servers that enable the EphemeralContainers feature."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ephemeralContainerStatuses', type=d.T.array)]), + '#withEphemeralContainerStatusesMixin':: d.fn(help='"Status for any ephemeral containers that have run in this pod."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ephemeralContainerStatuses', type=d.T.array)]), withEphemeralContainerStatusesMixin(ephemeralContainerStatuses): { ephemeralContainerStatuses+: if std.isArray(v=ephemeralContainerStatuses) then ephemeralContainerStatuses else [ephemeralContainerStatuses] }, - '#withHostIP':: d.fn(help='"IP address of the host to which the pod is assigned. Empty if not yet scheduled."', args=[d.arg(name='hostIP', type=d.T.string)]), + '#withHostIP':: d.fn(help='"hostIP holds the IP address of the host to which the pod is assigned. Empty if the pod has not started yet. A pod can be assigned to a node that has a problem in kubelet which in turns mean that HostIP will not be updated even if there is a node is assigned to pod"', args=[d.arg(name='hostIP', type=d.T.string)]), withHostIP(hostIP): { hostIP: hostIP }, + '#withHostIPs':: d.fn(help='"hostIPs holds the IP addresses allocated to the host. If this field is specified, the first entry must match the hostIP field. This list is empty if the pod has not started yet. 
A pod can be assigned to a node that has a problem in kubelet which in turns means that HostIPs will not be updated even if there is a node is assigned to this pod."', args=[d.arg(name='hostIPs', type=d.T.array)]), + withHostIPs(hostIPs): { hostIPs: if std.isArray(v=hostIPs) then hostIPs else [hostIPs] }, + '#withHostIPsMixin':: d.fn(help='"hostIPs holds the IP addresses allocated to the host. If this field is specified, the first entry must match the hostIP field. This list is empty if the pod has not started yet. A pod can be assigned to a node that has a problem in kubelet which in turns means that HostIPs will not be updated even if there is a node is assigned to this pod."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='hostIPs', type=d.T.array)]), + withHostIPsMixin(hostIPs): { hostIPs+: if std.isArray(v=hostIPs) then hostIPs else [hostIPs] }, '#withInitContainerStatuses':: d.fn(help='"The list has one entry per init container in the manifest. The most recent successful init container will have ready = true, the most recently started container will have startTime set. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status"', args=[d.arg(name='initContainerStatuses', type=d.T.array)]), withInitContainerStatuses(initContainerStatuses): { initContainerStatuses: if std.isArray(v=initContainerStatuses) then initContainerStatuses else [initContainerStatuses] }, '#withInitContainerStatusesMixin':: d.fn(help='"The list has one entry per init container in the manifest. The most recent successful init container will have ready = true, the most recently started container will have startTime set. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='initContainerStatuses', type=d.T.array)]), @@ -25,16 +29,22 @@ withNominatedNodeName(nominatedNodeName): { nominatedNodeName: nominatedNodeName }, '#withPhase':: d.fn(help="\"The phase of a Pod is a simple, high-level summary of where the Pod is in its lifecycle. The conditions array, the reason and message fields, and the individual container status arrays contain more detail about the pod's status. There are five possible phase values:\\n\\nPending: The pod has been accepted by the Kubernetes system, but one or more of the container images has not been created. This includes time before being scheduled as well as time spent downloading images over the network, which could take a while. Running: The pod has been bound to a node, and all of the containers have been created. At least one container is still running, or is in the process of starting or restarting. Succeeded: All containers in the pod have terminated in success, and will not be restarted. Failed: All containers in the pod have terminated, and at least one container has terminated in failure. The container either exited with non-zero status or was terminated by the system. Unknown: For some reason the state of the pod could not be obtained, typically due to an error in communicating with the host of the pod.\\n\\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-phase\"", args=[d.arg(name='phase', type=d.T.string)]), withPhase(phase): { phase: phase }, - '#withPodIP':: d.fn(help='"IP address allocated to the pod. Routable at least within the cluster. 
Empty if not yet allocated."', args=[d.arg(name='podIP', type=d.T.string)]), + '#withPodIP':: d.fn(help='"podIP address allocated to the pod. Routable at least within the cluster. Empty if not yet allocated."', args=[d.arg(name='podIP', type=d.T.string)]), withPodIP(podIP): { podIP: podIP }, '#withPodIPs':: d.fn(help='"podIPs holds the IP addresses allocated to the pod. If this field is specified, the 0th entry must match the podIP field. Pods may be allocated at most 1 value for each of IPv4 and IPv6. This list is empty if no IPs have been allocated yet."', args=[d.arg(name='podIPs', type=d.T.array)]), withPodIPs(podIPs): { podIPs: if std.isArray(v=podIPs) then podIPs else [podIPs] }, '#withPodIPsMixin':: d.fn(help='"podIPs holds the IP addresses allocated to the pod. If this field is specified, the 0th entry must match the podIP field. Pods may be allocated at most 1 value for each of IPv4 and IPv6. This list is empty if no IPs have been allocated yet."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='podIPs', type=d.T.array)]), withPodIPsMixin(podIPs): { podIPs+: if std.isArray(v=podIPs) then podIPs else [podIPs] }, - '#withQosClass':: d.fn(help='"The Quality of Service (QOS) classification assigned to the pod based on resource requirements See PodQOSClass type for available QOS classes More info: https://git.k8s.io/community/contributors/design-proposals/node/resource-qos.md"', args=[d.arg(name='qosClass', type=d.T.string)]), + '#withQosClass':: d.fn(help='"The Quality of Service (QOS) classification assigned to the pod based on resource requirements See PodQOSClass type for available QOS classes More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-qos/#quality-of-service-classes"', args=[d.arg(name='qosClass', type=d.T.string)]), withQosClass(qosClass): { qosClass: qosClass }, '#withReason':: d.fn(help="\"A brief CamelCase message indicating details about why the pod is in this state. e.g. 'Evicted'\"", args=[d.arg(name='reason', type=d.T.string)]), withReason(reason): { reason: reason }, + '#withResize':: d.fn(help="\"Status of resources resize desired for pod's containers. It is empty if no resources resize is pending. Any changes to container resources will automatically set this to \\\"Proposed\\\"\"", args=[d.arg(name='resize', type=d.T.string)]), + withResize(resize): { resize: resize }, + '#withResourceClaimStatuses':: d.fn(help='"Status of resource claims."', args=[d.arg(name='resourceClaimStatuses', type=d.T.array)]), + withResourceClaimStatuses(resourceClaimStatuses): { resourceClaimStatuses: if std.isArray(v=resourceClaimStatuses) then resourceClaimStatuses else [resourceClaimStatuses] }, + '#withResourceClaimStatusesMixin':: d.fn(help='"Status of resource claims."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='resourceClaimStatuses', type=d.T.array)]), + withResourceClaimStatusesMixin(resourceClaimStatuses): { resourceClaimStatuses+: if std.isArray(v=resourceClaimStatuses) then resourceClaimStatuses else [resourceClaimStatuses] }, '#withStartTime':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. 
Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='startTime', type=d.T.string)]), withStartTime(startTime): { startTime: startTime }, '#mixin': 'ignore', diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/podTemplate.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/podTemplate.libsonnet similarity index 85% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/podTemplate.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/podTemplate.libsonnet index f24d76b16cf..4c12ae4ad6e 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/podTemplate.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/podTemplate.libsonnet @@ -3,12 +3,10 @@ '#':: d.pkg(name='podTemplate', url='', help='"PodTemplate describes a template for creating copies of a predefined pod."'), '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. 
This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -19,21 +17,21 @@ withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. 
This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { metadata+: { generateName: generateName } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { metadata+: { labels+: labels } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". 
The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { metadata+: { namespace: namespace } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, @@ -41,9 +39,9 @@ withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. 
They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { metadata+: { uid: uid } }, }, '#new':: d.fn(help='new returns an instance of PodTemplate', args=[d.arg(name='name', type=d.T.string)]), @@ -55,12 +53,10 @@ template: { '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { template+: { metadata+: { annotations: annotations } } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. 
They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { template+: { metadata+: { annotations+: annotations } } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { template+: { metadata+: { clusterName: clusterName } } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { template+: { metadata+: { creationTimestamp: creationTimestamp } } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -71,21 +67,21 @@ withFinalizers(finalizers): { template+: { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { template+: { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. 
The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { template+: { metadata+: { generateName: generateName } } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { template+: { metadata+: { generation: generation } } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { template+: { metadata+: { labels: labels } } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { template+: { metadata+: { labels+: labels } } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. 
This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { template+: { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { template+: { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { template+: { metadata+: { name: name } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { template+: { metadata+: { namespace: namespace } } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. 
If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { template+: { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } } }, @@ -93,9 +89,9 @@ withOwnerReferencesMixin(ownerReferences): { template+: { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { template+: { metadata+: { resourceVersion: resourceVersion } } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { template+: { metadata+: { selfLink: selfLink } } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { template+: { metadata+: { uid: uid } } }, }, '#spec':: d.obj(help='"PodSpec is a description of a pod."'), @@ -154,6 +150,11 @@ '#withSearchesMixin':: d.fn(help='"A list of DNS search domains for host-name lookup. This will be appended to the base search paths generated from DNSPolicy. Duplicated search paths will be removed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='searches', type=d.T.array)]), withSearchesMixin(searches): { template+: { spec+: { dnsConfig+: { searches+: if std.isArray(v=searches) then searches else [searches] } } } }, }, + '#os':: d.obj(help='"PodOS defines the OS parameters of a pod."'), + os: { + '#withName':: d.fn(help='"Name is the name of the operating system. The currently supported values are linux and windows. 
Additional value may be defined in future and can be one of: https://github.com/opencontainers/runtime-spec/blob/master/config.md#platform-specific-configuration Clients should expect to handle additional values and treat unrecognized values in this field as os: null"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { template+: { spec+: { os+: { name: name } } } }, + }, '#securityContext':: d.obj(help='"PodSecurityContext holds pod-level security attributes and common container settings. Some fields are also present in container.securityContext. Field values of container.securityContext take precedence over field values of PodSecurityContext."'), securityContext: { '#seLinuxOptions':: d.obj(help='"SELinuxOptions are the labels to be applied to the container"'), @@ -169,7 +170,7 @@ }, '#seccompProfile':: d.obj(help="\"SeccompProfile defines a pod/container's seccomp profile settings. Only one profile source may be set.\""), seccompProfile: { - '#withLocalhostProfile':: d.fn(help="\"localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must only be set if type is \\\"Localhost\\\".\"", args=[d.arg(name='localhostProfile', type=d.T.string)]), + '#withLocalhostProfile':: d.fn(help="\"localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must be set if type is \\\"Localhost\\\". Must NOT be set for any other type.\"", args=[d.arg(name='localhostProfile', type=d.T.string)]), withLocalhostProfile(localhostProfile): { template+: { spec+: { securityContext+: { seccompProfile+: { localhostProfile: localhostProfile } } } } }, '#withType':: d.fn(help='"type indicates which kind of seccomp profile will be applied. Valid options are:\\n\\nLocalhost - a profile defined in a file on the node should be used. RuntimeDefault - the container runtime default profile should be used. Unconfined - no profile should be applied."', args=[d.arg(name='type', type=d.T.string)]), withType(type): { template+: { spec+: { securityContext+: { seccompProfile+: { type: type } } } } }, @@ -180,26 +181,28 @@ withGmsaCredentialSpec(gmsaCredentialSpec): { template+: { spec+: { securityContext+: { windowsOptions+: { gmsaCredentialSpec: gmsaCredentialSpec } } } } }, '#withGmsaCredentialSpecName':: d.fn(help='"GMSACredentialSpecName is the name of the GMSA credential spec to use."', args=[d.arg(name='gmsaCredentialSpecName', type=d.T.string)]), withGmsaCredentialSpecName(gmsaCredentialSpecName): { template+: { spec+: { securityContext+: { windowsOptions+: { gmsaCredentialSpecName: gmsaCredentialSpecName } } } } }, + '#withHostProcess':: d.fn(help="\"HostProcess determines if a container should be run as a 'Host Process' container. All of a Pod's containers must have the same effective HostProcess value (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). In addition, if HostProcess is true then HostNetwork must also be set to true.\"", args=[d.arg(name='hostProcess', type=d.T.boolean)]), + withHostProcess(hostProcess): { template+: { spec+: { securityContext+: { windowsOptions+: { hostProcess: hostProcess } } } } }, '#withRunAsUserName':: d.fn(help='"The UserName in Windows to run the entrypoint of the container process. 
Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence."', args=[d.arg(name='runAsUserName', type=d.T.string)]), withRunAsUserName(runAsUserName): { template+: { spec+: { securityContext+: { windowsOptions+: { runAsUserName: runAsUserName } } } } }, }, - '#withFsGroup':: d.fn(help="\"A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod:\\n\\n1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw----\\n\\nIf unset, the Kubelet will not modify the ownership and permissions of any volume.\"", args=[d.arg(name='fsGroup', type=d.T.integer)]), + '#withFsGroup':: d.fn(help="\"A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod:\\n\\n1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw----\\n\\nIf unset, the Kubelet will not modify the ownership and permissions of any volume. Note that this field cannot be set when spec.os.name is windows.\"", args=[d.arg(name='fsGroup', type=d.T.integer)]), withFsGroup(fsGroup): { template+: { spec+: { securityContext+: { fsGroup: fsGroup } } } }, - '#withFsGroupChangePolicy':: d.fn(help='"fsGroupChangePolicy defines behavior of changing ownership and permission of the volume before being exposed inside Pod. This field will only apply to volume types which support fsGroup based ownership(and permissions). It will have no effect on ephemeral volume types such as: secret, configmaps and emptydir. Valid values are \\"OnRootMismatch\\" and \\"Always\\". If not specified, \\"Always\\" is used."', args=[d.arg(name='fsGroupChangePolicy', type=d.T.string)]), + '#withFsGroupChangePolicy':: d.fn(help='"fsGroupChangePolicy defines behavior of changing ownership and permission of the volume before being exposed inside Pod. This field will only apply to volume types which support fsGroup based ownership(and permissions). It will have no effect on ephemeral volume types such as: secret, configmaps and emptydir. Valid values are \\"OnRootMismatch\\" and \\"Always\\". If not specified, \\"Always\\" is used. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='fsGroupChangePolicy', type=d.T.string)]), withFsGroupChangePolicy(fsGroupChangePolicy): { template+: { spec+: { securityContext+: { fsGroupChangePolicy: fsGroupChangePolicy } } } }, - '#withRunAsGroup':: d.fn(help='"The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container."', args=[d.arg(name='runAsGroup', type=d.T.integer)]), + '#withRunAsGroup':: d.fn(help='"The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. 
Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='runAsGroup', type=d.T.integer)]), withRunAsGroup(runAsGroup): { template+: { spec+: { securityContext+: { runAsGroup: runAsGroup } } } }, '#withRunAsNonRoot':: d.fn(help='"Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence."', args=[d.arg(name='runAsNonRoot', type=d.T.boolean)]), withRunAsNonRoot(runAsNonRoot): { template+: { spec+: { securityContext+: { runAsNonRoot: runAsNonRoot } } } }, - '#withRunAsUser':: d.fn(help='"The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container."', args=[d.arg(name='runAsUser', type=d.T.integer)]), + '#withRunAsUser':: d.fn(help='"The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='runAsUser', type=d.T.integer)]), withRunAsUser(runAsUser): { template+: { spec+: { securityContext+: { runAsUser: runAsUser } } } }, - '#withSupplementalGroups':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID. If unspecified, no groups will be added to any container.\"", args=[d.arg(name='supplementalGroups', type=d.T.array)]), + '#withSupplementalGroups':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID, the fsGroup (if specified), and group memberships defined in the container image for the uid of the container process. If unspecified, no additional groups are added to any container. Note that group memberships defined in the container image for the uid of the container process are still effective, even if they are not included in this list. Note that this field cannot be set when spec.os.name is windows.\"", args=[d.arg(name='supplementalGroups', type=d.T.array)]), withSupplementalGroups(supplementalGroups): { template+: { spec+: { securityContext+: { supplementalGroups: if std.isArray(v=supplementalGroups) then supplementalGroups else [supplementalGroups] } } } }, - '#withSupplementalGroupsMixin':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID. If unspecified, no groups will be added to any container.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='supplementalGroups', type=d.T.array)]), + '#withSupplementalGroupsMixin':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID, the fsGroup (if specified), and group memberships defined in the container image for the uid of the container process. If unspecified, no additional groups are added to any container. 
Note that group memberships defined in the container image for the uid of the container process are still effective, even if they are not included in this list. Note that this field cannot be set when spec.os.name is windows.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='supplementalGroups', type=d.T.array)]), withSupplementalGroupsMixin(supplementalGroups): { template+: { spec+: { securityContext+: { supplementalGroups+: if std.isArray(v=supplementalGroups) then supplementalGroups else [supplementalGroups] } } } }, - '#withSysctls':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch."', args=[d.arg(name='sysctls', type=d.T.array)]), + '#withSysctls':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='sysctls', type=d.T.array)]), withSysctls(sysctls): { template+: { spec+: { securityContext+: { sysctls: if std.isArray(v=sysctls) then sysctls else [sysctls] } } } }, - '#withSysctlsMixin':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='sysctls', type=d.T.array)]), + '#withSysctlsMixin':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch. Note that this field cannot be set when spec.os.name is windows."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='sysctls', type=d.T.array)]), withSysctlsMixin(sysctls): { template+: { spec+: { securityContext+: { sysctls+: if std.isArray(v=sysctls) then sysctls else [sysctls] } } } }, }, '#withActiveDeadlineSeconds':: d.fn(help='"Optional duration in seconds the pod may be active on the node relative to StartTime before the system will actively try to mark it failed and kill associated containers. Value must be a positive integer."', args=[d.arg(name='activeDeadlineSeconds', type=d.T.integer)]), @@ -214,9 +217,9 @@ withDnsPolicy(dnsPolicy): { template+: { spec+: { dnsPolicy: dnsPolicy } } }, '#withEnableServiceLinks':: d.fn(help="\"EnableServiceLinks indicates whether information about services should be injected into pod's environment variables, matching the syntax of Docker links. Optional: Defaults to true.\"", args=[d.arg(name='enableServiceLinks', type=d.T.boolean)]), withEnableServiceLinks(enableServiceLinks): { template+: { spec+: { enableServiceLinks: enableServiceLinks } } }, - '#withEphemeralContainers':: d.fn(help="\"List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. This field is alpha-level and is only honored by servers that enable the EphemeralContainers feature.\"", args=[d.arg(name='ephemeralContainers', type=d.T.array)]), + '#withEphemeralContainers':: d.fn(help="\"List of ephemeral containers run in this pod. 
Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource.\"", args=[d.arg(name='ephemeralContainers', type=d.T.array)]), withEphemeralContainers(ephemeralContainers): { template+: { spec+: { ephemeralContainers: if std.isArray(v=ephemeralContainers) then ephemeralContainers else [ephemeralContainers] } } }, - '#withEphemeralContainersMixin':: d.fn(help="\"List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. This field is alpha-level and is only honored by servers that enable the EphemeralContainers feature.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='ephemeralContainers', type=d.T.array)]), + '#withEphemeralContainersMixin':: d.fn(help="\"List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='ephemeralContainers', type=d.T.array)]), withEphemeralContainersMixin(ephemeralContainers): { template+: { spec+: { ephemeralContainers+: if std.isArray(v=ephemeralContainers) then ephemeralContainers else [ephemeralContainers] } } }, '#withHostAliases':: d.fn(help="\"HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts file if specified. This is only valid for non-hostNetwork pods.\"", args=[d.arg(name='hostAliases', type=d.T.array)]), withHostAliases(hostAliases): { template+: { spec+: { hostAliases: if std.isArray(v=hostAliases) then hostAliases else [hostAliases] } } }, @@ -228,11 +231,13 @@ withHostNetwork(hostNetwork): { template+: { spec+: { hostNetwork: hostNetwork } } }, '#withHostPID':: d.fn(help="\"Use the host's pid namespace. Optional: Default to false.\"", args=[d.arg(name='hostPID', type=d.T.boolean)]), withHostPID(hostPID): { template+: { spec+: { hostPID: hostPID } } }, + '#withHostUsers':: d.fn(help="\"Use the host's user namespace. Optional: Default to true. If set to true or not present, the pod will be run in the host user namespace, useful for when the pod needs a feature only available to the host user namespace, such as loading a kernel module with CAP_SYS_MODULE. When set to false, a new userns is created for the pod. Setting false is useful for mitigating container breakout vulnerabilities even allowing users to run their containers as root without actually having root privileges on the host. 
This field is alpha-level and is only honored by servers that enable the UserNamespacesSupport feature.\"", args=[d.arg(name='hostUsers', type=d.T.boolean)]), + withHostUsers(hostUsers): { template+: { spec+: { hostUsers: hostUsers } } }, '#withHostname':: d.fn(help="\"Specifies the hostname of the Pod If not specified, the pod's hostname will be set to a system-defined value.\"", args=[d.arg(name='hostname', type=d.T.string)]), withHostname(hostname): { template+: { spec+: { hostname: hostname } } }, - '#withImagePullSecrets':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), + '#withImagePullSecrets':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), withImagePullSecrets(imagePullSecrets): { template+: { spec+: { imagePullSecrets: if std.isArray(v=imagePullSecrets) then imagePullSecrets else [imagePullSecrets] } } }, - '#withImagePullSecretsMixin':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), + '#withImagePullSecretsMixin':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), withImagePullSecretsMixin(imagePullSecrets): { template+: { spec+: { imagePullSecrets+: if std.isArray(v=imagePullSecrets) then imagePullSecrets else [imagePullSecrets] } } }, '#withInitContainers':: d.fn(help='"List of initialization containers belonging to the pod. Init containers are executed in order prior to containers being started. If any init container fails, the pod is considered to have failed and is handled according to its restartPolicy. The name for an init container or normal container must be unique among all containers. Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes. 
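// Usage sketch for the new 1.29 `hostUsers` field documented above: a minimal
// Tanka-style example, assuming the usual k8s-libsonnet import path and the
// deployment.new()/container.new() helpers; the names and image are illustrative only.
local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';
local deployment = k.apps.v1.deployment;
local container = k.core.v1.container;

{
  demo_deployment:
    deployment.new(name='demo', replicas=1, containers=[container.new('app', 'nginx:1.25')])
    // Opt the pod out of the host user namespace; per the help text above this is
    // alpha-level and only honored when the UserNamespacesSupport feature gate is enabled.
    + deployment.spec.template.spec.withHostUsers(false),
}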
The resourceRequirements of an init container are taken into account during scheduling by finding the highest request/limit for each resource type, and then using the max of of that value or the sum of the normal containers. Limits are applied to init containers in a similar fashion. Init containers cannot currently be added or removed. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/"', args=[d.arg(name='initContainers', type=d.T.array)]), withInitContainers(initContainers): { template+: { spec+: { initContainers: if std.isArray(v=initContainers) then initContainers else [initContainers] } } }, @@ -244,26 +249,34 @@ withNodeSelector(nodeSelector): { template+: { spec+: { nodeSelector: nodeSelector } } }, '#withNodeSelectorMixin':: d.fn(help="\"NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='nodeSelector', type=d.T.object)]), withNodeSelectorMixin(nodeSelector): { template+: { spec+: { nodeSelector+: nodeSelector } } }, - '#withOverhead':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md This field is alpha-level as of Kubernetes v1.16, and is only honored by servers that enable the PodOverhead feature."', args=[d.arg(name='overhead', type=d.T.object)]), + '#withOverhead':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md"', args=[d.arg(name='overhead', type=d.T.object)]), withOverhead(overhead): { template+: { spec+: { overhead: overhead } } }, - '#withOverheadMixin':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. 
If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md This field is alpha-level as of Kubernetes v1.16, and is only honored by servers that enable the PodOverhead feature."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='overhead', type=d.T.object)]), + '#withOverheadMixin':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='overhead', type=d.T.object)]), withOverheadMixin(overhead): { template+: { spec+: { overhead+: overhead } } }, - '#withPreemptionPolicy':: d.fn(help='"PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset. This field is beta-level, gated by the NonPreemptingPriority feature-gate."', args=[d.arg(name='preemptionPolicy', type=d.T.string)]), + '#withPreemptionPolicy':: d.fn(help='"PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset."', args=[d.arg(name='preemptionPolicy', type=d.T.string)]), withPreemptionPolicy(preemptionPolicy): { template+: { spec+: { preemptionPolicy: preemptionPolicy } } }, '#withPriority':: d.fn(help='"The priority value. Various system components use this field to find the priority of the pod. When Priority Admission Controller is enabled, it prevents users from setting this field. The admission controller populates this field from PriorityClassName. The higher the value, the higher the priority."', args=[d.arg(name='priority', type=d.T.integer)]), withPriority(priority): { template+: { spec+: { priority: priority } } }, '#withPriorityClassName':: d.fn(help="\"If specified, indicates the pod's priority. \\\"system-node-critical\\\" and \\\"system-cluster-critical\\\" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default.\"", args=[d.arg(name='priorityClassName', type=d.T.string)]), withPriorityClassName(priorityClassName): { template+: { spec+: { priorityClassName: priorityClassName } } }, - '#withReadinessGates':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. 
A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/0007-pod-ready%2B%2B.md"', args=[d.arg(name='readinessGates', type=d.T.array)]), + '#withReadinessGates':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates"', args=[d.arg(name='readinessGates', type=d.T.array)]), withReadinessGates(readinessGates): { template+: { spec+: { readinessGates: if std.isArray(v=readinessGates) then readinessGates else [readinessGates] } } }, - '#withReadinessGatesMixin':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/0007-pod-ready%2B%2B.md"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='readinessGates', type=d.T.array)]), + '#withReadinessGatesMixin':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='readinessGates', type=d.T.array)]), withReadinessGatesMixin(readinessGates): { template+: { spec+: { readinessGates+: if std.isArray(v=readinessGates) then readinessGates else [readinessGates] } } }, - '#withRestartPolicy':: d.fn(help='"Restart policy for all containers within the pod. One of Always, OnFailure, Never. Default to Always. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy"', args=[d.arg(name='restartPolicy', type=d.T.string)]), + '#withResourceClaims':: d.fn(help='"ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. The resources will be made available to those containers which consume them by name.\\n\\nThis is an alpha field and requires enabling the DynamicResourceAllocation feature gate.\\n\\nThis field is immutable."', args=[d.arg(name='resourceClaims', type=d.T.array)]), + withResourceClaims(resourceClaims): { template+: { spec+: { resourceClaims: if std.isArray(v=resourceClaims) then resourceClaims else [resourceClaims] } } }, + '#withResourceClaimsMixin':: d.fn(help='"ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. The resources will be made available to those containers which consume them by name.\\n\\nThis is an alpha field and requires enabling the DynamicResourceAllocation feature gate.\\n\\nThis field is immutable."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='resourceClaims', type=d.T.array)]), + withResourceClaimsMixin(resourceClaims): { template+: { spec+: { resourceClaims+: if std.isArray(v=resourceClaims) then resourceClaims else [resourceClaims] } } }, + '#withRestartPolicy':: d.fn(help='"Restart policy for all containers within the pod. One of Always, OnFailure, Never. 
In some contexts, only a subset of those values may be permitted. Default to Always. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy"', args=[d.arg(name='restartPolicy', type=d.T.string)]), withRestartPolicy(restartPolicy): { template+: { spec+: { restartPolicy: restartPolicy } } }, - '#withRuntimeClassName':: d.fn(help='"RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \\"legacy\\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md This is a beta feature as of Kubernetes v1.14."', args=[d.arg(name='runtimeClassName', type=d.T.string)]), + '#withRuntimeClassName':: d.fn(help='"RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \\"legacy\\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class"', args=[d.arg(name='runtimeClassName', type=d.T.string)]), withRuntimeClassName(runtimeClassName): { template+: { spec+: { runtimeClassName: runtimeClassName } } }, '#withSchedulerName':: d.fn(help='"If specified, the pod will be dispatched by specified scheduler. If not specified, the pod will be dispatched by default scheduler."', args=[d.arg(name='schedulerName', type=d.T.string)]), withSchedulerName(schedulerName): { template+: { spec+: { schedulerName: schedulerName } } }, + '#withSchedulingGates':: d.fn(help='"SchedulingGates is an opaque list of values that if specified will block scheduling the pod. If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the scheduler will not attempt to schedule the pod.\\n\\nSchedulingGates can only be set at pod creation time, and be removed only afterwards.\\n\\nThis is a beta feature enabled by the PodSchedulingReadiness feature gate."', args=[d.arg(name='schedulingGates', type=d.T.array)]), + withSchedulingGates(schedulingGates): { template+: { spec+: { schedulingGates: if std.isArray(v=schedulingGates) then schedulingGates else [schedulingGates] } } }, + '#withSchedulingGatesMixin':: d.fn(help='"SchedulingGates is an opaque list of values that if specified will block scheduling the pod. If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the scheduler will not attempt to schedule the pod.\\n\\nSchedulingGates can only be set at pod creation time, and be removed only afterwards.\\n\\nThis is a beta feature enabled by the PodSchedulingReadiness feature gate."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='schedulingGates', type=d.T.array)]), + withSchedulingGatesMixin(schedulingGates): { template+: { spec+: { schedulingGates+: if std.isArray(v=schedulingGates) then schedulingGates else [schedulingGates] } } }, '#withServiceAccount':: d.fn(help='"DeprecatedServiceAccount is a depreciated alias for ServiceAccountName. 
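// Usage sketch for the new `schedulingGates` and `resourceClaims` functions documented
// above, assuming the same k8s-libsonnet 1.29 import path; the gate name, claim name and
// claim-template name are illustrative, and the claim entry assumes the 1.29
// PodResourceClaim shape (name + source.resourceClaimTemplateName).
local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';
local deployment = k.apps.v1.deployment;
local container = k.core.v1.container;

{
  gated_deployment:
    deployment.new(name='gated', replicas=1, containers=[container.new('app', 'nginx:1.25')])
    // Pods stay in SchedulingGated until every gate is removed from the pod spec
    // (PodSchedulingReadiness is a beta feature in 1.29, per the help text above).
    + deployment.spec.template.spec.withSchedulingGates([{ name: 'example.com/wait-for-quota' }])
    // ResourceClaims is alpha and requires the DynamicResourceAllocation feature gate.
    + deployment.spec.template.spec.withResourceClaims([
      { name: 'shared-gpu', source: { resourceClaimTemplateName: 'gpu-claim-template' } },
    ]),
}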
Deprecated: Use serviceAccountName instead."', args=[d.arg(name='serviceAccount', type=d.T.string)]), withServiceAccount(serviceAccount): { template+: { spec+: { serviceAccount: serviceAccount } } }, '#withServiceAccountName':: d.fn(help='"ServiceAccountName is the name of the ServiceAccount to use to run this pod. More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/"', args=[d.arg(name='serviceAccountName', type=d.T.string)]), diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/podTemplateSpec.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/podTemplateSpec.libsonnet similarity index 84% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/podTemplateSpec.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/podTemplateSpec.libsonnet index 44510388a39..d0d92f58f3b 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/podTemplateSpec.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/podTemplateSpec.libsonnet @@ -3,12 +3,10 @@ '#':: d.pkg(name='podTemplateSpec', url='', help='"PodTemplateSpec describes the data a pod should have when created from a template"'), '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. 
This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -19,21 +17,21 @@ withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. 
This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { metadata+: { generateName: generateName } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { metadata+: { labels+: labels } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". 
The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { metadata+: { namespace: namespace } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, @@ -41,9 +39,9 @@ withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. 
They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { metadata+: { uid: uid } }, }, '#spec':: d.obj(help='"PodSpec is a description of a pod."'), @@ -102,6 +100,11 @@ '#withSearchesMixin':: d.fn(help='"A list of DNS search domains for host-name lookup. This will be appended to the base search paths generated from DNSPolicy. Duplicated search paths will be removed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='searches', type=d.T.array)]), withSearchesMixin(searches): { spec+: { dnsConfig+: { searches+: if std.isArray(v=searches) then searches else [searches] } } }, }, + '#os':: d.obj(help='"PodOS defines the OS parameters of a pod."'), + os: { + '#withName':: d.fn(help='"Name is the name of the operating system. The currently supported values are linux and windows. Additional value may be defined in future and can be one of: https://github.com/opencontainers/runtime-spec/blob/master/config.md#platform-specific-configuration Clients should expect to handle additional values and treat unrecognized values in this field as os: null"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { spec+: { os+: { name: name } } }, + }, '#securityContext':: d.obj(help='"PodSecurityContext holds pod-level security attributes and common container settings. Some fields are also present in container.securityContext. Field values of container.securityContext take precedence over field values of PodSecurityContext."'), securityContext: { '#seLinuxOptions':: d.obj(help='"SELinuxOptions are the labels to be applied to the container"'), @@ -117,7 +120,7 @@ }, '#seccompProfile':: d.obj(help="\"SeccompProfile defines a pod/container's seccomp profile settings. 
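// Usage sketch for the new `spec.os` object documented above, built as a standalone
// PodTemplateSpec fragment; assumes the usual k8s-libsonnet 1.29 import path, with an
// illustrative label set.
local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';
local pts = k.core.v1.podTemplateSpec;

// Declaring the pod OS up front documents that the windows-only restrictions in the
// surrounding help text ("cannot be set when spec.os.name is windows") do not apply here.
pts.metadata.withLabels({ app: 'demo' })
+ pts.spec.os.withName('linux')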
Only one profile source may be set.\""), seccompProfile: { - '#withLocalhostProfile':: d.fn(help="\"localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must only be set if type is \\\"Localhost\\\".\"", args=[d.arg(name='localhostProfile', type=d.T.string)]), + '#withLocalhostProfile':: d.fn(help="\"localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must be set if type is \\\"Localhost\\\". Must NOT be set for any other type.\"", args=[d.arg(name='localhostProfile', type=d.T.string)]), withLocalhostProfile(localhostProfile): { spec+: { securityContext+: { seccompProfile+: { localhostProfile: localhostProfile } } } }, '#withType':: d.fn(help='"type indicates which kind of seccomp profile will be applied. Valid options are:\\n\\nLocalhost - a profile defined in a file on the node should be used. RuntimeDefault - the container runtime default profile should be used. Unconfined - no profile should be applied."', args=[d.arg(name='type', type=d.T.string)]), withType(type): { spec+: { securityContext+: { seccompProfile+: { type: type } } } }, @@ -128,26 +131,28 @@ withGmsaCredentialSpec(gmsaCredentialSpec): { spec+: { securityContext+: { windowsOptions+: { gmsaCredentialSpec: gmsaCredentialSpec } } } }, '#withGmsaCredentialSpecName':: d.fn(help='"GMSACredentialSpecName is the name of the GMSA credential spec to use."', args=[d.arg(name='gmsaCredentialSpecName', type=d.T.string)]), withGmsaCredentialSpecName(gmsaCredentialSpecName): { spec+: { securityContext+: { windowsOptions+: { gmsaCredentialSpecName: gmsaCredentialSpecName } } } }, + '#withHostProcess':: d.fn(help="\"HostProcess determines if a container should be run as a 'Host Process' container. All of a Pod's containers must have the same effective HostProcess value (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). In addition, if HostProcess is true then HostNetwork must also be set to true.\"", args=[d.arg(name='hostProcess', type=d.T.boolean)]), + withHostProcess(hostProcess): { spec+: { securityContext+: { windowsOptions+: { hostProcess: hostProcess } } } }, '#withRunAsUserName':: d.fn(help='"The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence."', args=[d.arg(name='runAsUserName', type=d.T.string)]), withRunAsUserName(runAsUserName): { spec+: { securityContext+: { windowsOptions+: { runAsUserName: runAsUserName } } } }, }, - '#withFsGroup':: d.fn(help="\"A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod:\\n\\n1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. 
The permission bits are OR'd with rw-rw----\\n\\nIf unset, the Kubelet will not modify the ownership and permissions of any volume.\"", args=[d.arg(name='fsGroup', type=d.T.integer)]), + '#withFsGroup':: d.fn(help="\"A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod:\\n\\n1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw----\\n\\nIf unset, the Kubelet will not modify the ownership and permissions of any volume. Note that this field cannot be set when spec.os.name is windows.\"", args=[d.arg(name='fsGroup', type=d.T.integer)]), withFsGroup(fsGroup): { spec+: { securityContext+: { fsGroup: fsGroup } } }, - '#withFsGroupChangePolicy':: d.fn(help='"fsGroupChangePolicy defines behavior of changing ownership and permission of the volume before being exposed inside Pod. This field will only apply to volume types which support fsGroup based ownership(and permissions). It will have no effect on ephemeral volume types such as: secret, configmaps and emptydir. Valid values are \\"OnRootMismatch\\" and \\"Always\\". If not specified, \\"Always\\" is used."', args=[d.arg(name='fsGroupChangePolicy', type=d.T.string)]), + '#withFsGroupChangePolicy':: d.fn(help='"fsGroupChangePolicy defines behavior of changing ownership and permission of the volume before being exposed inside Pod. This field will only apply to volume types which support fsGroup based ownership(and permissions). It will have no effect on ephemeral volume types such as: secret, configmaps and emptydir. Valid values are \\"OnRootMismatch\\" and \\"Always\\". If not specified, \\"Always\\" is used. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='fsGroupChangePolicy', type=d.T.string)]), withFsGroupChangePolicy(fsGroupChangePolicy): { spec+: { securityContext+: { fsGroupChangePolicy: fsGroupChangePolicy } } }, - '#withRunAsGroup':: d.fn(help='"The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container."', args=[d.arg(name='runAsGroup', type=d.T.integer)]), + '#withRunAsGroup':: d.fn(help='"The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='runAsGroup', type=d.T.integer)]), withRunAsGroup(runAsGroup): { spec+: { securityContext+: { runAsGroup: runAsGroup } } }, '#withRunAsNonRoot':: d.fn(help='"Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in SecurityContext. 
If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence."', args=[d.arg(name='runAsNonRoot', type=d.T.boolean)]), withRunAsNonRoot(runAsNonRoot): { spec+: { securityContext+: { runAsNonRoot: runAsNonRoot } } }, - '#withRunAsUser':: d.fn(help='"The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container."', args=[d.arg(name='runAsUser', type=d.T.integer)]), + '#withRunAsUser':: d.fn(help='"The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='runAsUser', type=d.T.integer)]), withRunAsUser(runAsUser): { spec+: { securityContext+: { runAsUser: runAsUser } } }, - '#withSupplementalGroups':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID. If unspecified, no groups will be added to any container.\"", args=[d.arg(name='supplementalGroups', type=d.T.array)]), + '#withSupplementalGroups':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID, the fsGroup (if specified), and group memberships defined in the container image for the uid of the container process. If unspecified, no additional groups are added to any container. Note that group memberships defined in the container image for the uid of the container process are still effective, even if they are not included in this list. Note that this field cannot be set when spec.os.name is windows.\"", args=[d.arg(name='supplementalGroups', type=d.T.array)]), withSupplementalGroups(supplementalGroups): { spec+: { securityContext+: { supplementalGroups: if std.isArray(v=supplementalGroups) then supplementalGroups else [supplementalGroups] } } }, - '#withSupplementalGroupsMixin':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID. If unspecified, no groups will be added to any container.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='supplementalGroups', type=d.T.array)]), + '#withSupplementalGroupsMixin':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID, the fsGroup (if specified), and group memberships defined in the container image for the uid of the container process. If unspecified, no additional groups are added to any container. Note that group memberships defined in the container image for the uid of the container process are still effective, even if they are not included in this list. 
Note that this field cannot be set when spec.os.name is windows.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='supplementalGroups', type=d.T.array)]), withSupplementalGroupsMixin(supplementalGroups): { spec+: { securityContext+: { supplementalGroups+: if std.isArray(v=supplementalGroups) then supplementalGroups else [supplementalGroups] } } }, - '#withSysctls':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch."', args=[d.arg(name='sysctls', type=d.T.array)]), + '#withSysctls':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='sysctls', type=d.T.array)]), withSysctls(sysctls): { spec+: { securityContext+: { sysctls: if std.isArray(v=sysctls) then sysctls else [sysctls] } } }, - '#withSysctlsMixin':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='sysctls', type=d.T.array)]), + '#withSysctlsMixin':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch. Note that this field cannot be set when spec.os.name is windows."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='sysctls', type=d.T.array)]), withSysctlsMixin(sysctls): { spec+: { securityContext+: { sysctls+: if std.isArray(v=sysctls) then sysctls else [sysctls] } } }, }, '#withActiveDeadlineSeconds':: d.fn(help='"Optional duration in seconds the pod may be active on the node relative to StartTime before the system will actively try to mark it failed and kill associated containers. Value must be a positive integer."', args=[d.arg(name='activeDeadlineSeconds', type=d.T.integer)]), @@ -162,9 +167,9 @@ withDnsPolicy(dnsPolicy): { spec+: { dnsPolicy: dnsPolicy } }, '#withEnableServiceLinks':: d.fn(help="\"EnableServiceLinks indicates whether information about services should be injected into pod's environment variables, matching the syntax of Docker links. Optional: Defaults to true.\"", args=[d.arg(name='enableServiceLinks', type=d.T.boolean)]), withEnableServiceLinks(enableServiceLinks): { spec+: { enableServiceLinks: enableServiceLinks } }, - '#withEphemeralContainers':: d.fn(help="\"List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. This field is alpha-level and is only honored by servers that enable the EphemeralContainers feature.\"", args=[d.arg(name='ephemeralContainers', type=d.T.array)]), + '#withEphemeralContainers':: d.fn(help="\"List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. 
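// Usage sketch tying the pod-level securityContext functions above to the `spec.os`
// object: the fields below are only valid for linux pods, per the repeated
// "cannot be set when spec.os.name is windows" notes. Assumes the same k8s-libsonnet
// 1.29 import path; the uid/gid values are illustrative.
local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';
local pts = k.core.v1.podTemplateSpec;

pts.spec.os.withName('linux')
+ pts.spec.securityContext.withRunAsNonRoot(true)
+ pts.spec.securityContext.withRunAsUser(10001)
+ pts.spec.securityContext.withRunAsGroup(10001)
+ pts.spec.securityContext.withFsGroup(10001)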
In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource.\"", args=[d.arg(name='ephemeralContainers', type=d.T.array)]), withEphemeralContainers(ephemeralContainers): { spec+: { ephemeralContainers: if std.isArray(v=ephemeralContainers) then ephemeralContainers else [ephemeralContainers] } }, - '#withEphemeralContainersMixin':: d.fn(help="\"List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. This field is alpha-level and is only honored by servers that enable the EphemeralContainers feature.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='ephemeralContainers', type=d.T.array)]), + '#withEphemeralContainersMixin':: d.fn(help="\"List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='ephemeralContainers', type=d.T.array)]), withEphemeralContainersMixin(ephemeralContainers): { spec+: { ephemeralContainers+: if std.isArray(v=ephemeralContainers) then ephemeralContainers else [ephemeralContainers] } }, '#withHostAliases':: d.fn(help="\"HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts file if specified. This is only valid for non-hostNetwork pods.\"", args=[d.arg(name='hostAliases', type=d.T.array)]), withHostAliases(hostAliases): { spec+: { hostAliases: if std.isArray(v=hostAliases) then hostAliases else [hostAliases] } }, @@ -176,11 +181,13 @@ withHostNetwork(hostNetwork): { spec+: { hostNetwork: hostNetwork } }, '#withHostPID':: d.fn(help="\"Use the host's pid namespace. Optional: Default to false.\"", args=[d.arg(name='hostPID', type=d.T.boolean)]), withHostPID(hostPID): { spec+: { hostPID: hostPID } }, + '#withHostUsers':: d.fn(help="\"Use the host's user namespace. Optional: Default to true. If set to true or not present, the pod will be run in the host user namespace, useful for when the pod needs a feature only available to the host user namespace, such as loading a kernel module with CAP_SYS_MODULE. When set to false, a new userns is created for the pod. Setting false is useful for mitigating container breakout vulnerabilities even allowing users to run their containers as root without actually having root privileges on the host. 
This field is alpha-level and is only honored by servers that enable the UserNamespacesSupport feature.\"", args=[d.arg(name='hostUsers', type=d.T.boolean)]), + withHostUsers(hostUsers): { spec+: { hostUsers: hostUsers } }, '#withHostname':: d.fn(help="\"Specifies the hostname of the Pod If not specified, the pod's hostname will be set to a system-defined value.\"", args=[d.arg(name='hostname', type=d.T.string)]), withHostname(hostname): { spec+: { hostname: hostname } }, - '#withImagePullSecrets':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), + '#withImagePullSecrets':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), withImagePullSecrets(imagePullSecrets): { spec+: { imagePullSecrets: if std.isArray(v=imagePullSecrets) then imagePullSecrets else [imagePullSecrets] } }, - '#withImagePullSecretsMixin':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), + '#withImagePullSecretsMixin':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), withImagePullSecretsMixin(imagePullSecrets): { spec+: { imagePullSecrets+: if std.isArray(v=imagePullSecrets) then imagePullSecrets else [imagePullSecrets] } }, '#withInitContainers':: d.fn(help='"List of initialization containers belonging to the pod. Init containers are executed in order prior to containers being started. If any init container fails, the pod is considered to have failed and is handled according to its restartPolicy. The name for an init container or normal container must be unique among all containers. Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes. The resourceRequirements of an init container are taken into account during scheduling by finding the highest request/limit for each resource type, and then using the max of of that value or the sum of the normal containers. 
Limits are applied to init containers in a similar fashion. Init containers cannot currently be added or removed. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/"', args=[d.arg(name='initContainers', type=d.T.array)]), withInitContainers(initContainers): { spec+: { initContainers: if std.isArray(v=initContainers) then initContainers else [initContainers] } }, @@ -192,26 +199,34 @@ withNodeSelector(nodeSelector): { spec+: { nodeSelector: nodeSelector } }, '#withNodeSelectorMixin':: d.fn(help="\"NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='nodeSelector', type=d.T.object)]), withNodeSelectorMixin(nodeSelector): { spec+: { nodeSelector+: nodeSelector } }, - '#withOverhead':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md This field is alpha-level as of Kubernetes v1.16, and is only honored by servers that enable the PodOverhead feature."', args=[d.arg(name='overhead', type=d.T.object)]), + '#withOverhead':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md"', args=[d.arg(name='overhead', type=d.T.object)]), withOverhead(overhead): { spec+: { overhead: overhead } }, - '#withOverheadMixin':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. 
More info: https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md This field is alpha-level as of Kubernetes v1.16, and is only honored by servers that enable the PodOverhead feature."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='overhead', type=d.T.object)]), + '#withOverheadMixin':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='overhead', type=d.T.object)]), withOverheadMixin(overhead): { spec+: { overhead+: overhead } }, - '#withPreemptionPolicy':: d.fn(help='"PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset. This field is beta-level, gated by the NonPreemptingPriority feature-gate."', args=[d.arg(name='preemptionPolicy', type=d.T.string)]), + '#withPreemptionPolicy':: d.fn(help='"PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset."', args=[d.arg(name='preemptionPolicy', type=d.T.string)]), withPreemptionPolicy(preemptionPolicy): { spec+: { preemptionPolicy: preemptionPolicy } }, '#withPriority':: d.fn(help='"The priority value. Various system components use this field to find the priority of the pod. When Priority Admission Controller is enabled, it prevents users from setting this field. The admission controller populates this field from PriorityClassName. The higher the value, the higher the priority."', args=[d.arg(name='priority', type=d.T.integer)]), withPriority(priority): { spec+: { priority: priority } }, '#withPriorityClassName':: d.fn(help="\"If specified, indicates the pod's priority. \\\"system-node-critical\\\" and \\\"system-cluster-critical\\\" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default.\"", args=[d.arg(name='priorityClassName', type=d.T.string)]), withPriorityClassName(priorityClassName): { spec+: { priorityClassName: priorityClassName } }, - '#withReadinessGates':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/0007-pod-ready%2B%2B.md"', args=[d.arg(name='readinessGates', type=d.T.array)]), + '#withReadinessGates':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. 
A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates"', args=[d.arg(name='readinessGates', type=d.T.array)]), withReadinessGates(readinessGates): { spec+: { readinessGates: if std.isArray(v=readinessGates) then readinessGates else [readinessGates] } }, - '#withReadinessGatesMixin':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/0007-pod-ready%2B%2B.md"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='readinessGates', type=d.T.array)]), + '#withReadinessGatesMixin':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='readinessGates', type=d.T.array)]), withReadinessGatesMixin(readinessGates): { spec+: { readinessGates+: if std.isArray(v=readinessGates) then readinessGates else [readinessGates] } }, - '#withRestartPolicy':: d.fn(help='"Restart policy for all containers within the pod. One of Always, OnFailure, Never. Default to Always. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy"', args=[d.arg(name='restartPolicy', type=d.T.string)]), + '#withResourceClaims':: d.fn(help='"ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. The resources will be made available to those containers which consume them by name.\\n\\nThis is an alpha field and requires enabling the DynamicResourceAllocation feature gate.\\n\\nThis field is immutable."', args=[d.arg(name='resourceClaims', type=d.T.array)]), + withResourceClaims(resourceClaims): { spec+: { resourceClaims: if std.isArray(v=resourceClaims) then resourceClaims else [resourceClaims] } }, + '#withResourceClaimsMixin':: d.fn(help='"ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. The resources will be made available to those containers which consume them by name.\\n\\nThis is an alpha field and requires enabling the DynamicResourceAllocation feature gate.\\n\\nThis field is immutable."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='resourceClaims', type=d.T.array)]), + withResourceClaimsMixin(resourceClaims): { spec+: { resourceClaims+: if std.isArray(v=resourceClaims) then resourceClaims else [resourceClaims] } }, + '#withRestartPolicy':: d.fn(help='"Restart policy for all containers within the pod. One of Always, OnFailure, Never. In some contexts, only a subset of those values may be permitted. Default to Always. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy"', args=[d.arg(name='restartPolicy', type=d.T.string)]), withRestartPolicy(restartPolicy): { spec+: { restartPolicy: restartPolicy } }, - '#withRuntimeClassName':: d.fn(help='"RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. 
If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \\"legacy\\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md This is a beta feature as of Kubernetes v1.14."', args=[d.arg(name='runtimeClassName', type=d.T.string)]), + '#withRuntimeClassName':: d.fn(help='"RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \\"legacy\\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class"', args=[d.arg(name='runtimeClassName', type=d.T.string)]), withRuntimeClassName(runtimeClassName): { spec+: { runtimeClassName: runtimeClassName } }, '#withSchedulerName':: d.fn(help='"If specified, the pod will be dispatched by specified scheduler. If not specified, the pod will be dispatched by default scheduler."', args=[d.arg(name='schedulerName', type=d.T.string)]), withSchedulerName(schedulerName): { spec+: { schedulerName: schedulerName } }, + '#withSchedulingGates':: d.fn(help='"SchedulingGates is an opaque list of values that if specified will block scheduling the pod. If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the scheduler will not attempt to schedule the pod.\\n\\nSchedulingGates can only be set at pod creation time, and be removed only afterwards.\\n\\nThis is a beta feature enabled by the PodSchedulingReadiness feature gate."', args=[d.arg(name='schedulingGates', type=d.T.array)]), + withSchedulingGates(schedulingGates): { spec+: { schedulingGates: if std.isArray(v=schedulingGates) then schedulingGates else [schedulingGates] } }, + '#withSchedulingGatesMixin':: d.fn(help='"SchedulingGates is an opaque list of values that if specified will block scheduling the pod. If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the scheduler will not attempt to schedule the pod.\\n\\nSchedulingGates can only be set at pod creation time, and be removed only afterwards.\\n\\nThis is a beta feature enabled by the PodSchedulingReadiness feature gate."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='schedulingGates', type=d.T.array)]), + withSchedulingGatesMixin(schedulingGates): { spec+: { schedulingGates+: if std.isArray(v=schedulingGates) then schedulingGates else [schedulingGates] } }, '#withServiceAccount':: d.fn(help='"DeprecatedServiceAccount is a depreciated alias for ServiceAccountName. Deprecated: Use serviceAccountName instead."', args=[d.arg(name='serviceAccount', type=d.T.string)]), withServiceAccount(serviceAccount): { spec+: { serviceAccount: serviceAccount } }, '#withServiceAccountName':: d.fn(help='"ServiceAccountName is the name of the ServiceAccount to use to run this pod. 
More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/"', args=[d.arg(name='serviceAccountName', type=d.T.string)]), diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/portStatus.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/portStatus.libsonnet similarity index 100% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/portStatus.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/portStatus.libsonnet diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/portworxVolumeSource.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/portworxVolumeSource.libsonnet similarity index 66% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/portworxVolumeSource.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/portworxVolumeSource.libsonnet index 79564673866..82be17b17fd 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/portworxVolumeSource.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/portworxVolumeSource.libsonnet @@ -1,11 +1,11 @@ { local d = (import 'doc-util/main.libsonnet'), '#':: d.pkg(name='portworxVolumeSource', url='', help='"PortworxVolumeSource represents a Portworx volume resource."'), - '#withFsType':: d.fn(help='"FSType represents the filesystem type to mount Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fSType represents the filesystem type to mount Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { fsType: fsType }, - '#withReadOnly':: d.fn(help='"Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly defaults to false (read/write). 
ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { readOnly: readOnly }, - '#withVolumeID':: d.fn(help='"VolumeID uniquely identifies a Portworx volume"', args=[d.arg(name='volumeID', type=d.T.string)]), + '#withVolumeID':: d.fn(help='"volumeID uniquely identifies a Portworx volume"', args=[d.arg(name='volumeID', type=d.T.string)]), withVolumeID(volumeID): { volumeID: volumeID }, '#mixin': 'ignore', mixin: self, diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/preferredSchedulingTerm.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/preferredSchedulingTerm.libsonnet similarity index 100% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/preferredSchedulingTerm.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/preferredSchedulingTerm.libsonnet diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/probe.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/probe.libsonnet similarity index 88% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/probe.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/probe.libsonnet index c19a814092e..bd982b3f476 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/probe.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/probe.libsonnet @@ -8,6 +8,13 @@ '#withCommandMixin':: d.fn(help="\"Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='command', type=d.T.array)]), withCommandMixin(command): { exec+: { command+: if std.isArray(v=command) then command else [command] } }, }, + '#grpc':: d.obj(help=''), + grpc: { + '#withPort':: d.fn(help='"Port number of the gRPC service. Number must be in the range 1 to 65535."', args=[d.arg(name='port', type=d.T.integer)]), + withPort(port): { grpc+: { port: port } }, + '#withService':: d.fn(help='"Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).\\n\\nIf this is not specified, the default behavior is defined by gRPC."', args=[d.arg(name='service', type=d.T.string)]), + withService(service): { grpc+: { service: service } }, + }, '#httpGet':: d.obj(help='"HTTPGetAction describes an action based on HTTP Get requests."'), httpGet: { '#withHost':: d.fn(help='"Host name to connect to, defaults to the pod IP. 
You probably want to set \\"Host\\" in httpHeaders instead."', args=[d.arg(name='host', type=d.T.string)]), @@ -38,7 +45,7 @@ withPeriodSeconds(periodSeconds): { periodSeconds: periodSeconds }, '#withSuccessThreshold':: d.fn(help='"Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1."', args=[d.arg(name='successThreshold', type=d.T.integer)]), withSuccessThreshold(successThreshold): { successThreshold: successThreshold }, - '#withTerminationGracePeriodSeconds':: d.fn(help="\"Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is an alpha field and requires enabling ProbeTerminationGracePeriod feature gate.\"", args=[d.arg(name='terminationGracePeriodSeconds', type=d.T.integer)]), + '#withTerminationGracePeriodSeconds':: d.fn(help="\"Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset.\"", args=[d.arg(name='terminationGracePeriodSeconds', type=d.T.integer)]), withTerminationGracePeriodSeconds(terminationGracePeriodSeconds): { terminationGracePeriodSeconds: terminationGracePeriodSeconds }, '#withTimeoutSeconds':: d.fn(help='"Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes"', args=[d.arg(name='timeoutSeconds', type=d.T.integer)]), withTimeoutSeconds(timeoutSeconds): { timeoutSeconds: timeoutSeconds }, diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/projectedVolumeSource.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/projectedVolumeSource.libsonnet new file mode 100644 index 00000000000..2bbb9a4d562 --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/projectedVolumeSource.libsonnet @@ -0,0 +1,12 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='projectedVolumeSource', url='', help='"Represents a projected volume source"'), + '#withDefaultMode':: d.fn(help='"defaultMode are the mode bits used to set permissions on created files by default. 
Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set."', args=[d.arg(name='defaultMode', type=d.T.integer)]), + withDefaultMode(defaultMode): { defaultMode: defaultMode }, + '#withSources':: d.fn(help='"sources is the list of volume projections"', args=[d.arg(name='sources', type=d.T.array)]), + withSources(sources): { sources: if std.isArray(v=sources) then sources else [sources] }, + '#withSourcesMixin':: d.fn(help='"sources is the list of volume projections"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='sources', type=d.T.array)]), + withSourcesMixin(sources): { sources+: if std.isArray(v=sources) then sources else [sources] }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/quobyteVolumeSource.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/quobyteVolumeSource.libsonnet similarity index 76% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/quobyteVolumeSource.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/quobyteVolumeSource.libsonnet index 65f5cc8af75..f83b09ce4f2 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/quobyteVolumeSource.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/quobyteVolumeSource.libsonnet @@ -1,17 +1,17 @@ { local d = (import 'doc-util/main.libsonnet'), '#':: d.pkg(name='quobyteVolumeSource', url='', help='"Represents a Quobyte mount that lasts the lifetime of a pod. Quobyte volumes do not support ownership management or SELinux relabeling."'), - '#withGroup':: d.fn(help='"Group to map volume access to Default is no group"', args=[d.arg(name='group', type=d.T.string)]), + '#withGroup':: d.fn(help='"group to map volume access to Default is no group"', args=[d.arg(name='group', type=d.T.string)]), withGroup(group): { group: group }, - '#withReadOnly':: d.fn(help='"ReadOnly here will force the Quobyte volume to be mounted with read-only permissions. Defaults to false."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly here will force the Quobyte volume to be mounted with read-only permissions. 
Defaults to false."', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { readOnly: readOnly }, - '#withRegistry':: d.fn(help='"Registry represents a single or multiple Quobyte Registry services specified as a string as host:port pair (multiple entries are separated with commas) which acts as the central registry for volumes"', args=[d.arg(name='registry', type=d.T.string)]), + '#withRegistry':: d.fn(help='"registry represents a single or multiple Quobyte Registry services specified as a string as host:port pair (multiple entries are separated with commas) which acts as the central registry for volumes"', args=[d.arg(name='registry', type=d.T.string)]), withRegistry(registry): { registry: registry }, - '#withTenant':: d.fn(help='"Tenant owning the given Quobyte volume in the Backend Used with dynamically provisioned Quobyte volumes, value is set by the plugin"', args=[d.arg(name='tenant', type=d.T.string)]), + '#withTenant':: d.fn(help='"tenant owning the given Quobyte volume in the Backend Used with dynamically provisioned Quobyte volumes, value is set by the plugin"', args=[d.arg(name='tenant', type=d.T.string)]), withTenant(tenant): { tenant: tenant }, - '#withUser':: d.fn(help='"User to map volume access to Defaults to serivceaccount user"', args=[d.arg(name='user', type=d.T.string)]), + '#withUser':: d.fn(help='"user to map volume access to Defaults to serivceaccount user"', args=[d.arg(name='user', type=d.T.string)]), withUser(user): { user: user }, - '#withVolume':: d.fn(help='"Volume is a string that references an already created Quobyte volume by name."', args=[d.arg(name='volume', type=d.T.string)]), + '#withVolume':: d.fn(help='"volume is a string that references an already created Quobyte volume by name."', args=[d.arg(name='volume', type=d.T.string)]), withVolume(volume): { volume: volume }, '#mixin': 'ignore', mixin: self, diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/rbdPersistentVolumeSource.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/rbdPersistentVolumeSource.libsonnet new file mode 100644 index 00000000000..5aa18be5d7d --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/rbdPersistentVolumeSource.libsonnet @@ -0,0 +1,29 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='rbdPersistentVolumeSource', url='', help='"Represents a Rados Block Device mount that lasts the lifetime of a pod. RBD volumes support ownership management and SELinux relabeling."'), + '#secretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), + secretRef: { + '#withName':: d.fn(help='"name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { secretRef+: { name: name } }, + '#withNamespace':: d.fn(help='"namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), + withNamespace(namespace): { secretRef+: { namespace: namespace } }, + }, + '#withFsType':: d.fn(help='"fsType is the filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd"', args=[d.arg(name='fsType', type=d.T.string)]), + withFsType(fsType): { fsType: fsType }, + '#withImage':: d.fn(help='"image is the rados image name. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='image', type=d.T.string)]), + withImage(image): { image: image }, + '#withKeyring':: d.fn(help='"keyring is the path to key ring for RBDUser. Default is /etc/ceph/keyring. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='keyring', type=d.T.string)]), + withKeyring(keyring): { keyring: keyring }, + '#withMonitors':: d.fn(help='"monitors is a collection of Ceph monitors. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='monitors', type=d.T.array)]), + withMonitors(monitors): { monitors: if std.isArray(v=monitors) then monitors else [monitors] }, + '#withMonitorsMixin':: d.fn(help='"monitors is a collection of Ceph monitors. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='monitors', type=d.T.array)]), + withMonitorsMixin(monitors): { monitors+: if std.isArray(v=monitors) then monitors else [monitors] }, + '#withPool':: d.fn(help='"pool is the rados pool name. Default is rbd. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='pool', type=d.T.string)]), + withPool(pool): { pool: pool }, + '#withReadOnly':: d.fn(help='"readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='readOnly', type=d.T.boolean)]), + withReadOnly(readOnly): { readOnly: readOnly }, + '#withUser':: d.fn(help='"user is the rados user name. Default is admin. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='user', type=d.T.string)]), + withUser(user): { user: user }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/rbdVolumeSource.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/rbdVolumeSource.libsonnet new file mode 100644 index 00000000000..64840dff747 --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/rbdVolumeSource.libsonnet @@ -0,0 +1,27 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='rbdVolumeSource', url='', help='"Represents a Rados Block Device mount that lasts the lifetime of a pod. RBD volumes support ownership management and SELinux relabeling."'), + '#secretRef':: d.obj(help='"LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace."'), + secretRef: { + '#withName':: d.fn(help='"Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { secretRef+: { name: name } }, + }, + '#withFsType':: d.fn(help='"fsType is the filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd"', args=[d.arg(name='fsType', type=d.T.string)]), + withFsType(fsType): { fsType: fsType }, + '#withImage':: d.fn(help='"image is the rados image name. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='image', type=d.T.string)]), + withImage(image): { image: image }, + '#withKeyring':: d.fn(help='"keyring is the path to key ring for RBDUser. Default is /etc/ceph/keyring. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='keyring', type=d.T.string)]), + withKeyring(keyring): { keyring: keyring }, + '#withMonitors':: d.fn(help='"monitors is a collection of Ceph monitors. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='monitors', type=d.T.array)]), + withMonitors(monitors): { monitors: if std.isArray(v=monitors) then monitors else [monitors] }, + '#withMonitorsMixin':: d.fn(help='"monitors is a collection of Ceph monitors. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='monitors', type=d.T.array)]), + withMonitorsMixin(monitors): { monitors+: if std.isArray(v=monitors) then monitors else [monitors] }, + '#withPool':: d.fn(help='"pool is the rados pool name. Default is rbd. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='pool', type=d.T.string)]), + withPool(pool): { pool: pool }, + '#withReadOnly':: d.fn(help='"readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='readOnly', type=d.T.boolean)]), + withReadOnly(readOnly): { readOnly: readOnly }, + '#withUser':: d.fn(help='"user is the rados user name. Default is admin. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='user', type=d.T.string)]), + withUser(user): { user: user }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/replicationController.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/replicationController.libsonnet similarity index 85% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/replicationController.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/replicationController.libsonnet index 4356ae8ca69..db614455fba 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/replicationController.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/replicationController.libsonnet @@ -3,12 +3,10 @@ '#':: d.pkg(name='replicationController', url='', help='"ReplicationController represents the configuration of a replication controller."'), '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. 
More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -19,21 +17,21 @@ withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. 
If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { metadata+: { generateName: generateName } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { metadata+: { labels+: labels } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. 
Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { metadata+: { namespace: namespace } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, @@ -41,9 +39,9 @@ withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. 
Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { metadata+: { uid: uid } }, }, '#new':: d.fn(help='new returns an instance of ReplicationController', args=[d.arg(name='name', type=d.T.string)]), @@ -57,12 +55,10 @@ template: { '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { spec+: { template+: { metadata+: { annotations: annotations } } } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { spec+: { template+: { metadata+: { annotations+: annotations } } } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { spec+: { template+: { metadata+: { clusterName: clusterName } } } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { spec+: { template+: { metadata+: { creationTimestamp: creationTimestamp } } } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. 
Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -73,21 +69,21 @@ withFinalizers(finalizers): { spec+: { template+: { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } } } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { spec+: { template+: { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } } } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { spec+: { template+: { metadata+: { generateName: generateName } } } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. 
Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { spec+: { template+: { metadata+: { generation: generation } } } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { spec+: { template+: { metadata+: { labels: labels } } } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { spec+: { template+: { metadata+: { labels+: labels } } } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { spec+: { template+: { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } } } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { spec+: { template+: { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } } } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. 
More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { spec+: { template+: { metadata+: { name: name } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { spec+: { template+: { metadata+: { namespace: namespace } } } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { spec+: { template+: { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } } } }, @@ -95,9 +91,9 @@ withOwnerReferencesMixin(ownerReferences): { spec+: { template+: { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } } } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { spec+: { template+: { metadata+: { resourceVersion: resourceVersion } } } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. 
Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { spec+: { template+: { metadata+: { selfLink: selfLink } } } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { spec+: { template+: { metadata+: { uid: uid } } } }, }, '#spec':: d.obj(help='"PodSpec is a description of a pod."'), @@ -156,6 +152,11 @@ '#withSearchesMixin':: d.fn(help='"A list of DNS search domains for host-name lookup. This will be appended to the base search paths generated from DNSPolicy. Duplicated search paths will be removed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='searches', type=d.T.array)]), withSearchesMixin(searches): { spec+: { template+: { spec+: { dnsConfig+: { searches+: if std.isArray(v=searches) then searches else [searches] } } } } }, }, + '#os':: d.obj(help='"PodOS defines the OS parameters of a pod."'), + os: { + '#withName':: d.fn(help='"Name is the name of the operating system. The currently supported values are linux and windows. Additional value may be defined in future and can be one of: https://github.com/opencontainers/runtime-spec/blob/master/config.md#platform-specific-configuration Clients should expect to handle additional values and treat unrecognized values in this field as os: null"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { spec+: { template+: { spec+: { os+: { name: name } } } } }, + }, '#securityContext':: d.obj(help='"PodSecurityContext holds pod-level security attributes and common container settings. Some fields are also present in container.securityContext. Field values of container.securityContext take precedence over field values of PodSecurityContext."'), securityContext: { '#seLinuxOptions':: d.obj(help='"SELinuxOptions are the labels to be applied to the container"'), @@ -171,7 +172,7 @@ }, '#seccompProfile':: d.obj(help="\"SeccompProfile defines a pod/container's seccomp profile settings. Only one profile source may be set.\""), seccompProfile: { - '#withLocalhostProfile':: d.fn(help="\"localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must only be set if type is \\\"Localhost\\\".\"", args=[d.arg(name='localhostProfile', type=d.T.string)]), + '#withLocalhostProfile':: d.fn(help="\"localhostProfile indicates a profile defined in a file on the node should be used. 
The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must be set if type is \\\"Localhost\\\". Must NOT be set for any other type.\"", args=[d.arg(name='localhostProfile', type=d.T.string)]), withLocalhostProfile(localhostProfile): { spec+: { template+: { spec+: { securityContext+: { seccompProfile+: { localhostProfile: localhostProfile } } } } } }, '#withType':: d.fn(help='"type indicates which kind of seccomp profile will be applied. Valid options are:\\n\\nLocalhost - a profile defined in a file on the node should be used. RuntimeDefault - the container runtime default profile should be used. Unconfined - no profile should be applied."', args=[d.arg(name='type', type=d.T.string)]), withType(type): { spec+: { template+: { spec+: { securityContext+: { seccompProfile+: { type: type } } } } } }, @@ -182,26 +183,28 @@ withGmsaCredentialSpec(gmsaCredentialSpec): { spec+: { template+: { spec+: { securityContext+: { windowsOptions+: { gmsaCredentialSpec: gmsaCredentialSpec } } } } } }, '#withGmsaCredentialSpecName':: d.fn(help='"GMSACredentialSpecName is the name of the GMSA credential spec to use."', args=[d.arg(name='gmsaCredentialSpecName', type=d.T.string)]), withGmsaCredentialSpecName(gmsaCredentialSpecName): { spec+: { template+: { spec+: { securityContext+: { windowsOptions+: { gmsaCredentialSpecName: gmsaCredentialSpecName } } } } } }, + '#withHostProcess':: d.fn(help="\"HostProcess determines if a container should be run as a 'Host Process' container. All of a Pod's containers must have the same effective HostProcess value (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). In addition, if HostProcess is true then HostNetwork must also be set to true.\"", args=[d.arg(name='hostProcess', type=d.T.boolean)]), + withHostProcess(hostProcess): { spec+: { template+: { spec+: { securityContext+: { windowsOptions+: { hostProcess: hostProcess } } } } } }, '#withRunAsUserName':: d.fn(help='"The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence."', args=[d.arg(name='runAsUserName', type=d.T.string)]), withRunAsUserName(runAsUserName): { spec+: { template+: { spec+: { securityContext+: { windowsOptions+: { runAsUserName: runAsUserName } } } } } }, }, - '#withFsGroup':: d.fn(help="\"A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod:\\n\\n1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw----\\n\\nIf unset, the Kubelet will not modify the ownership and permissions of any volume.\"", args=[d.arg(name='fsGroup', type=d.T.integer)]), + '#withFsGroup':: d.fn(help="\"A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod:\\n\\n1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw----\\n\\nIf unset, the Kubelet will not modify the ownership and permissions of any volume. 
Note that this field cannot be set when spec.os.name is windows.\"", args=[d.arg(name='fsGroup', type=d.T.integer)]), withFsGroup(fsGroup): { spec+: { template+: { spec+: { securityContext+: { fsGroup: fsGroup } } } } }, - '#withFsGroupChangePolicy':: d.fn(help='"fsGroupChangePolicy defines behavior of changing ownership and permission of the volume before being exposed inside Pod. This field will only apply to volume types which support fsGroup based ownership(and permissions). It will have no effect on ephemeral volume types such as: secret, configmaps and emptydir. Valid values are \\"OnRootMismatch\\" and \\"Always\\". If not specified, \\"Always\\" is used."', args=[d.arg(name='fsGroupChangePolicy', type=d.T.string)]), + '#withFsGroupChangePolicy':: d.fn(help='"fsGroupChangePolicy defines behavior of changing ownership and permission of the volume before being exposed inside Pod. This field will only apply to volume types which support fsGroup based ownership(and permissions). It will have no effect on ephemeral volume types such as: secret, configmaps and emptydir. Valid values are \\"OnRootMismatch\\" and \\"Always\\". If not specified, \\"Always\\" is used. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='fsGroupChangePolicy', type=d.T.string)]), withFsGroupChangePolicy(fsGroupChangePolicy): { spec+: { template+: { spec+: { securityContext+: { fsGroupChangePolicy: fsGroupChangePolicy } } } } }, - '#withRunAsGroup':: d.fn(help='"The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container."', args=[d.arg(name='runAsGroup', type=d.T.integer)]), + '#withRunAsGroup':: d.fn(help='"The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='runAsGroup', type=d.T.integer)]), withRunAsGroup(runAsGroup): { spec+: { template+: { spec+: { securityContext+: { runAsGroup: runAsGroup } } } } }, '#withRunAsNonRoot':: d.fn(help='"Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence."', args=[d.arg(name='runAsNonRoot', type=d.T.boolean)]), withRunAsNonRoot(runAsNonRoot): { spec+: { template+: { spec+: { securityContext+: { runAsNonRoot: runAsNonRoot } } } } }, - '#withRunAsUser':: d.fn(help='"The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container."', args=[d.arg(name='runAsUser', type=d.T.integer)]), + '#withRunAsUser':: d.fn(help='"The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. 
If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='runAsUser', type=d.T.integer)]), withRunAsUser(runAsUser): { spec+: { template+: { spec+: { securityContext+: { runAsUser: runAsUser } } } } }, - '#withSupplementalGroups':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID. If unspecified, no groups will be added to any container.\"", args=[d.arg(name='supplementalGroups', type=d.T.array)]), + '#withSupplementalGroups':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID, the fsGroup (if specified), and group memberships defined in the container image for the uid of the container process. If unspecified, no additional groups are added to any container. Note that group memberships defined in the container image for the uid of the container process are still effective, even if they are not included in this list. Note that this field cannot be set when spec.os.name is windows.\"", args=[d.arg(name='supplementalGroups', type=d.T.array)]), withSupplementalGroups(supplementalGroups): { spec+: { template+: { spec+: { securityContext+: { supplementalGroups: if std.isArray(v=supplementalGroups) then supplementalGroups else [supplementalGroups] } } } } }, - '#withSupplementalGroupsMixin':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID. If unspecified, no groups will be added to any container.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='supplementalGroups', type=d.T.array)]), + '#withSupplementalGroupsMixin':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID, the fsGroup (if specified), and group memberships defined in the container image for the uid of the container process. If unspecified, no additional groups are added to any container. Note that group memberships defined in the container image for the uid of the container process are still effective, even if they are not included in this list. Note that this field cannot be set when spec.os.name is windows.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='supplementalGroups', type=d.T.array)]), withSupplementalGroupsMixin(supplementalGroups): { spec+: { template+: { spec+: { securityContext+: { supplementalGroups+: if std.isArray(v=supplementalGroups) then supplementalGroups else [supplementalGroups] } } } } }, - '#withSysctls':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch."', args=[d.arg(name='sysctls', type=d.T.array)]), + '#withSysctls':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='sysctls', type=d.T.array)]), withSysctls(sysctls): { spec+: { template+: { spec+: { securityContext+: { sysctls: if std.isArray(v=sysctls) then sysctls else [sysctls] } } } } }, - '#withSysctlsMixin':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. 
Pods with unsupported sysctls (by the container runtime) might fail to launch."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='sysctls', type=d.T.array)]), + '#withSysctlsMixin':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch. Note that this field cannot be set when spec.os.name is windows."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='sysctls', type=d.T.array)]), withSysctlsMixin(sysctls): { spec+: { template+: { spec+: { securityContext+: { sysctls+: if std.isArray(v=sysctls) then sysctls else [sysctls] } } } } }, }, '#withActiveDeadlineSeconds':: d.fn(help='"Optional duration in seconds the pod may be active on the node relative to StartTime before the system will actively try to mark it failed and kill associated containers. Value must be a positive integer."', args=[d.arg(name='activeDeadlineSeconds', type=d.T.integer)]), @@ -216,9 +219,9 @@ withDnsPolicy(dnsPolicy): { spec+: { template+: { spec+: { dnsPolicy: dnsPolicy } } } }, '#withEnableServiceLinks':: d.fn(help="\"EnableServiceLinks indicates whether information about services should be injected into pod's environment variables, matching the syntax of Docker links. Optional: Defaults to true.\"", args=[d.arg(name='enableServiceLinks', type=d.T.boolean)]), withEnableServiceLinks(enableServiceLinks): { spec+: { template+: { spec+: { enableServiceLinks: enableServiceLinks } } } }, - '#withEphemeralContainers':: d.fn(help="\"List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. This field is alpha-level and is only honored by servers that enable the EphemeralContainers feature.\"", args=[d.arg(name='ephemeralContainers', type=d.T.array)]), + '#withEphemeralContainers':: d.fn(help="\"List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource.\"", args=[d.arg(name='ephemeralContainers', type=d.T.array)]), withEphemeralContainers(ephemeralContainers): { spec+: { template+: { spec+: { ephemeralContainers: if std.isArray(v=ephemeralContainers) then ephemeralContainers else [ephemeralContainers] } } } }, - '#withEphemeralContainersMixin':: d.fn(help="\"List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. This field is alpha-level and is only honored by servers that enable the EphemeralContainers feature.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='ephemeralContainers', type=d.T.array)]), + '#withEphemeralContainersMixin':: d.fn(help="\"List of ephemeral containers run in this pod. 
Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='ephemeralContainers', type=d.T.array)]), withEphemeralContainersMixin(ephemeralContainers): { spec+: { template+: { spec+: { ephemeralContainers+: if std.isArray(v=ephemeralContainers) then ephemeralContainers else [ephemeralContainers] } } } }, '#withHostAliases':: d.fn(help="\"HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts file if specified. This is only valid for non-hostNetwork pods.\"", args=[d.arg(name='hostAliases', type=d.T.array)]), withHostAliases(hostAliases): { spec+: { template+: { spec+: { hostAliases: if std.isArray(v=hostAliases) then hostAliases else [hostAliases] } } } }, @@ -230,11 +233,13 @@ withHostNetwork(hostNetwork): { spec+: { template+: { spec+: { hostNetwork: hostNetwork } } } }, '#withHostPID':: d.fn(help="\"Use the host's pid namespace. Optional: Default to false.\"", args=[d.arg(name='hostPID', type=d.T.boolean)]), withHostPID(hostPID): { spec+: { template+: { spec+: { hostPID: hostPID } } } }, + '#withHostUsers':: d.fn(help="\"Use the host's user namespace. Optional: Default to true. If set to true or not present, the pod will be run in the host user namespace, useful for when the pod needs a feature only available to the host user namespace, such as loading a kernel module with CAP_SYS_MODULE. When set to false, a new userns is created for the pod. Setting false is useful for mitigating container breakout vulnerabilities even allowing users to run their containers as root without actually having root privileges on the host. This field is alpha-level and is only honored by servers that enable the UserNamespacesSupport feature.\"", args=[d.arg(name='hostUsers', type=d.T.boolean)]), + withHostUsers(hostUsers): { spec+: { template+: { spec+: { hostUsers: hostUsers } } } }, '#withHostname':: d.fn(help="\"Specifies the hostname of the Pod If not specified, the pod's hostname will be set to a system-defined value.\"", args=[d.arg(name='hostname', type=d.T.string)]), withHostname(hostname): { spec+: { template+: { spec+: { hostname: hostname } } } }, - '#withImagePullSecrets':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), + '#withImagePullSecrets':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. 
More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), withImagePullSecrets(imagePullSecrets): { spec+: { template+: { spec+: { imagePullSecrets: if std.isArray(v=imagePullSecrets) then imagePullSecrets else [imagePullSecrets] } } } }, - '#withImagePullSecretsMixin':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), + '#withImagePullSecretsMixin':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), withImagePullSecretsMixin(imagePullSecrets): { spec+: { template+: { spec+: { imagePullSecrets+: if std.isArray(v=imagePullSecrets) then imagePullSecrets else [imagePullSecrets] } } } }, '#withInitContainers':: d.fn(help='"List of initialization containers belonging to the pod. Init containers are executed in order prior to containers being started. If any init container fails, the pod is considered to have failed and is handled according to its restartPolicy. The name for an init container or normal container must be unique among all containers. Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes. The resourceRequirements of an init container are taken into account during scheduling by finding the highest request/limit for each resource type, and then using the max of of that value or the sum of the normal containers. Limits are applied to init containers in a similar fashion. Init containers cannot currently be added or removed. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/"', args=[d.arg(name='initContainers', type=d.T.array)]), withInitContainers(initContainers): { spec+: { template+: { spec+: { initContainers: if std.isArray(v=initContainers) then initContainers else [initContainers] } } } }, @@ -246,26 +251,34 @@ withNodeSelector(nodeSelector): { spec+: { template+: { spec+: { nodeSelector: nodeSelector } } } }, '#withNodeSelectorMixin':: d.fn(help="\"NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='nodeSelector', type=d.T.object)]), withNodeSelectorMixin(nodeSelector): { spec+: { template+: { spec+: { nodeSelector+: nodeSelector } } } }, - '#withOverhead':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. 
This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md This field is alpha-level as of Kubernetes v1.16, and is only honored by servers that enable the PodOverhead feature."', args=[d.arg(name='overhead', type=d.T.object)]), + '#withOverhead':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md"', args=[d.arg(name='overhead', type=d.T.object)]), withOverhead(overhead): { spec+: { template+: { spec+: { overhead: overhead } } } }, - '#withOverheadMixin':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md This field is alpha-level as of Kubernetes v1.16, and is only honored by servers that enable the PodOverhead feature."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='overhead', type=d.T.object)]), + '#withOverheadMixin':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. 
More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='overhead', type=d.T.object)]), withOverheadMixin(overhead): { spec+: { template+: { spec+: { overhead+: overhead } } } }, - '#withPreemptionPolicy':: d.fn(help='"PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset. This field is beta-level, gated by the NonPreemptingPriority feature-gate."', args=[d.arg(name='preemptionPolicy', type=d.T.string)]), + '#withPreemptionPolicy':: d.fn(help='"PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset."', args=[d.arg(name='preemptionPolicy', type=d.T.string)]), withPreemptionPolicy(preemptionPolicy): { spec+: { template+: { spec+: { preemptionPolicy: preemptionPolicy } } } }, '#withPriority':: d.fn(help='"The priority value. Various system components use this field to find the priority of the pod. When Priority Admission Controller is enabled, it prevents users from setting this field. The admission controller populates this field from PriorityClassName. The higher the value, the higher the priority."', args=[d.arg(name='priority', type=d.T.integer)]), withPriority(priority): { spec+: { template+: { spec+: { priority: priority } } } }, '#withPriorityClassName':: d.fn(help="\"If specified, indicates the pod's priority. \\\"system-node-critical\\\" and \\\"system-cluster-critical\\\" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default.\"", args=[d.arg(name='priorityClassName', type=d.T.string)]), withPriorityClassName(priorityClassName): { spec+: { template+: { spec+: { priorityClassName: priorityClassName } } } }, - '#withReadinessGates':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/0007-pod-ready%2B%2B.md"', args=[d.arg(name='readinessGates', type=d.T.array)]), + '#withReadinessGates':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates"', args=[d.arg(name='readinessGates', type=d.T.array)]), withReadinessGates(readinessGates): { spec+: { template+: { spec+: { readinessGates: if std.isArray(v=readinessGates) then readinessGates else [readinessGates] } } } }, - '#withReadinessGatesMixin':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. 
A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/0007-pod-ready%2B%2B.md"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='readinessGates', type=d.T.array)]), + '#withReadinessGatesMixin':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='readinessGates', type=d.T.array)]), withReadinessGatesMixin(readinessGates): { spec+: { template+: { spec+: { readinessGates+: if std.isArray(v=readinessGates) then readinessGates else [readinessGates] } } } }, - '#withRestartPolicy':: d.fn(help='"Restart policy for all containers within the pod. One of Always, OnFailure, Never. Default to Always. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy"', args=[d.arg(name='restartPolicy', type=d.T.string)]), + '#withResourceClaims':: d.fn(help='"ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. The resources will be made available to those containers which consume them by name.\\n\\nThis is an alpha field and requires enabling the DynamicResourceAllocation feature gate.\\n\\nThis field is immutable."', args=[d.arg(name='resourceClaims', type=d.T.array)]), + withResourceClaims(resourceClaims): { spec+: { template+: { spec+: { resourceClaims: if std.isArray(v=resourceClaims) then resourceClaims else [resourceClaims] } } } }, + '#withResourceClaimsMixin':: d.fn(help='"ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. The resources will be made available to those containers which consume them by name.\\n\\nThis is an alpha field and requires enabling the DynamicResourceAllocation feature gate.\\n\\nThis field is immutable."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='resourceClaims', type=d.T.array)]), + withResourceClaimsMixin(resourceClaims): { spec+: { template+: { spec+: { resourceClaims+: if std.isArray(v=resourceClaims) then resourceClaims else [resourceClaims] } } } }, + '#withRestartPolicy':: d.fn(help='"Restart policy for all containers within the pod. One of Always, OnFailure, Never. In some contexts, only a subset of those values may be permitted. Default to Always. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy"', args=[d.arg(name='restartPolicy', type=d.T.string)]), withRestartPolicy(restartPolicy): { spec+: { template+: { spec+: { restartPolicy: restartPolicy } } } }, - '#withRuntimeClassName':: d.fn(help='"RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \\"legacy\\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. 
More info: https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md This is a beta feature as of Kubernetes v1.14."', args=[d.arg(name='runtimeClassName', type=d.T.string)]), + '#withRuntimeClassName':: d.fn(help='"RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \\"legacy\\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class"', args=[d.arg(name='runtimeClassName', type=d.T.string)]), withRuntimeClassName(runtimeClassName): { spec+: { template+: { spec+: { runtimeClassName: runtimeClassName } } } }, '#withSchedulerName':: d.fn(help='"If specified, the pod will be dispatched by specified scheduler. If not specified, the pod will be dispatched by default scheduler."', args=[d.arg(name='schedulerName', type=d.T.string)]), withSchedulerName(schedulerName): { spec+: { template+: { spec+: { schedulerName: schedulerName } } } }, + '#withSchedulingGates':: d.fn(help='"SchedulingGates is an opaque list of values that if specified will block scheduling the pod. If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the scheduler will not attempt to schedule the pod.\\n\\nSchedulingGates can only be set at pod creation time, and be removed only afterwards.\\n\\nThis is a beta feature enabled by the PodSchedulingReadiness feature gate."', args=[d.arg(name='schedulingGates', type=d.T.array)]), + withSchedulingGates(schedulingGates): { spec+: { template+: { spec+: { schedulingGates: if std.isArray(v=schedulingGates) then schedulingGates else [schedulingGates] } } } }, + '#withSchedulingGatesMixin':: d.fn(help='"SchedulingGates is an opaque list of values that if specified will block scheduling the pod. If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the scheduler will not attempt to schedule the pod.\\n\\nSchedulingGates can only be set at pod creation time, and be removed only afterwards.\\n\\nThis is a beta feature enabled by the PodSchedulingReadiness feature gate."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='schedulingGates', type=d.T.array)]), + withSchedulingGatesMixin(schedulingGates): { spec+: { template+: { spec+: { schedulingGates+: if std.isArray(v=schedulingGates) then schedulingGates else [schedulingGates] } } } }, '#withServiceAccount':: d.fn(help='"DeprecatedServiceAccount is a depreciated alias for ServiceAccountName. Deprecated: Use serviceAccountName instead."', args=[d.arg(name='serviceAccount', type=d.T.string)]), withServiceAccount(serviceAccount): { spec+: { template+: { spec+: { serviceAccount: serviceAccount } } } }, '#withServiceAccountName':: d.fn(help='"ServiceAccountName is the name of the ServiceAccount to use to run this pod. 
More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/"', args=[d.arg(name='serviceAccountName', type=d.T.string)]),
diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/replicationControllerCondition.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/replicationControllerCondition.libsonnet
similarity index 100%
rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/replicationControllerCondition.libsonnet
rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/replicationControllerCondition.libsonnet
diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/replicationControllerSpec.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/replicationControllerSpec.libsonnet
similarity index 85%
rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/replicationControllerSpec.libsonnet
rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/replicationControllerSpec.libsonnet
index 0893916e78d..b1363bd00c5 100644
--- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/replicationControllerSpec.libsonnet
+++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/replicationControllerSpec.libsonnet
@@ -5,12 +5,10 @@
template: {
'#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'),
metadata: {
- '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]),
+ '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]),
withAnnotations(annotations): { template+: { metadata+: { annotations: annotations } } },
- '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]),
+ '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { template+: { metadata+: { annotations+: annotations } } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { template+: { metadata+: { clusterName: clusterName } } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { template+: { metadata+: { creationTimestamp: creationTimestamp } } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -21,21 +19,21 @@ withFinalizers(finalizers): { template+: { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { template+: { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. 
The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { template+: { metadata+: { generateName: generateName } } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { template+: { metadata+: { generation: generation } } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { template+: { metadata+: { labels: labels } } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { template+: { metadata+: { labels+: labels } } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. 
This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { template+: { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { template+: { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { template+: { metadata+: { name: name } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { template+: { metadata+: { namespace: namespace } } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. 
If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]),
withOwnerReferences(ownerReferences): { template+: { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } } },
@@ -43,9 +41,9 @@
withOwnerReferencesMixin(ownerReferences): { template+: { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } } },
'#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]),
withResourceVersion(resourceVersion): { template+: { metadata+: { resourceVersion: resourceVersion } } },
- '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]),
+ '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]),
withSelfLink(selfLink): { template+: { metadata+: { selfLink: selfLink } } },
- '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]),
+ '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]),
withUid(uid): { template+: { metadata+: { uid: uid } } },
},
'#spec':: d.obj(help='"PodSpec is a description of a pod."'),
@@ -104,6 +102,11 @@
'#withSearchesMixin':: d.fn(help='"A list of DNS search domains for host-name lookup. This will be appended to the base search paths generated from DNSPolicy. Duplicated search paths will be removed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='searches', type=d.T.array)]),
withSearchesMixin(searches): { template+: { spec+: { dnsConfig+: { searches+: if std.isArray(v=searches) then searches else [searches] } } } },
},
+ '#os':: d.obj(help='"PodOS defines the OS parameters of a pod."'),
+ os: {
+ '#withName':: d.fn(help='"Name is the name of the operating system. The currently supported values are linux and windows. 
Additional value may be defined in future and can be one of: https://github.com/opencontainers/runtime-spec/blob/master/config.md#platform-specific-configuration Clients should expect to handle additional values and treat unrecognized values in this field as os: null"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { template+: { spec+: { os+: { name: name } } } }, + }, '#securityContext':: d.obj(help='"PodSecurityContext holds pod-level security attributes and common container settings. Some fields are also present in container.securityContext. Field values of container.securityContext take precedence over field values of PodSecurityContext."'), securityContext: { '#seLinuxOptions':: d.obj(help='"SELinuxOptions are the labels to be applied to the container"'), @@ -119,7 +122,7 @@ }, '#seccompProfile':: d.obj(help="\"SeccompProfile defines a pod/container's seccomp profile settings. Only one profile source may be set.\""), seccompProfile: { - '#withLocalhostProfile':: d.fn(help="\"localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must only be set if type is \\\"Localhost\\\".\"", args=[d.arg(name='localhostProfile', type=d.T.string)]), + '#withLocalhostProfile':: d.fn(help="\"localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must be set if type is \\\"Localhost\\\". Must NOT be set for any other type.\"", args=[d.arg(name='localhostProfile', type=d.T.string)]), withLocalhostProfile(localhostProfile): { template+: { spec+: { securityContext+: { seccompProfile+: { localhostProfile: localhostProfile } } } } }, '#withType':: d.fn(help='"type indicates which kind of seccomp profile will be applied. Valid options are:\\n\\nLocalhost - a profile defined in a file on the node should be used. RuntimeDefault - the container runtime default profile should be used. Unconfined - no profile should be applied."', args=[d.arg(name='type', type=d.T.string)]), withType(type): { template+: { spec+: { securityContext+: { seccompProfile+: { type: type } } } } }, @@ -130,26 +133,28 @@ withGmsaCredentialSpec(gmsaCredentialSpec): { template+: { spec+: { securityContext+: { windowsOptions+: { gmsaCredentialSpec: gmsaCredentialSpec } } } } }, '#withGmsaCredentialSpecName':: d.fn(help='"GMSACredentialSpecName is the name of the GMSA credential spec to use."', args=[d.arg(name='gmsaCredentialSpecName', type=d.T.string)]), withGmsaCredentialSpecName(gmsaCredentialSpecName): { template+: { spec+: { securityContext+: { windowsOptions+: { gmsaCredentialSpecName: gmsaCredentialSpecName } } } } }, + '#withHostProcess':: d.fn(help="\"HostProcess determines if a container should be run as a 'Host Process' container. All of a Pod's containers must have the same effective HostProcess value (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). In addition, if HostProcess is true then HostNetwork must also be set to true.\"", args=[d.arg(name='hostProcess', type=d.T.boolean)]), + withHostProcess(hostProcess): { template+: { spec+: { securityContext+: { windowsOptions+: { hostProcess: hostProcess } } } } }, '#withRunAsUserName':: d.fn(help='"The UserName in Windows to run the entrypoint of the container process. 
Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence."', args=[d.arg(name='runAsUserName', type=d.T.string)]), withRunAsUserName(runAsUserName): { template+: { spec+: { securityContext+: { windowsOptions+: { runAsUserName: runAsUserName } } } } }, }, - '#withFsGroup':: d.fn(help="\"A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod:\\n\\n1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw----\\n\\nIf unset, the Kubelet will not modify the ownership and permissions of any volume.\"", args=[d.arg(name='fsGroup', type=d.T.integer)]), + '#withFsGroup':: d.fn(help="\"A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod:\\n\\n1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw----\\n\\nIf unset, the Kubelet will not modify the ownership and permissions of any volume. Note that this field cannot be set when spec.os.name is windows.\"", args=[d.arg(name='fsGroup', type=d.T.integer)]), withFsGroup(fsGroup): { template+: { spec+: { securityContext+: { fsGroup: fsGroup } } } }, - '#withFsGroupChangePolicy':: d.fn(help='"fsGroupChangePolicy defines behavior of changing ownership and permission of the volume before being exposed inside Pod. This field will only apply to volume types which support fsGroup based ownership(and permissions). It will have no effect on ephemeral volume types such as: secret, configmaps and emptydir. Valid values are \\"OnRootMismatch\\" and \\"Always\\". If not specified, \\"Always\\" is used."', args=[d.arg(name='fsGroupChangePolicy', type=d.T.string)]), + '#withFsGroupChangePolicy':: d.fn(help='"fsGroupChangePolicy defines behavior of changing ownership and permission of the volume before being exposed inside Pod. This field will only apply to volume types which support fsGroup based ownership(and permissions). It will have no effect on ephemeral volume types such as: secret, configmaps and emptydir. Valid values are \\"OnRootMismatch\\" and \\"Always\\". If not specified, \\"Always\\" is used. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='fsGroupChangePolicy', type=d.T.string)]), withFsGroupChangePolicy(fsGroupChangePolicy): { template+: { spec+: { securityContext+: { fsGroupChangePolicy: fsGroupChangePolicy } } } }, - '#withRunAsGroup':: d.fn(help='"The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container."', args=[d.arg(name='runAsGroup', type=d.T.integer)]), + '#withRunAsGroup':: d.fn(help='"The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. 
Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='runAsGroup', type=d.T.integer)]), withRunAsGroup(runAsGroup): { template+: { spec+: { securityContext+: { runAsGroup: runAsGroup } } } }, '#withRunAsNonRoot':: d.fn(help='"Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence."', args=[d.arg(name='runAsNonRoot', type=d.T.boolean)]), withRunAsNonRoot(runAsNonRoot): { template+: { spec+: { securityContext+: { runAsNonRoot: runAsNonRoot } } } }, - '#withRunAsUser':: d.fn(help='"The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container."', args=[d.arg(name='runAsUser', type=d.T.integer)]), + '#withRunAsUser':: d.fn(help='"The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='runAsUser', type=d.T.integer)]), withRunAsUser(runAsUser): { template+: { spec+: { securityContext+: { runAsUser: runAsUser } } } }, - '#withSupplementalGroups':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID. If unspecified, no groups will be added to any container.\"", args=[d.arg(name='supplementalGroups', type=d.T.array)]), + '#withSupplementalGroups':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID, the fsGroup (if specified), and group memberships defined in the container image for the uid of the container process. If unspecified, no additional groups are added to any container. Note that group memberships defined in the container image for the uid of the container process are still effective, even if they are not included in this list. Note that this field cannot be set when spec.os.name is windows.\"", args=[d.arg(name='supplementalGroups', type=d.T.array)]), withSupplementalGroups(supplementalGroups): { template+: { spec+: { securityContext+: { supplementalGroups: if std.isArray(v=supplementalGroups) then supplementalGroups else [supplementalGroups] } } } }, - '#withSupplementalGroupsMixin':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID. If unspecified, no groups will be added to any container.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='supplementalGroups', type=d.T.array)]), + '#withSupplementalGroupsMixin':: d.fn(help="\"A list of groups applied to the first process run in each container, in addition to the container's primary GID, the fsGroup (if specified), and group memberships defined in the container image for the uid of the container process. If unspecified, no additional groups are added to any container. 
Note that group memberships defined in the container image for the uid of the container process are still effective, even if they are not included in this list. Note that this field cannot be set when spec.os.name is windows.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='supplementalGroups', type=d.T.array)]), withSupplementalGroupsMixin(supplementalGroups): { template+: { spec+: { securityContext+: { supplementalGroups+: if std.isArray(v=supplementalGroups) then supplementalGroups else [supplementalGroups] } } } }, - '#withSysctls':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch."', args=[d.arg(name='sysctls', type=d.T.array)]), + '#withSysctls':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='sysctls', type=d.T.array)]), withSysctls(sysctls): { template+: { spec+: { securityContext+: { sysctls: if std.isArray(v=sysctls) then sysctls else [sysctls] } } } }, - '#withSysctlsMixin':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='sysctls', type=d.T.array)]), + '#withSysctlsMixin':: d.fn(help='"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch. Note that this field cannot be set when spec.os.name is windows."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='sysctls', type=d.T.array)]), withSysctlsMixin(sysctls): { template+: { spec+: { securityContext+: { sysctls+: if std.isArray(v=sysctls) then sysctls else [sysctls] } } } }, }, '#withActiveDeadlineSeconds':: d.fn(help='"Optional duration in seconds the pod may be active on the node relative to StartTime before the system will actively try to mark it failed and kill associated containers. Value must be a positive integer."', args=[d.arg(name='activeDeadlineSeconds', type=d.T.integer)]), @@ -164,9 +169,9 @@ withDnsPolicy(dnsPolicy): { template+: { spec+: { dnsPolicy: dnsPolicy } } }, '#withEnableServiceLinks':: d.fn(help="\"EnableServiceLinks indicates whether information about services should be injected into pod's environment variables, matching the syntax of Docker links. Optional: Defaults to true.\"", args=[d.arg(name='enableServiceLinks', type=d.T.boolean)]), withEnableServiceLinks(enableServiceLinks): { template+: { spec+: { enableServiceLinks: enableServiceLinks } } }, - '#withEphemeralContainers':: d.fn(help="\"List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. This field is alpha-level and is only honored by servers that enable the EphemeralContainers feature.\"", args=[d.arg(name='ephemeralContainers', type=d.T.array)]), + '#withEphemeralContainers':: d.fn(help="\"List of ephemeral containers run in this pod. 
Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource.\"", args=[d.arg(name='ephemeralContainers', type=d.T.array)]), withEphemeralContainers(ephemeralContainers): { template+: { spec+: { ephemeralContainers: if std.isArray(v=ephemeralContainers) then ephemeralContainers else [ephemeralContainers] } } }, - '#withEphemeralContainersMixin':: d.fn(help="\"List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. This field is alpha-level and is only honored by servers that enable the EphemeralContainers feature.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='ephemeralContainers', type=d.T.array)]), + '#withEphemeralContainersMixin':: d.fn(help="\"List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='ephemeralContainers', type=d.T.array)]), withEphemeralContainersMixin(ephemeralContainers): { template+: { spec+: { ephemeralContainers+: if std.isArray(v=ephemeralContainers) then ephemeralContainers else [ephemeralContainers] } } }, '#withHostAliases':: d.fn(help="\"HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts file if specified. This is only valid for non-hostNetwork pods.\"", args=[d.arg(name='hostAliases', type=d.T.array)]), withHostAliases(hostAliases): { template+: { spec+: { hostAliases: if std.isArray(v=hostAliases) then hostAliases else [hostAliases] } } }, @@ -178,11 +183,13 @@ withHostNetwork(hostNetwork): { template+: { spec+: { hostNetwork: hostNetwork } } }, '#withHostPID':: d.fn(help="\"Use the host's pid namespace. Optional: Default to false.\"", args=[d.arg(name='hostPID', type=d.T.boolean)]), withHostPID(hostPID): { template+: { spec+: { hostPID: hostPID } } }, + '#withHostUsers':: d.fn(help="\"Use the host's user namespace. Optional: Default to true. If set to true or not present, the pod will be run in the host user namespace, useful for when the pod needs a feature only available to the host user namespace, such as loading a kernel module with CAP_SYS_MODULE. When set to false, a new userns is created for the pod. Setting false is useful for mitigating container breakout vulnerabilities even allowing users to run their containers as root without actually having root privileges on the host. 
This field is alpha-level and is only honored by servers that enable the UserNamespacesSupport feature.\"", args=[d.arg(name='hostUsers', type=d.T.boolean)]), + withHostUsers(hostUsers): { template+: { spec+: { hostUsers: hostUsers } } }, '#withHostname':: d.fn(help="\"Specifies the hostname of the Pod If not specified, the pod's hostname will be set to a system-defined value.\"", args=[d.arg(name='hostname', type=d.T.string)]), withHostname(hostname): { template+: { spec+: { hostname: hostname } } }, - '#withImagePullSecrets':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), + '#withImagePullSecrets':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), withImagePullSecrets(imagePullSecrets): { template+: { spec+: { imagePullSecrets: if std.isArray(v=imagePullSecrets) then imagePullSecrets else [imagePullSecrets] } } }, - '#withImagePullSecretsMixin':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), + '#withImagePullSecretsMixin':: d.fn(help='"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), withImagePullSecretsMixin(imagePullSecrets): { template+: { spec+: { imagePullSecrets+: if std.isArray(v=imagePullSecrets) then imagePullSecrets else [imagePullSecrets] } } }, '#withInitContainers':: d.fn(help='"List of initialization containers belonging to the pod. Init containers are executed in order prior to containers being started. If any init container fails, the pod is considered to have failed and is handled according to its restartPolicy. The name for an init container or normal container must be unique among all containers. Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes. 
The resourceRequirements of an init container are taken into account during scheduling by finding the highest request/limit for each resource type, and then using the max of of that value or the sum of the normal containers. Limits are applied to init containers in a similar fashion. Init containers cannot currently be added or removed. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/"', args=[d.arg(name='initContainers', type=d.T.array)]), withInitContainers(initContainers): { template+: { spec+: { initContainers: if std.isArray(v=initContainers) then initContainers else [initContainers] } } }, @@ -194,26 +201,34 @@ withNodeSelector(nodeSelector): { template+: { spec+: { nodeSelector: nodeSelector } } }, '#withNodeSelectorMixin':: d.fn(help="\"NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='nodeSelector', type=d.T.object)]), withNodeSelectorMixin(nodeSelector): { template+: { spec+: { nodeSelector+: nodeSelector } } }, - '#withOverhead':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md This field is alpha-level as of Kubernetes v1.16, and is only honored by servers that enable the PodOverhead feature."', args=[d.arg(name='overhead', type=d.T.object)]), + '#withOverhead':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md"', args=[d.arg(name='overhead', type=d.T.object)]), withOverhead(overhead): { template+: { spec+: { overhead: overhead } } }, - '#withOverheadMixin':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. 
If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md This field is alpha-level as of Kubernetes v1.16, and is only honored by servers that enable the PodOverhead feature."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='overhead', type=d.T.object)]), + '#withOverheadMixin':: d.fn(help='"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='overhead', type=d.T.object)]), withOverheadMixin(overhead): { template+: { spec+: { overhead+: overhead } } }, - '#withPreemptionPolicy':: d.fn(help='"PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset. This field is beta-level, gated by the NonPreemptingPriority feature-gate."', args=[d.arg(name='preemptionPolicy', type=d.T.string)]), + '#withPreemptionPolicy':: d.fn(help='"PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset."', args=[d.arg(name='preemptionPolicy', type=d.T.string)]), withPreemptionPolicy(preemptionPolicy): { template+: { spec+: { preemptionPolicy: preemptionPolicy } } }, '#withPriority':: d.fn(help='"The priority value. Various system components use this field to find the priority of the pod. When Priority Admission Controller is enabled, it prevents users from setting this field. The admission controller populates this field from PriorityClassName. The higher the value, the higher the priority."', args=[d.arg(name='priority', type=d.T.integer)]), withPriority(priority): { template+: { spec+: { priority: priority } } }, '#withPriorityClassName':: d.fn(help="\"If specified, indicates the pod's priority. \\\"system-node-critical\\\" and \\\"system-cluster-critical\\\" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default.\"", args=[d.arg(name='priorityClassName', type=d.T.string)]), withPriorityClassName(priorityClassName): { template+: { spec+: { priorityClassName: priorityClassName } } }, - '#withReadinessGates':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. 
A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/0007-pod-ready%2B%2B.md"', args=[d.arg(name='readinessGates', type=d.T.array)]), + '#withReadinessGates':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates"', args=[d.arg(name='readinessGates', type=d.T.array)]), withReadinessGates(readinessGates): { template+: { spec+: { readinessGates: if std.isArray(v=readinessGates) then readinessGates else [readinessGates] } } }, - '#withReadinessGatesMixin':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/0007-pod-ready%2B%2B.md"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='readinessGates', type=d.T.array)]), + '#withReadinessGatesMixin':: d.fn(help='"If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \\"True\\" More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='readinessGates', type=d.T.array)]), withReadinessGatesMixin(readinessGates): { template+: { spec+: { readinessGates+: if std.isArray(v=readinessGates) then readinessGates else [readinessGates] } } }, - '#withRestartPolicy':: d.fn(help='"Restart policy for all containers within the pod. One of Always, OnFailure, Never. Default to Always. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy"', args=[d.arg(name='restartPolicy', type=d.T.string)]), + '#withResourceClaims':: d.fn(help='"ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. The resources will be made available to those containers which consume them by name.\\n\\nThis is an alpha field and requires enabling the DynamicResourceAllocation feature gate.\\n\\nThis field is immutable."', args=[d.arg(name='resourceClaims', type=d.T.array)]), + withResourceClaims(resourceClaims): { template+: { spec+: { resourceClaims: if std.isArray(v=resourceClaims) then resourceClaims else [resourceClaims] } } }, + '#withResourceClaimsMixin':: d.fn(help='"ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. The resources will be made available to those containers which consume them by name.\\n\\nThis is an alpha field and requires enabling the DynamicResourceAllocation feature gate.\\n\\nThis field is immutable."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='resourceClaims', type=d.T.array)]), + withResourceClaimsMixin(resourceClaims): { template+: { spec+: { resourceClaims+: if std.isArray(v=resourceClaims) then resourceClaims else [resourceClaims] } } }, + '#withRestartPolicy':: d.fn(help='"Restart policy for all containers within the pod. One of Always, OnFailure, Never. 
In some contexts, only a subset of those values may be permitted. Default to Always. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy"', args=[d.arg(name='restartPolicy', type=d.T.string)]), withRestartPolicy(restartPolicy): { template+: { spec+: { restartPolicy: restartPolicy } } }, - '#withRuntimeClassName':: d.fn(help='"RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \\"legacy\\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md This is a beta feature as of Kubernetes v1.14."', args=[d.arg(name='runtimeClassName', type=d.T.string)]), + '#withRuntimeClassName':: d.fn(help='"RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \\"legacy\\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class"', args=[d.arg(name='runtimeClassName', type=d.T.string)]), withRuntimeClassName(runtimeClassName): { template+: { spec+: { runtimeClassName: runtimeClassName } } }, '#withSchedulerName':: d.fn(help='"If specified, the pod will be dispatched by specified scheduler. If not specified, the pod will be dispatched by default scheduler."', args=[d.arg(name='schedulerName', type=d.T.string)]), withSchedulerName(schedulerName): { template+: { spec+: { schedulerName: schedulerName } } }, + '#withSchedulingGates':: d.fn(help='"SchedulingGates is an opaque list of values that if specified will block scheduling the pod. If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the scheduler will not attempt to schedule the pod.\\n\\nSchedulingGates can only be set at pod creation time, and be removed only afterwards.\\n\\nThis is a beta feature enabled by the PodSchedulingReadiness feature gate."', args=[d.arg(name='schedulingGates', type=d.T.array)]), + withSchedulingGates(schedulingGates): { template+: { spec+: { schedulingGates: if std.isArray(v=schedulingGates) then schedulingGates else [schedulingGates] } } }, + '#withSchedulingGatesMixin':: d.fn(help='"SchedulingGates is an opaque list of values that if specified will block scheduling the pod. If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the scheduler will not attempt to schedule the pod.\\n\\nSchedulingGates can only be set at pod creation time, and be removed only afterwards.\\n\\nThis is a beta feature enabled by the PodSchedulingReadiness feature gate."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='schedulingGates', type=d.T.array)]), + withSchedulingGatesMixin(schedulingGates): { template+: { spec+: { schedulingGates+: if std.isArray(v=schedulingGates) then schedulingGates else [schedulingGates] } } }, '#withServiceAccount':: d.fn(help='"DeprecatedServiceAccount is a depreciated alias for ServiceAccountName. 
Deprecated: Use serviceAccountName instead."', args=[d.arg(name='serviceAccount', type=d.T.string)]), withServiceAccount(serviceAccount): { template+: { spec+: { serviceAccount: serviceAccount } } }, '#withServiceAccountName':: d.fn(help='"ServiceAccountName is the name of the ServiceAccount to use to run this pod. More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/"', args=[d.arg(name='serviceAccountName', type=d.T.string)]), diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/replicationControllerStatus.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/replicationControllerStatus.libsonnet similarity index 91% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/replicationControllerStatus.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/replicationControllerStatus.libsonnet index a0a72a9b700..b1a60d4b1d1 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/replicationControllerStatus.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/replicationControllerStatus.libsonnet @@ -13,7 +13,7 @@ withObservedGeneration(observedGeneration): { observedGeneration: observedGeneration }, '#withReadyReplicas':: d.fn(help='"The number of ready replicas for this replication controller."', args=[d.arg(name='readyReplicas', type=d.T.integer)]), withReadyReplicas(readyReplicas): { readyReplicas: readyReplicas }, - '#withReplicas':: d.fn(help='"Replicas is the most recently oberved number of replicas. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#what-is-a-replicationcontroller"', args=[d.arg(name='replicas', type=d.T.integer)]), + '#withReplicas':: d.fn(help='"Replicas is the most recently observed number of replicas. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#what-is-a-replicationcontroller"', args=[d.arg(name='replicas', type=d.T.integer)]), withReplicas(replicas): { replicas: replicas }, '#mixin': 'ignore', mixin: self, diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/resourceClaim.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/resourceClaim.libsonnet new file mode 100644 index 00000000000..b1275e4a101 --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/resourceClaim.libsonnet @@ -0,0 +1,8 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='resourceClaim', url='', help='"ResourceClaim references one entry in PodSpec.ResourceClaims."'), + '#withName':: d.fn(help='"Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. 
It makes that resource available inside a container."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { name: name }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/resourceFieldSelector.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/resourceFieldSelector.libsonnet new file mode 100644 index 00000000000..27211745973 --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/resourceFieldSelector.libsonnet @@ -0,0 +1,12 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='resourceFieldSelector', url='', help='"ResourceFieldSelector represents container resources (cpu, memory) and their output format"'), + '#withContainerName':: d.fn(help='"Container name: required for volumes, optional for env vars"', args=[d.arg(name='containerName', type=d.T.string)]), + withContainerName(containerName): { containerName: containerName }, + '#withDivisor':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n``` \u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n\\n\\t(Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n\\n\\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n\\n\\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e ```\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n\\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\\n\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n\\n- 1.5 will be serialized as \\\"1500m\\\" - 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. 
That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='divisor', type=d.T.string)]), + withDivisor(divisor): { divisor: divisor }, + '#withResource':: d.fn(help='"Required: resource to select"', args=[d.arg(name='resource', type=d.T.string)]), + withResource(resource): { resource: resource }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/resourceQuota.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/resourceQuota.libsonnet similarity index 86% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/resourceQuota.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/resourceQuota.libsonnet index c867c8321bb..b8fd09886ad 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/resourceQuota.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/resourceQuota.libsonnet @@ -3,12 +3,10 @@ '#':: d.pkg(name='resourceQuota', url='', help='"ResourceQuota sets aggregate quota restrictions enforced per namespace"'), '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -19,21 +17,21 @@ withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. 
The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { metadata+: { generateName: generateName } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { metadata+: { labels+: labels } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. 
This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { metadata+: { namespace: namespace } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. 
There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, @@ -41,9 +39,9 @@ withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { metadata+: { uid: uid } }, }, '#new':: d.fn(help='new returns an instance of ResourceQuota', args=[d.arg(name='name', type=d.T.string)]), diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/resourceQuotaSpec.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/resourceQuotaSpec.libsonnet similarity index 100% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/resourceQuotaSpec.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/resourceQuotaSpec.libsonnet diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/resourceQuotaStatus.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/resourceQuotaStatus.libsonnet similarity index 100% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/resourceQuotaStatus.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/resourceQuotaStatus.libsonnet diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/resourceRequirements.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/resourceRequirements.libsonnet new file mode 100644 index 00000000000..e760fe89837 --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/resourceRequirements.libsonnet @@ -0,0 +1,18 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='resourceRequirements', url='', help='"ResourceRequirements describes the compute resource requirements."'), + '#withClaims':: d.fn(help='"Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container.\\n\\nThis is an alpha field and requires enabling the DynamicResourceAllocation feature gate.\\n\\nThis field is immutable. It can only be set for containers."', args=[d.arg(name='claims', type=d.T.array)]), + withClaims(claims): { claims: if std.isArray(v=claims) then claims else [claims] }, + '#withClaimsMixin':: d.fn(help='"Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container.\\n\\nThis is an alpha field and requires enabling the DynamicResourceAllocation feature gate.\\n\\nThis field is immutable. It can only be set for containers."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='claims', type=d.T.array)]), + withClaimsMixin(claims): { claims+: if std.isArray(v=claims) then claims else [claims] }, + '#withLimits':: d.fn(help='"Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/"', args=[d.arg(name='limits', type=d.T.object)]), + withLimits(limits): { limits: limits }, + '#withLimitsMixin':: d.fn(help='"Limits describes the maximum amount of compute resources allowed. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='limits', type=d.T.object)]), + withLimitsMixin(limits): { limits+: limits }, + '#withRequests':: d.fn(help='"Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/"', args=[d.arg(name='requests', type=d.T.object)]), + withRequests(requests): { requests: requests }, + '#withRequestsMixin':: d.fn(help='"Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='requests', type=d.T.object)]), + withRequestsMixin(requests): { requests+: requests }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/scaleIOPersistentVolumeSource.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/scaleIOPersistentVolumeSource.libsonnet new file mode 100644 index 00000000000..b5b776df14d --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/scaleIOPersistentVolumeSource.libsonnet @@ -0,0 +1,31 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='scaleIOPersistentVolumeSource', url='', help='"ScaleIOPersistentVolumeSource represents a persistent ScaleIO volume"'), + '#secretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), + secretRef: { + '#withName':: d.fn(help='"name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { secretRef+: { name: name } }, + '#withNamespace':: d.fn(help='"namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), + withNamespace(namespace): { secretRef+: { namespace: namespace } }, + }, + '#withFsType':: d.fn(help='"fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Default is \\"xfs\\', args=[d.arg(name='fsType', type=d.T.string)]), + withFsType(fsType): { fsType: fsType }, + '#withGateway':: d.fn(help='"gateway is the host address of the ScaleIO API Gateway."', args=[d.arg(name='gateway', type=d.T.string)]), + withGateway(gateway): { gateway: gateway }, + '#withProtectionDomain':: d.fn(help='"protectionDomain is the name of the ScaleIO Protection Domain for the configured storage."', args=[d.arg(name='protectionDomain', type=d.T.string)]), + withProtectionDomain(protectionDomain): { protectionDomain: protectionDomain }, + '#withReadOnly':: d.fn(help='"readOnly defaults to false (read/write). 
ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + withReadOnly(readOnly): { readOnly: readOnly }, + '#withSslEnabled':: d.fn(help='"sslEnabled is the flag to enable/disable SSL communication with Gateway, default false"', args=[d.arg(name='sslEnabled', type=d.T.boolean)]), + withSslEnabled(sslEnabled): { sslEnabled: sslEnabled }, + '#withStorageMode':: d.fn(help='"storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. Default is ThinProvisioned."', args=[d.arg(name='storageMode', type=d.T.string)]), + withStorageMode(storageMode): { storageMode: storageMode }, + '#withStoragePool':: d.fn(help='"storagePool is the ScaleIO Storage Pool associated with the protection domain."', args=[d.arg(name='storagePool', type=d.T.string)]), + withStoragePool(storagePool): { storagePool: storagePool }, + '#withSystem':: d.fn(help='"system is the name of the storage system as configured in ScaleIO."', args=[d.arg(name='system', type=d.T.string)]), + withSystem(system): { system: system }, + '#withVolumeName':: d.fn(help='"volumeName is the name of a volume already created in the ScaleIO system that is associated with this volume source."', args=[d.arg(name='volumeName', type=d.T.string)]), + withVolumeName(volumeName): { volumeName: volumeName }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/scaleIOVolumeSource.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/scaleIOVolumeSource.libsonnet new file mode 100644 index 00000000000..4cc797c41a8 --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/scaleIOVolumeSource.libsonnet @@ -0,0 +1,29 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='scaleIOVolumeSource', url='', help='"ScaleIOVolumeSource represents a persistent ScaleIO volume"'), + '#secretRef':: d.obj(help='"LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace."'), + secretRef: { + '#withName':: d.fn(help='"Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { secretRef+: { name: name } }, + }, + '#withFsType':: d.fn(help='"fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Default is \\"xfs\\"."', args=[d.arg(name='fsType', type=d.T.string)]), + withFsType(fsType): { fsType: fsType }, + '#withGateway':: d.fn(help='"gateway is the host address of the ScaleIO API Gateway."', args=[d.arg(name='gateway', type=d.T.string)]), + withGateway(gateway): { gateway: gateway }, + '#withProtectionDomain':: d.fn(help='"protectionDomain is the name of the ScaleIO Protection Domain for the configured storage."', args=[d.arg(name='protectionDomain', type=d.T.string)]), + withProtectionDomain(protectionDomain): { protectionDomain: protectionDomain }, + '#withReadOnly':: d.fn(help='"readOnly Defaults to false (read/write). 
ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + withReadOnly(readOnly): { readOnly: readOnly }, + '#withSslEnabled':: d.fn(help='"sslEnabled Flag enable/disable SSL communication with Gateway, default false"', args=[d.arg(name='sslEnabled', type=d.T.boolean)]), + withSslEnabled(sslEnabled): { sslEnabled: sslEnabled }, + '#withStorageMode':: d.fn(help='"storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. Default is ThinProvisioned."', args=[d.arg(name='storageMode', type=d.T.string)]), + withStorageMode(storageMode): { storageMode: storageMode }, + '#withStoragePool':: d.fn(help='"storagePool is the ScaleIO Storage Pool associated with the protection domain."', args=[d.arg(name='storagePool', type=d.T.string)]), + withStoragePool(storagePool): { storagePool: storagePool }, + '#withSystem':: d.fn(help='"system is the name of the storage system as configured in ScaleIO."', args=[d.arg(name='system', type=d.T.string)]), + withSystem(system): { system: system }, + '#withVolumeName':: d.fn(help='"volumeName is the name of a volume already created in the ScaleIO system that is associated with this volume source."', args=[d.arg(name='volumeName', type=d.T.string)]), + withVolumeName(volumeName): { volumeName: volumeName }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/scopeSelector.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/scopeSelector.libsonnet similarity index 100% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/scopeSelector.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/scopeSelector.libsonnet diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/scopedResourceSelectorRequirement.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/scopedResourceSelectorRequirement.libsonnet similarity index 100% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/scopedResourceSelectorRequirement.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/scopedResourceSelectorRequirement.libsonnet diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/seLinuxOptions.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/seLinuxOptions.libsonnet similarity index 100% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/seLinuxOptions.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/seLinuxOptions.libsonnet diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/seccompProfile.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/seccompProfile.libsonnet similarity index 86% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/seccompProfile.libsonnet rename to 
operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/seccompProfile.libsonnet index 3228b65c2d6..9a34b45f5af 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/seccompProfile.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/seccompProfile.libsonnet @@ -1,7 +1,7 @@ { local d = (import 'doc-util/main.libsonnet'), '#':: d.pkg(name='seccompProfile', url='', help="\"SeccompProfile defines a pod/container's seccomp profile settings. Only one profile source may be set.\""), - '#withLocalhostProfile':: d.fn(help="\"localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must only be set if type is \\\"Localhost\\\".\"", args=[d.arg(name='localhostProfile', type=d.T.string)]), + '#withLocalhostProfile':: d.fn(help="\"localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must be set if type is \\\"Localhost\\\". Must NOT be set for any other type.\"", args=[d.arg(name='localhostProfile', type=d.T.string)]), withLocalhostProfile(localhostProfile): { localhostProfile: localhostProfile }, '#withType':: d.fn(help='"type indicates which kind of seccomp profile will be applied. Valid options are:\\n\\nLocalhost - a profile defined in a file on the node should be used. RuntimeDefault - the container runtime default profile should be used. Unconfined - no profile should be applied."', args=[d.arg(name='type', type=d.T.string)]), withType(type): { type: type }, diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/secret.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/secret.libsonnet similarity index 85% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/secret.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/secret.libsonnet index 868d6a33ba7..2f3784215c7 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/secret.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/secret.libsonnet @@ -3,12 +3,10 @@ '#':: d.pkg(name='secret', url='', help='"Secret holds secret data of a certain type. The total bytes of the values in the Data field must be less than MaxSecretSize bytes."'), '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -19,21 +17,21 @@ withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. 
Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { metadata+: { generateName: generateName } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. 
More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { metadata+: { labels+: labels } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. 
An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { metadata+: { namespace: namespace } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, @@ -41,9 +39,9 @@ withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { metadata+: { uid: uid } }, }, '#new':: d.fn(help='new returns an instance of Secret', args=[d.arg(name='name', type=d.T.string)]), @@ -61,7 +59,7 @@ withStringData(stringData): { stringData: stringData }, '#withStringDataMixin':: d.fn(help='"stringData allows specifying non-binary secret data in string form. It is provided as a write-only input field for convenience. All keys and values are merged into the data field on write, overwriting any existing values. The stringData field is never output when reading from the API."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='stringData', type=d.T.object)]), withStringDataMixin(stringData): { stringData+: stringData }, - '#withType':: d.fn(help='"Used to facilitate programmatic handling of secret data."', args=[d.arg(name='type', type=d.T.string)]), + '#withType':: d.fn(help='"Used to facilitate programmatic handling of secret data. More info: https://kubernetes.io/docs/concepts/configuration/secret/#secret-types"', args=[d.arg(name='type', type=d.T.string)]), withType(type): { type: type }, '#mixin': 'ignore', mixin: self, diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/secretEnvSource.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/secretEnvSource.libsonnet similarity index 100% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/secretEnvSource.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/secretEnvSource.libsonnet diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/secretKeySelector.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/secretKeySelector.libsonnet similarity index 100% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/secretKeySelector.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/secretKeySelector.libsonnet diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/secretProjection.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/secretProjection.libsonnet new file mode 100644 index 00000000000..b36accdbc13 --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/secretProjection.libsonnet @@ -0,0 +1,14 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='secretProjection', url='', help="\"Adapts a secret into a projected volume.\\n\\nThe contents of the target Secret's Data field will be presented in a projected volume as files using the keys in the Data field as the file names. Note that this is identical to a secret volume source without the default mode.\""), + '#withItems':: d.fn(help="\"items if unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. 
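A small usage sketch for the updated secret helpers above (the import path, namespace, and values are illustrative; 1.29 drops metadata.clusterName, so only the remaining metadata setters appear):

    // Assemble a Secret from the generated constructor and setters.
    local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';
    local secret = k.core.v1.secret;

    secret.new('app-credentials')
    + secret.metadata.withNamespace('default')
    + secret.metadata.withLabels({ app: 'example' })
    + secret.withType('Opaque')                        // see the updated #withType help above
    + secret.withStringData({ password: 'changeme' })  // write-only convenience field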
If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.\"", args=[d.arg(name='items', type=d.T.array)]), + withItems(items): { items: if std.isArray(v=items) then items else [items] }, + '#withItemsMixin':: d.fn(help="\"items if unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='items', type=d.T.array)]), + withItemsMixin(items): { items+: if std.isArray(v=items) then items else [items] }, + '#withName':: d.fn(help='"Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { name: name }, + '#withOptional':: d.fn(help='"optional field specify whether the Secret or its key must be defined"', args=[d.arg(name='optional', type=d.T.boolean)]), + withOptional(optional): { optional: optional }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/secretReference.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/secretReference.libsonnet similarity index 80% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/secretReference.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/secretReference.libsonnet index 764d03eb2d1..de3e3d3e520 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/secretReference.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/secretReference.libsonnet @@ -1,9 +1,9 @@ { local d = (import 'doc-util/main.libsonnet'), '#':: d.pkg(name='secretReference', url='', help='"SecretReference represents a Secret Reference. 
It has enough information to retrieve secret in any namespace"'), - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), withName(name): { name: name }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { namespace: namespace }, '#mixin': 'ignore', mixin: self, diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/secretVolumeSource.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/secretVolumeSource.libsonnet new file mode 100644 index 00000000000..8e012783cec --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/secretVolumeSource.libsonnet @@ -0,0 +1,16 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='secretVolumeSource', url='', help="\"Adapts a Secret into a volume.\\n\\nThe contents of the target Secret's Data field will be presented in a volume as files using the keys in the Data field as the file names. Secret volumes support ownership management and SELinux relabeling.\""), + '#withDefaultMode':: d.fn(help='"defaultMode is Optional: mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set."', args=[d.arg(name='defaultMode', type=d.T.integer)]), + withDefaultMode(defaultMode): { defaultMode: defaultMode }, + '#withItems':: d.fn(help="\"items If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.\"", args=[d.arg(name='items', type=d.T.array)]), + withItems(items): { items: if std.isArray(v=items) then items else [items] }, + '#withItemsMixin':: d.fn(help="\"items If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' 
path or start with '..'.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='items', type=d.T.array)]), + withItemsMixin(items): { items+: if std.isArray(v=items) then items else [items] }, + '#withOptional':: d.fn(help='"optional field specify whether the Secret or its keys must be defined"', args=[d.arg(name='optional', type=d.T.boolean)]), + withOptional(optional): { optional: optional }, + '#withSecretName':: d.fn(help="\"secretName is the name of the secret in the pod's namespace to use. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret\"", args=[d.arg(name='secretName', type=d.T.string)]), + withSecretName(secretName): { secretName: secretName }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/securityContext.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/securityContext.libsonnet similarity index 81% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/securityContext.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/securityContext.libsonnet index a9c9a201c4b..1d9d290c584 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/securityContext.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/securityContext.libsonnet @@ -25,7 +25,7 @@ }, '#seccompProfile':: d.obj(help="\"SeccompProfile defines a pod/container's seccomp profile settings. Only one profile source may be set.\""), seccompProfile: { - '#withLocalhostProfile':: d.fn(help="\"localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must only be set if type is \\\"Localhost\\\".\"", args=[d.arg(name='localhostProfile', type=d.T.string)]), + '#withLocalhostProfile':: d.fn(help="\"localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must be set if type is \\\"Localhost\\\". Must NOT be set for any other type.\"", args=[d.arg(name='localhostProfile', type=d.T.string)]), withLocalhostProfile(localhostProfile): { seccompProfile+: { localhostProfile: localhostProfile } }, '#withType':: d.fn(help='"type indicates which kind of seccomp profile will be applied. Valid options are:\\n\\nLocalhost - a profile defined in a file on the node should be used. RuntimeDefault - the container runtime default profile should be used. 
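The two new secret volume sources above differ only in where they nest. A minimal sketch of the volume-level one, with made-up volume and secret names, assuming the same conventional import as earlier sketches:

    // A pod volume entry whose `secret:` block is built with the generated helpers.
    local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';
    local svs = k.core.v1.secretVolumeSource;

    {
      name: 'tls-certs',
      secret: svs.withSecretName('app-tls')
              + svs.withOptional(false)
              + svs.withDefaultMode(256),  // 0400; JSON/jsonnet take the decimal form
    }

secretProjection is used the same way inside a projected volume's sources list, just without the defaultMode setter.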
Unconfined - no profile should be applied."', args=[d.arg(name='type', type=d.T.string)]), withType(type): { seccompProfile+: { type: type } }, @@ -36,22 +36,24 @@ withGmsaCredentialSpec(gmsaCredentialSpec): { windowsOptions+: { gmsaCredentialSpec: gmsaCredentialSpec } }, '#withGmsaCredentialSpecName':: d.fn(help='"GMSACredentialSpecName is the name of the GMSA credential spec to use."', args=[d.arg(name='gmsaCredentialSpecName', type=d.T.string)]), withGmsaCredentialSpecName(gmsaCredentialSpecName): { windowsOptions+: { gmsaCredentialSpecName: gmsaCredentialSpecName } }, + '#withHostProcess':: d.fn(help="\"HostProcess determines if a container should be run as a 'Host Process' container. All of a Pod's containers must have the same effective HostProcess value (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). In addition, if HostProcess is true then HostNetwork must also be set to true.\"", args=[d.arg(name='hostProcess', type=d.T.boolean)]), + withHostProcess(hostProcess): { windowsOptions+: { hostProcess: hostProcess } }, '#withRunAsUserName':: d.fn(help='"The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence."', args=[d.arg(name='runAsUserName', type=d.T.string)]), withRunAsUserName(runAsUserName): { windowsOptions+: { runAsUserName: runAsUserName } }, }, - '#withAllowPrivilegeEscalation':: d.fn(help='"AllowPrivilegeEscalation controls whether a process can gain more privileges than its parent process. This bool directly controls if the no_new_privs flag will be set on the container process. AllowPrivilegeEscalation is true always when the container is: 1) run as Privileged 2) has CAP_SYS_ADMIN"', args=[d.arg(name='allowPrivilegeEscalation', type=d.T.boolean)]), + '#withAllowPrivilegeEscalation':: d.fn(help='"AllowPrivilegeEscalation controls whether a process can gain more privileges than its parent process. This bool directly controls if the no_new_privs flag will be set on the container process. AllowPrivilegeEscalation is true always when the container is: 1) run as Privileged 2) has CAP_SYS_ADMIN Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='allowPrivilegeEscalation', type=d.T.boolean)]), withAllowPrivilegeEscalation(allowPrivilegeEscalation): { allowPrivilegeEscalation: allowPrivilegeEscalation }, - '#withPrivileged':: d.fn(help='"Run container in privileged mode. Processes in privileged containers are essentially equivalent to root on the host. Defaults to false."', args=[d.arg(name='privileged', type=d.T.boolean)]), + '#withPrivileged':: d.fn(help='"Run container in privileged mode. Processes in privileged containers are essentially equivalent to root on the host. Defaults to false. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='privileged', type=d.T.boolean)]), withPrivileged(privileged): { privileged: privileged }, - '#withProcMount':: d.fn(help='"procMount denotes the type of proc mount to use for the containers. The default is DefaultProcMount which uses the container runtime defaults for readonly paths and masked paths. 
This requires the ProcMountType feature flag to be enabled."', args=[d.arg(name='procMount', type=d.T.string)]), + '#withProcMount':: d.fn(help='"procMount denotes the type of proc mount to use for the containers. The default is DefaultProcMount which uses the container runtime defaults for readonly paths and masked paths. This requires the ProcMountType feature flag to be enabled. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='procMount', type=d.T.string)]), withProcMount(procMount): { procMount: procMount }, - '#withReadOnlyRootFilesystem':: d.fn(help='"Whether this container has a read-only root filesystem. Default is false."', args=[d.arg(name='readOnlyRootFilesystem', type=d.T.boolean)]), + '#withReadOnlyRootFilesystem':: d.fn(help='"Whether this container has a read-only root filesystem. Default is false. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='readOnlyRootFilesystem', type=d.T.boolean)]), withReadOnlyRootFilesystem(readOnlyRootFilesystem): { readOnlyRootFilesystem: readOnlyRootFilesystem }, - '#withRunAsGroup':: d.fn(help='"The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence."', args=[d.arg(name='runAsGroup', type=d.T.integer)]), + '#withRunAsGroup':: d.fn(help='"The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='runAsGroup', type=d.T.integer)]), withRunAsGroup(runAsGroup): { runAsGroup: runAsGroup }, '#withRunAsNonRoot':: d.fn(help='"Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence."', args=[d.arg(name='runAsNonRoot', type=d.T.boolean)]), withRunAsNonRoot(runAsNonRoot): { runAsNonRoot: runAsNonRoot }, - '#withRunAsUser':: d.fn(help='"The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence."', args=[d.arg(name='runAsUser', type=d.T.integer)]), + '#withRunAsUser':: d.fn(help='"The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. 
Note that this field cannot be set when spec.os.name is windows."', args=[d.arg(name='runAsUser', type=d.T.integer)]), withRunAsUser(runAsUser): { runAsUser: runAsUser }, '#mixin': 'ignore', mixin: self, diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/service.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/service.libsonnet similarity index 70% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/service.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/service.libsonnet index aabed7a52eb..ede4de85b04 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/service.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/service.libsonnet @@ -3,12 +3,10 @@ '#':: d.pkg(name='service', url='', help='"Service is a named abstraction of software service (for example, mysql) consisting of local port (for example 3306) that the proxy listens on, and the selector that determines which pods will answer requests sent through the proxy."'), '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. 
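Since the securityContext block above picks up the 1.29 wording (the spec.os.name caveats and the new windowsOptions.withHostProcess setter), here is a short, illustrative sketch of a hardened container context built only from setters shown in this file:

    // Container securityContext assembled from the generated helpers.
    local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';
    local sc = k.core.v1.securityContext;

    sc.withAllowPrivilegeEscalation(false)
    + sc.withReadOnlyRootFilesystem(true)
    + sc.withRunAsNonRoot(true)
    + sc.withRunAsUser(10001)                       // arbitrary non-root UID
    + sc.seccompProfile.withType('RuntimeDefault')  // seccompProfile sub-object shown above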
This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -19,21 +17,21 @@ withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. 
This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { metadata+: { generateName: generateName } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { metadata+: { labels+: labels } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". 
The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { metadata+: { namespace: namespace } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, @@ -41,9 +39,9 @@ withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. 
They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { metadata+: { uid: uid } }, }, '#new':: d.fn(help='new returns an instance of Service', args=[d.arg(name='name', type=d.T.string)]), @@ -61,13 +59,13 @@ withTimeoutSeconds(timeoutSeconds): { spec+: { sessionAffinityConfig+: { clientIP+: { timeoutSeconds: timeoutSeconds } } } }, }, }, - '#withAllocateLoadBalancerNodePorts':: d.fn(help='"allocateLoadBalancerNodePorts defines if NodePorts will be automatically allocated for services with type LoadBalancer. Default is \\"true\\". It may be set to \\"false\\" if the cluster load-balancer does not rely on NodePorts. allocateLoadBalancerNodePorts may only be set for services with type LoadBalancer and will be cleared if the type is changed to any other type. This field is alpha-level and is only honored by servers that enable the ServiceLBNodePortControl feature."', args=[d.arg(name='allocateLoadBalancerNodePorts', type=d.T.boolean)]), + '#withAllocateLoadBalancerNodePorts':: d.fn(help='"allocateLoadBalancerNodePorts defines if NodePorts will be automatically allocated for services with type LoadBalancer. Default is \\"true\\". It may be set to \\"false\\" if the cluster load-balancer does not rely on NodePorts. If the caller requests specific NodePorts (by specifying a value), those requests will be respected, regardless of this field. This field may only be set for services with type LoadBalancer and will be cleared if the type is changed to any other type."', args=[d.arg(name='allocateLoadBalancerNodePorts', type=d.T.boolean)]), withAllocateLoadBalancerNodePorts(allocateLoadBalancerNodePorts): { spec+: { allocateLoadBalancerNodePorts: allocateLoadBalancerNodePorts } }, '#withClusterIP':: d.fn(help='"clusterIP is the IP address of the service and is usually assigned randomly. 
If an address is specified manually, is in-range (as per system configuration), and is not in use, it will be allocated to the service; otherwise creation of the service will fail. This field may not be changed through updates unless the type field is also being changed to ExternalName (which requires this field to be blank) or the type field is being changed from ExternalName (in which case this field may optionally be specified, as describe above). Valid values are \\"None\\", empty string (\\"\\"), or a valid IP address. Setting this to \\"None\\" makes a \\"headless service\\" (no virtual IP), which is useful when direct endpoint connections are preferred and proxying is not required. Only applies to types ClusterIP, NodePort, and LoadBalancer. If this field is specified when creating a Service of type ExternalName, creation will fail. This field will be wiped when updating a Service to type ExternalName. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies"', args=[d.arg(name='clusterIP', type=d.T.string)]), withClusterIP(clusterIP): { spec+: { clusterIP: clusterIP } }, - '#withClusterIPs':: d.fn(help='"ClusterIPs is a list of IP addresses assigned to this service, and are usually assigned randomly. If an address is specified manually, is in-range (as per system configuration), and is not in use, it will be allocated to the service; otherwise creation of the service will fail. This field may not be changed through updates unless the type field is also being changed to ExternalName (which requires this field to be empty) or the type field is being changed from ExternalName (in which case this field may optionally be specified, as describe above). Valid values are \\"None\\", empty string (\\"\\"), or a valid IP address. Setting this to \\"None\\" makes a \\"headless service\\" (no virtual IP), which is useful when direct endpoint connections are preferred and proxying is not required. Only applies to types ClusterIP, NodePort, and LoadBalancer. If this field is specified when creating a Service of type ExternalName, creation will fail. This field will be wiped when updating a Service to type ExternalName. If this field is not specified, it will be initialized from the clusterIP field. If this field is specified, clients must ensure that clusterIPs[0] and clusterIP have the same value.\\n\\nUnless the \\"IPv6DualStack\\" feature gate is enabled, this field is limited to one value, which must be the same as the clusterIP field. If the feature gate is enabled, this field may hold a maximum of two entries (dual-stack IPs, in either order). These IPs must correspond to the values of the ipFamilies field. Both clusterIPs and ipFamilies are governed by the ipFamilyPolicy field. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies"', args=[d.arg(name='clusterIPs', type=d.T.array)]), + '#withClusterIPs':: d.fn(help='"ClusterIPs is a list of IP addresses assigned to this service, and are usually assigned randomly. If an address is specified manually, is in-range (as per system configuration), and is not in use, it will be allocated to the service; otherwise creation of the service will fail. This field may not be changed through updates unless the type field is also being changed to ExternalName (which requires this field to be empty) or the type field is being changed from ExternalName (in which case this field may optionally be specified, as describe above). 
Valid values are \\"None\\", empty string (\\"\\"), or a valid IP address. Setting this to \\"None\\" makes a \\"headless service\\" (no virtual IP), which is useful when direct endpoint connections are preferred and proxying is not required. Only applies to types ClusterIP, NodePort, and LoadBalancer. If this field is specified when creating a Service of type ExternalName, creation will fail. This field will be wiped when updating a Service to type ExternalName. If this field is not specified, it will be initialized from the clusterIP field. If this field is specified, clients must ensure that clusterIPs[0] and clusterIP have the same value.\\n\\nThis field may hold a maximum of two entries (dual-stack IPs, in either order). These IPs must correspond to the values of the ipFamilies field. Both clusterIPs and ipFamilies are governed by the ipFamilyPolicy field. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies"', args=[d.arg(name='clusterIPs', type=d.T.array)]), withClusterIPs(clusterIPs): { spec+: { clusterIPs: if std.isArray(v=clusterIPs) then clusterIPs else [clusterIPs] } }, - '#withClusterIPsMixin':: d.fn(help='"ClusterIPs is a list of IP addresses assigned to this service, and are usually assigned randomly. If an address is specified manually, is in-range (as per system configuration), and is not in use, it will be allocated to the service; otherwise creation of the service will fail. This field may not be changed through updates unless the type field is also being changed to ExternalName (which requires this field to be empty) or the type field is being changed from ExternalName (in which case this field may optionally be specified, as describe above). Valid values are \\"None\\", empty string (\\"\\"), or a valid IP address. Setting this to \\"None\\" makes a \\"headless service\\" (no virtual IP), which is useful when direct endpoint connections are preferred and proxying is not required. Only applies to types ClusterIP, NodePort, and LoadBalancer. If this field is specified when creating a Service of type ExternalName, creation will fail. This field will be wiped when updating a Service to type ExternalName. If this field is not specified, it will be initialized from the clusterIP field. If this field is specified, clients must ensure that clusterIPs[0] and clusterIP have the same value.\\n\\nUnless the \\"IPv6DualStack\\" feature gate is enabled, this field is limited to one value, which must be the same as the clusterIP field. If the feature gate is enabled, this field may hold a maximum of two entries (dual-stack IPs, in either order). These IPs must correspond to the values of the ipFamilies field. Both clusterIPs and ipFamilies are governed by the ipFamilyPolicy field. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='clusterIPs', type=d.T.array)]), + '#withClusterIPsMixin':: d.fn(help='"ClusterIPs is a list of IP addresses assigned to this service, and are usually assigned randomly. If an address is specified manually, is in-range (as per system configuration), and is not in use, it will be allocated to the service; otherwise creation of the service will fail. 
This field may not be changed through updates unless the type field is also being changed to ExternalName (which requires this field to be empty) or the type field is being changed from ExternalName (in which case this field may optionally be specified, as describe above). Valid values are \\"None\\", empty string (\\"\\"), or a valid IP address. Setting this to \\"None\\" makes a \\"headless service\\" (no virtual IP), which is useful when direct endpoint connections are preferred and proxying is not required. Only applies to types ClusterIP, NodePort, and LoadBalancer. If this field is specified when creating a Service of type ExternalName, creation will fail. This field will be wiped when updating a Service to type ExternalName. If this field is not specified, it will be initialized from the clusterIP field. If this field is specified, clients must ensure that clusterIPs[0] and clusterIP have the same value.\\n\\nThis field may hold a maximum of two entries (dual-stack IPs, in either order). These IPs must correspond to the values of the ipFamilies field. Both clusterIPs and ipFamilies are governed by the ipFamilyPolicy field. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='clusterIPs', type=d.T.array)]), withClusterIPsMixin(clusterIPs): { spec+: { clusterIPs+: if std.isArray(v=clusterIPs) then clusterIPs else [clusterIPs] } }, '#withExternalIPs':: d.fn(help='"externalIPs is a list of IP addresses for which nodes in the cluster will also accept traffic for this service. These IPs are not managed by Kubernetes. The user is responsible for ensuring that traffic arrives at a node with this IP. A common example is external load-balancers that are not part of the Kubernetes system."', args=[d.arg(name='externalIPs', type=d.T.array)]), withExternalIPs(externalIPs): { spec+: { externalIPs: if std.isArray(v=externalIPs) then externalIPs else [externalIPs] } }, @@ -75,25 +73,25 @@ withExternalIPsMixin(externalIPs): { spec+: { externalIPs+: if std.isArray(v=externalIPs) then externalIPs else [externalIPs] } }, '#withExternalName':: d.fn(help='"externalName is the external reference that discovery mechanisms will return as an alias for this service (e.g. a DNS CNAME record). No proxying will be involved. Must be a lowercase RFC-1123 hostname (https://tools.ietf.org/html/rfc1123) and requires `type` to be \\"ExternalName\\"."', args=[d.arg(name='externalName', type=d.T.string)]), withExternalName(externalName): { spec+: { externalName: externalName } }, - '#withExternalTrafficPolicy':: d.fn(help='"externalTrafficPolicy denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints. \\"Local\\" preserves the client source IP and avoids a second hop for LoadBalancer and Nodeport type services, but risks potentially imbalanced traffic spreading. \\"Cluster\\" obscures the client source IP and may cause a second hop to another node, but should have good overall load-spreading."', args=[d.arg(name='externalTrafficPolicy', type=d.T.string)]), + '#withExternalTrafficPolicy':: d.fn(help="\"externalTrafficPolicy describes how nodes distribute service traffic they receive on one of the Service's \\\"externally-facing\\\" addresses (NodePorts, ExternalIPs, and LoadBalancer IPs). 
If set to \\\"Local\\\", the proxy will configure the service in a way that assumes that external load balancers will take care of balancing the service traffic between nodes, and so each node will deliver traffic only to the node-local endpoints of the service, without masquerading the client source IP. (Traffic mistakenly sent to a node with no endpoints will be dropped.) The default value, \\\"Cluster\\\", uses the standard behavior of routing to all endpoints evenly (possibly modified by topology and other features). Note that traffic sent to an External IP or LoadBalancer IP from within the cluster will always get \\\"Cluster\\\" semantics, but clients sending to a NodePort from within the cluster may need to take traffic policy into account when picking a node.\"", args=[d.arg(name='externalTrafficPolicy', type=d.T.string)]), withExternalTrafficPolicy(externalTrafficPolicy): { spec+: { externalTrafficPolicy: externalTrafficPolicy } }, - '#withHealthCheckNodePort':: d.fn(help='"healthCheckNodePort specifies the healthcheck nodePort for the service. This only applies when type is set to LoadBalancer and externalTrafficPolicy is set to Local. If a value is specified, is in-range, and is not in use, it will be used. If not specified, a value will be automatically allocated. External systems (e.g. load-balancers) can use this port to determine if a given node holds endpoints for this service or not. If this field is specified when creating a Service which does not need it, creation will fail. This field will be wiped when updating a Service to no longer need it (e.g. changing type)."', args=[d.arg(name='healthCheckNodePort', type=d.T.integer)]), + '#withHealthCheckNodePort':: d.fn(help='"healthCheckNodePort specifies the healthcheck nodePort for the service. This only applies when type is set to LoadBalancer and externalTrafficPolicy is set to Local. If a value is specified, is in-range, and is not in use, it will be used. If not specified, a value will be automatically allocated. External systems (e.g. load-balancers) can use this port to determine if a given node holds endpoints for this service or not. If this field is specified when creating a Service which does not need it, creation will fail. This field will be wiped when updating a Service to no longer need it (e.g. changing type). This field cannot be updated once set."', args=[d.arg(name='healthCheckNodePort', type=d.T.integer)]), withHealthCheckNodePort(healthCheckNodePort): { spec+: { healthCheckNodePort: healthCheckNodePort } }, - '#withInternalTrafficPolicy':: d.fn(help='"InternalTrafficPolicy specifies if the cluster internal traffic should be routed to all endpoints or node-local endpoints only. \\"Cluster\\" routes internal traffic to a Service to all endpoints. \\"Local\\" routes traffic to node-local endpoints only, traffic is dropped if no node-local endpoints are ready. The default value is \\"Cluster\\"."', args=[d.arg(name='internalTrafficPolicy', type=d.T.string)]), + '#withInternalTrafficPolicy':: d.fn(help='"InternalTrafficPolicy describes how nodes distribute service traffic they receive on the ClusterIP. If set to \\"Local\\", the proxy will assume that pods only want to talk to endpoints of the service on the same node as the pod, dropping the traffic if there are no local endpoints. 
The default value, \\"Cluster\\", uses the standard behavior of routing to all endpoints evenly (possibly modified by topology and other features)."', args=[d.arg(name='internalTrafficPolicy', type=d.T.string)]), withInternalTrafficPolicy(internalTrafficPolicy): { spec+: { internalTrafficPolicy: internalTrafficPolicy } }, - '#withIpFamilies':: d.fn(help='"IPFamilies is a list of IP families (e.g. IPv4, IPv6) assigned to this service, and is gated by the \\"IPv6DualStack\\" feature gate. This field is usually assigned automatically based on cluster configuration and the ipFamilyPolicy field. If this field is specified manually, the requested family is available in the cluster, and ipFamilyPolicy allows it, it will be used; otherwise creation of the service will fail. This field is conditionally mutable: it allows for adding or removing a secondary IP family, but it does not allow changing the primary IP family of the Service. Valid values are \\"IPv4\\" and \\"IPv6\\". This field only applies to Services of types ClusterIP, NodePort, and LoadBalancer, and does apply to \\"headless\\" services. This field will be wiped when updating a Service to type ExternalName.\\n\\nThis field may hold a maximum of two entries (dual-stack families, in either order). These families must correspond to the values of the clusterIPs field, if specified. Both clusterIPs and ipFamilies are governed by the ipFamilyPolicy field."', args=[d.arg(name='ipFamilies', type=d.T.array)]), + '#withIpFamilies':: d.fn(help='"IPFamilies is a list of IP families (e.g. IPv4, IPv6) assigned to this service. This field is usually assigned automatically based on cluster configuration and the ipFamilyPolicy field. If this field is specified manually, the requested family is available in the cluster, and ipFamilyPolicy allows it, it will be used; otherwise creation of the service will fail. This field is conditionally mutable: it allows for adding or removing a secondary IP family, but it does not allow changing the primary IP family of the Service. Valid values are \\"IPv4\\" and \\"IPv6\\". This field only applies to Services of types ClusterIP, NodePort, and LoadBalancer, and does apply to \\"headless\\" services. This field will be wiped when updating a Service to type ExternalName.\\n\\nThis field may hold a maximum of two entries (dual-stack families, in either order). These families must correspond to the values of the clusterIPs field, if specified. Both clusterIPs and ipFamilies are governed by the ipFamilyPolicy field."', args=[d.arg(name='ipFamilies', type=d.T.array)]), withIpFamilies(ipFamilies): { spec+: { ipFamilies: if std.isArray(v=ipFamilies) then ipFamilies else [ipFamilies] } }, - '#withIpFamiliesMixin':: d.fn(help='"IPFamilies is a list of IP families (e.g. IPv4, IPv6) assigned to this service, and is gated by the \\"IPv6DualStack\\" feature gate. This field is usually assigned automatically based on cluster configuration and the ipFamilyPolicy field. If this field is specified manually, the requested family is available in the cluster, and ipFamilyPolicy allows it, it will be used; otherwise creation of the service will fail. This field is conditionally mutable: it allows for adding or removing a secondary IP family, but it does not allow changing the primary IP family of the Service. Valid values are \\"IPv4\\" and \\"IPv6\\". This field only applies to Services of types ClusterIP, NodePort, and LoadBalancer, and does apply to \\"headless\\" services. 
This field will be wiped when updating a Service to type ExternalName.\\n\\nThis field may hold a maximum of two entries (dual-stack families, in either order). These families must correspond to the values of the clusterIPs field, if specified. Both clusterIPs and ipFamilies are governed by the ipFamilyPolicy field."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ipFamilies', type=d.T.array)]), + '#withIpFamiliesMixin':: d.fn(help='"IPFamilies is a list of IP families (e.g. IPv4, IPv6) assigned to this service. This field is usually assigned automatically based on cluster configuration and the ipFamilyPolicy field. If this field is specified manually, the requested family is available in the cluster, and ipFamilyPolicy allows it, it will be used; otherwise creation of the service will fail. This field is conditionally mutable: it allows for adding or removing a secondary IP family, but it does not allow changing the primary IP family of the Service. Valid values are \\"IPv4\\" and \\"IPv6\\". This field only applies to Services of types ClusterIP, NodePort, and LoadBalancer, and does apply to \\"headless\\" services. This field will be wiped when updating a Service to type ExternalName.\\n\\nThis field may hold a maximum of two entries (dual-stack families, in either order). These families must correspond to the values of the clusterIPs field, if specified. Both clusterIPs and ipFamilies are governed by the ipFamilyPolicy field."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ipFamilies', type=d.T.array)]), withIpFamiliesMixin(ipFamilies): { spec+: { ipFamilies+: if std.isArray(v=ipFamilies) then ipFamilies else [ipFamilies] } }, - '#withIpFamilyPolicy':: d.fn(help='"IPFamilyPolicy represents the dual-stack-ness requested or required by this Service, and is gated by the \\"IPv6DualStack\\" feature gate. If there is no value provided, then this field will be set to SingleStack. Services can be \\"SingleStack\\" (a single IP family), \\"PreferDualStack\\" (two IP families on dual-stack configured clusters or a single IP family on single-stack clusters), or \\"RequireDualStack\\" (two IP families on dual-stack configured clusters, otherwise fail). The ipFamilies and clusterIPs fields depend on the value of this field. This field will be wiped when updating a service to type ExternalName."', args=[d.arg(name='ipFamilyPolicy', type=d.T.string)]), + '#withIpFamilyPolicy':: d.fn(help='"IPFamilyPolicy represents the dual-stack-ness requested or required by this Service. If there is no value provided, then this field will be set to SingleStack. Services can be \\"SingleStack\\" (a single IP family), \\"PreferDualStack\\" (two IP families on dual-stack configured clusters or a single IP family on single-stack clusters), or \\"RequireDualStack\\" (two IP families on dual-stack configured clusters, otherwise fail). The ipFamilies and clusterIPs fields depend on the value of this field. This field will be wiped when updating a service to type ExternalName."', args=[d.arg(name='ipFamilyPolicy', type=d.T.string)]), withIpFamilyPolicy(ipFamilyPolicy): { spec+: { ipFamilyPolicy: ipFamilyPolicy } }, '#withLoadBalancerClass':: d.fn(help="\"loadBalancerClass is the class of the load balancer implementation this Service belongs to. If specified, the value of this field must be a label-style identifier, with an optional prefix, e.g. \\\"internal-vip\\\" or \\\"example.com/internal-vip\\\". Unprefixed names are reserved for end-users. 
This field can only be set when the Service type is 'LoadBalancer'. If not set, the default load balancer implementation is used, today this is typically done through the cloud provider integration, but should apply for any default implementation. If set, it is assumed that a load balancer implementation is watching for Services with a matching class. Any default load balancer implementation (e.g. cloud providers) should ignore Services that set this field. This field can only be set when creating or updating a Service to type 'LoadBalancer'. Once set, it can not be changed. This field will be wiped when a service is updated to a non 'LoadBalancer' type.\"", args=[d.arg(name='loadBalancerClass', type=d.T.string)]), withLoadBalancerClass(loadBalancerClass): { spec+: { loadBalancerClass: loadBalancerClass } }, - '#withLoadBalancerIP':: d.fn(help='"Only applies to Service Type: LoadBalancer LoadBalancer will get created with the IP specified in this field. This feature depends on whether the underlying cloud-provider supports specifying the loadBalancerIP when a load balancer is created. This field will be ignored if the cloud-provider does not support the feature."', args=[d.arg(name='loadBalancerIP', type=d.T.string)]), + '#withLoadBalancerIP':: d.fn(help='"Only applies to Service Type: LoadBalancer. This feature depends on whether the underlying cloud-provider supports specifying the loadBalancerIP when a load balancer is created. This field will be ignored if the cloud-provider does not support the feature. Deprecated: This field was under-specified and its meaning varies across implementations. Using it is non-portable and it may not support dual-stack. Users are encouraged to use implementation-specific annotations when available."', args=[d.arg(name='loadBalancerIP', type=d.T.string)]), withLoadBalancerIP(loadBalancerIP): { spec+: { loadBalancerIP: loadBalancerIP } }, - '#withLoadBalancerSourceRanges':: d.fn(help='"If specified and supported by the platform, this will restrict traffic through the cloud-provider load-balancer will be restricted to the specified client IPs. This field will be ignored if the cloud-provider does not support the feature.\\" More info: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/"', args=[d.arg(name='loadBalancerSourceRanges', type=d.T.array)]), + '#withLoadBalancerSourceRanges':: d.fn(help='"If specified and supported by the platform, this will restrict traffic through the cloud-provider load-balancer will be restricted to the specified client IPs. This field will be ignored if the cloud-provider does not support the feature.\\" More info: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/"', args=[d.arg(name='loadBalancerSourceRanges', type=d.T.array)]), withLoadBalancerSourceRanges(loadBalancerSourceRanges): { spec+: { loadBalancerSourceRanges: if std.isArray(v=loadBalancerSourceRanges) then loadBalancerSourceRanges else [loadBalancerSourceRanges] } }, - '#withLoadBalancerSourceRangesMixin':: d.fn(help='"If specified and supported by the platform, this will restrict traffic through the cloud-provider load-balancer will be restricted to the specified client IPs. 
This field will be ignored if the cloud-provider does not support the feature.\\" More info: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='loadBalancerSourceRanges', type=d.T.array)]), + '#withLoadBalancerSourceRangesMixin':: d.fn(help='"If specified and supported by the platform, this will restrict traffic through the cloud-provider load-balancer will be restricted to the specified client IPs. This field will be ignored if the cloud-provider does not support the feature.\\" More info: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='loadBalancerSourceRanges', type=d.T.array)]), withLoadBalancerSourceRangesMixin(loadBalancerSourceRanges): { spec+: { loadBalancerSourceRanges+: if std.isArray(v=loadBalancerSourceRanges) then loadBalancerSourceRanges else [loadBalancerSourceRanges] } }, '#withPorts':: d.fn(help='"The list of ports that are exposed by this service. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies"', args=[d.arg(name='ports', type=d.T.array)]), withPorts(ports): { spec+: { ports: if std.isArray(v=ports) then ports else [ports] } }, @@ -107,10 +105,6 @@ withSelectorMixin(selector): { spec+: { selector+: selector } }, '#withSessionAffinity':: d.fn(help='"Supports \\"ClientIP\\" and \\"None\\". Used to maintain session affinity. Enable client IP based session affinity. Must be ClientIP or None. Defaults to None. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies"', args=[d.arg(name='sessionAffinity', type=d.T.string)]), withSessionAffinity(sessionAffinity): { spec+: { sessionAffinity: sessionAffinity } }, - '#withTopologyKeys':: d.fn(help='"topologyKeys is a preference-order list of topology keys which implementations of services should use to preferentially sort endpoints when accessing this Service, it can not be used at the same time as externalTrafficPolicy=Local. Topology keys must be valid label keys and at most 16 keys may be specified. Endpoints are chosen based on the first topology key with available backends. If this field is specified and all entries have no backends that match the topology of the client, the service has no backends for that client and connections should fail. The special value \\"*\\" may be used to mean \\"any topology\\". This catch-all value, if used, only makes sense as the last value in the list. If this is not specified or empty, no topology constraints will be applied. This field is alpha-level and is only honored by servers that enable the ServiceTopology feature. This field is deprecated and will be removed in a future version."', args=[d.arg(name='topologyKeys', type=d.T.array)]), - withTopologyKeys(topologyKeys): { spec+: { topologyKeys: if std.isArray(v=topologyKeys) then topologyKeys else [topologyKeys] } }, - '#withTopologyKeysMixin':: d.fn(help='"topologyKeys is a preference-order list of topology keys which implementations of services should use to preferentially sort endpoints when accessing this Service, it can not be used at the same time as externalTrafficPolicy=Local. Topology keys must be valid label keys and at most 16 keys may be specified. Endpoints are chosen based on the first topology key with available backends. 
If this field is specified and all entries have no backends that match the topology of the client, the service has no backends for that client and connections should fail. The special value \\"*\\" may be used to mean \\"any topology\\". This catch-all value, if used, only makes sense as the last value in the list. If this is not specified or empty, no topology constraints will be applied. This field is alpha-level and is only honored by servers that enable the ServiceTopology feature. This field is deprecated and will be removed in a future version."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='topologyKeys', type=d.T.array)]), - withTopologyKeysMixin(topologyKeys): { spec+: { topologyKeys+: if std.isArray(v=topologyKeys) then topologyKeys else [topologyKeys] } }, '#withType':: d.fn(help='"type determines how the Service is exposed. Defaults to ClusterIP. Valid options are ExternalName, ClusterIP, NodePort, and LoadBalancer. \\"ClusterIP\\" allocates a cluster-internal IP address for load-balancing to endpoints. Endpoints are determined by the selector or if that is not specified, by manual construction of an Endpoints object or EndpointSlice objects. If clusterIP is \\"None\\", no virtual IP is allocated and the endpoints are published as a set of endpoints rather than a virtual IP. \\"NodePort\\" builds on ClusterIP and allocates a port on every node which routes to the same endpoints as the clusterIP. \\"LoadBalancer\\" builds on NodePort and creates an external load-balancer (if supported in the current cloud) which routes to the same endpoints as the clusterIP. \\"ExternalName\\" aliases this service to the specified externalName. Several other fields do not apply to ExternalName services. More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types"', args=[d.arg(name='type', type=d.T.string)]), withType(type): { spec+: { type: type } }, }, diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/serviceAccount.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/serviceAccount.libsonnet similarity index 81% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/serviceAccount.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/serviceAccount.libsonnet index f67ff388c54..c8fd262c34f 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/serviceAccount.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/serviceAccount.libsonnet @@ -3,12 +3,10 @@ '#':: d.pkg(name='serviceAccount', url='', help='"ServiceAccount binds together: * a name, understood by users, and perhaps by peripheral systems, for an identity * a principal that can be authenticated and authorized * a set of secrets"'), '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. 
More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -19,21 +17,21 @@ withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. 
If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { metadata+: { generateName: generateName } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { metadata+: { labels+: labels } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. 
Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { metadata+: { namespace: namespace } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, @@ -41,9 +39,9 @@ withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. 
Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { metadata+: { uid: uid } }, }, '#new':: d.fn(help='new returns an instance of ServiceAccount', args=[d.arg(name='name', type=d.T.string)]), @@ -57,9 +55,9 @@ withImagePullSecrets(imagePullSecrets): { imagePullSecrets: if std.isArray(v=imagePullSecrets) then imagePullSecrets else [imagePullSecrets] }, '#withImagePullSecretsMixin':: d.fn(help='"ImagePullSecrets is a list of references to secrets in the same namespace to use for pulling any images in pods that reference this ServiceAccount. ImagePullSecrets are distinct from Secrets because Secrets can be mounted in the pod, but ImagePullSecrets are only accessed by the kubelet. More info: https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='imagePullSecrets', type=d.T.array)]), withImagePullSecretsMixin(imagePullSecrets): { imagePullSecrets+: if std.isArray(v=imagePullSecrets) then imagePullSecrets else [imagePullSecrets] }, - '#withSecrets':: d.fn(help='"Secrets is the list of secrets allowed to be used by pods running using this ServiceAccount. More info: https://kubernetes.io/docs/concepts/configuration/secret"', args=[d.arg(name='secrets', type=d.T.array)]), + '#withSecrets':: d.fn(help='"Secrets is a list of the secrets in the same namespace that pods running using this ServiceAccount are allowed to use. Pods are only limited to this list if this service account has a \\"kubernetes.io/enforce-mountable-secrets\\" annotation set to \\"true\\". This field should not be used to find auto-generated service account token secrets for use outside of pods. Instead, tokens can be requested directly using the TokenRequest API, or service account token secrets can be manually created. More info: https://kubernetes.io/docs/concepts/configuration/secret"', args=[d.arg(name='secrets', type=d.T.array)]), withSecrets(secrets): { secrets: if std.isArray(v=secrets) then secrets else [secrets] }, - '#withSecretsMixin':: d.fn(help='"Secrets is the list of secrets allowed to be used by pods running using this ServiceAccount. More info: https://kubernetes.io/docs/concepts/configuration/secret"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='secrets', type=d.T.array)]), + '#withSecretsMixin':: d.fn(help='"Secrets is a list of the secrets in the same namespace that pods running using this ServiceAccount are allowed to use. Pods are only limited to this list if this service account has a \\"kubernetes.io/enforce-mountable-secrets\\" annotation set to \\"true\\". This field should not be used to find auto-generated service account token secrets for use outside of pods. Instead, tokens can be requested directly using the TokenRequest API, or service account token secrets can be manually created. 
More info: https://kubernetes.io/docs/concepts/configuration/secret"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='secrets', type=d.T.array)]), withSecretsMixin(secrets): { secrets+: if std.isArray(v=secrets) then secrets else [secrets] }, '#mixin': 'ignore', mixin: self, diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/serviceAccountTokenProjection.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/serviceAccountTokenProjection.libsonnet similarity index 87% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/serviceAccountTokenProjection.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/serviceAccountTokenProjection.libsonnet index 8b1dac4db2b..17e6b902215 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/serviceAccountTokenProjection.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/serviceAccountTokenProjection.libsonnet @@ -1,11 +1,11 @@ { local d = (import 'doc-util/main.libsonnet'), '#':: d.pkg(name='serviceAccountTokenProjection', url='', help='"ServiceAccountTokenProjection represents a projected service account token volume. This projection can be used to insert a service account token into the pods runtime filesystem for use against APIs (Kubernetes API Server or otherwise)."'), - '#withAudience':: d.fn(help='"Audience is the intended audience of the token. A recipient of a token must identify itself with an identifier specified in the audience of the token, and otherwise should reject the token. The audience defaults to the identifier of the apiserver."', args=[d.arg(name='audience', type=d.T.string)]), + '#withAudience':: d.fn(help='"audience is the intended audience of the token. A recipient of a token must identify itself with an identifier specified in the audience of the token, and otherwise should reject the token. The audience defaults to the identifier of the apiserver."', args=[d.arg(name='audience', type=d.T.string)]), withAudience(audience): { audience: audience }, - '#withExpirationSeconds':: d.fn(help='"ExpirationSeconds is the requested duration of validity of the service account token. As the token approaches expiration, the kubelet volume plugin will proactively rotate the service account token. The kubelet will start trying to rotate the token if the token is older than 80 percent of its time to live or if the token is older than 24 hours.Defaults to 1 hour and must be at least 10 minutes."', args=[d.arg(name='expirationSeconds', type=d.T.integer)]), + '#withExpirationSeconds':: d.fn(help='"expirationSeconds is the requested duration of validity of the service account token. As the token approaches expiration, the kubelet volume plugin will proactively rotate the service account token. 
The kubelet will start trying to rotate the token if the token is older than 80 percent of its time to live or if the token is older than 24 hours.Defaults to 1 hour and must be at least 10 minutes."', args=[d.arg(name='expirationSeconds', type=d.T.integer)]), withExpirationSeconds(expirationSeconds): { expirationSeconds: expirationSeconds }, - '#withPath':: d.fn(help='"Path is the path relative to the mount point of the file to project the token into."', args=[d.arg(name='path', type=d.T.string)]), + '#withPath':: d.fn(help='"path is the path relative to the mount point of the file to project the token into."', args=[d.arg(name='path', type=d.T.string)]), withPath(path): { path: path }, '#mixin': 'ignore', mixin: self, diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/servicePort.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/servicePort.libsonnet similarity index 68% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/servicePort.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/servicePort.libsonnet index 69d7a842877..5a1fa5a5a04 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/servicePort.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/servicePort.libsonnet @@ -1,7 +1,7 @@ { local d = (import 'doc-util/main.libsonnet'), '#':: d.pkg(name='servicePort', url='', help="\"ServicePort contains information on service's port.\""), - '#withAppProtocol':: d.fn(help='"The application protocol for this port. This field follows standard Kubernetes label syntax. Un-prefixed names are reserved for IANA standard service names (as per RFC-6335 and http://www.iana.org/assignments/service-names). Non-standard protocols should use prefixed names such as mycompany.com/my-custom-protocol. This is a beta field that is guarded by the ServiceAppProtocol feature gate and enabled by default."', args=[d.arg(name='appProtocol', type=d.T.string)]), + '#withAppProtocol':: d.fn(help="\"The application protocol for this port. This is used as a hint for implementations to offer richer behavior for protocols that they understand. This field follows standard Kubernetes label syntax. Valid values are either:\\n\\n* Un-prefixed protocol names - reserved for IANA standard service names (as per RFC-6335 and https://www.iana.org/assignments/service-names).\\n\\n* Kubernetes-defined prefixed names:\\n * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior-\\n * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455\\n * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455\\n\\n* Other protocols should use implementation-defined prefixed names such as mycompany.com/my-custom-protocol.\"", args=[d.arg(name='appProtocol', type=d.T.string)]), withAppProtocol(appProtocol): { appProtocol: appProtocol }, '#withName':: d.fn(help="\"The name of this port within the service. This must be a DNS_LABEL. All ports within a ServiceSpec must have unique names. When considering the endpoints for a Service, this must match the 'name' field in the EndpointPort. 
Optional if only one ServicePort is defined on this service.\"", args=[d.arg(name='name', type=d.T.string)]), withName(name): { name: name }, diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/serviceSpec.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/serviceSpec.libsonnet similarity index 63% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/serviceSpec.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/serviceSpec.libsonnet index 8d3b794393a..0f8645393d7 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/serviceSpec.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/serviceSpec.libsonnet @@ -9,13 +9,13 @@ withTimeoutSeconds(timeoutSeconds): { sessionAffinityConfig+: { clientIP+: { timeoutSeconds: timeoutSeconds } } }, }, }, - '#withAllocateLoadBalancerNodePorts':: d.fn(help='"allocateLoadBalancerNodePorts defines if NodePorts will be automatically allocated for services with type LoadBalancer. Default is \\"true\\". It may be set to \\"false\\" if the cluster load-balancer does not rely on NodePorts. allocateLoadBalancerNodePorts may only be set for services with type LoadBalancer and will be cleared if the type is changed to any other type. This field is alpha-level and is only honored by servers that enable the ServiceLBNodePortControl feature."', args=[d.arg(name='allocateLoadBalancerNodePorts', type=d.T.boolean)]), + '#withAllocateLoadBalancerNodePorts':: d.fn(help='"allocateLoadBalancerNodePorts defines if NodePorts will be automatically allocated for services with type LoadBalancer. Default is \\"true\\". It may be set to \\"false\\" if the cluster load-balancer does not rely on NodePorts. If the caller requests specific NodePorts (by specifying a value), those requests will be respected, regardless of this field. This field may only be set for services with type LoadBalancer and will be cleared if the type is changed to any other type."', args=[d.arg(name='allocateLoadBalancerNodePorts', type=d.T.boolean)]), withAllocateLoadBalancerNodePorts(allocateLoadBalancerNodePorts): { allocateLoadBalancerNodePorts: allocateLoadBalancerNodePorts }, '#withClusterIP':: d.fn(help='"clusterIP is the IP address of the service and is usually assigned randomly. If an address is specified manually, is in-range (as per system configuration), and is not in use, it will be allocated to the service; otherwise creation of the service will fail. This field may not be changed through updates unless the type field is also being changed to ExternalName (which requires this field to be blank) or the type field is being changed from ExternalName (in which case this field may optionally be specified, as describe above). Valid values are \\"None\\", empty string (\\"\\"), or a valid IP address. Setting this to \\"None\\" makes a \\"headless service\\" (no virtual IP), which is useful when direct endpoint connections are preferred and proxying is not required. Only applies to types ClusterIP, NodePort, and LoadBalancer. If this field is specified when creating a Service of type ExternalName, creation will fail. This field will be wiped when updating a Service to type ExternalName. 
More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies"', args=[d.arg(name='clusterIP', type=d.T.string)]), withClusterIP(clusterIP): { clusterIP: clusterIP }, - '#withClusterIPs':: d.fn(help='"ClusterIPs is a list of IP addresses assigned to this service, and are usually assigned randomly. If an address is specified manually, is in-range (as per system configuration), and is not in use, it will be allocated to the service; otherwise creation of the service will fail. This field may not be changed through updates unless the type field is also being changed to ExternalName (which requires this field to be empty) or the type field is being changed from ExternalName (in which case this field may optionally be specified, as describe above). Valid values are \\"None\\", empty string (\\"\\"), or a valid IP address. Setting this to \\"None\\" makes a \\"headless service\\" (no virtual IP), which is useful when direct endpoint connections are preferred and proxying is not required. Only applies to types ClusterIP, NodePort, and LoadBalancer. If this field is specified when creating a Service of type ExternalName, creation will fail. This field will be wiped when updating a Service to type ExternalName. If this field is not specified, it will be initialized from the clusterIP field. If this field is specified, clients must ensure that clusterIPs[0] and clusterIP have the same value.\\n\\nUnless the \\"IPv6DualStack\\" feature gate is enabled, this field is limited to one value, which must be the same as the clusterIP field. If the feature gate is enabled, this field may hold a maximum of two entries (dual-stack IPs, in either order). These IPs must correspond to the values of the ipFamilies field. Both clusterIPs and ipFamilies are governed by the ipFamilyPolicy field. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies"', args=[d.arg(name='clusterIPs', type=d.T.array)]), + '#withClusterIPs':: d.fn(help='"ClusterIPs is a list of IP addresses assigned to this service, and are usually assigned randomly. If an address is specified manually, is in-range (as per system configuration), and is not in use, it will be allocated to the service; otherwise creation of the service will fail. This field may not be changed through updates unless the type field is also being changed to ExternalName (which requires this field to be empty) or the type field is being changed from ExternalName (in which case this field may optionally be specified, as describe above). Valid values are \\"None\\", empty string (\\"\\"), or a valid IP address. Setting this to \\"None\\" makes a \\"headless service\\" (no virtual IP), which is useful when direct endpoint connections are preferred and proxying is not required. Only applies to types ClusterIP, NodePort, and LoadBalancer. If this field is specified when creating a Service of type ExternalName, creation will fail. This field will be wiped when updating a Service to type ExternalName. If this field is not specified, it will be initialized from the clusterIP field. If this field is specified, clients must ensure that clusterIPs[0] and clusterIP have the same value.\\n\\nThis field may hold a maximum of two entries (dual-stack IPs, in either order). These IPs must correspond to the values of the ipFamilies field. Both clusterIPs and ipFamilies are governed by the ipFamilyPolicy field. 
More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies"', args=[d.arg(name='clusterIPs', type=d.T.array)]), withClusterIPs(clusterIPs): { clusterIPs: if std.isArray(v=clusterIPs) then clusterIPs else [clusterIPs] }, - '#withClusterIPsMixin':: d.fn(help='"ClusterIPs is a list of IP addresses assigned to this service, and are usually assigned randomly. If an address is specified manually, is in-range (as per system configuration), and is not in use, it will be allocated to the service; otherwise creation of the service will fail. This field may not be changed through updates unless the type field is also being changed to ExternalName (which requires this field to be empty) or the type field is being changed from ExternalName (in which case this field may optionally be specified, as describe above). Valid values are \\"None\\", empty string (\\"\\"), or a valid IP address. Setting this to \\"None\\" makes a \\"headless service\\" (no virtual IP), which is useful when direct endpoint connections are preferred and proxying is not required. Only applies to types ClusterIP, NodePort, and LoadBalancer. If this field is specified when creating a Service of type ExternalName, creation will fail. This field will be wiped when updating a Service to type ExternalName. If this field is not specified, it will be initialized from the clusterIP field. If this field is specified, clients must ensure that clusterIPs[0] and clusterIP have the same value.\\n\\nUnless the \\"IPv6DualStack\\" feature gate is enabled, this field is limited to one value, which must be the same as the clusterIP field. If the feature gate is enabled, this field may hold a maximum of two entries (dual-stack IPs, in either order). These IPs must correspond to the values of the ipFamilies field. Both clusterIPs and ipFamilies are governed by the ipFamilyPolicy field. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='clusterIPs', type=d.T.array)]), + '#withClusterIPsMixin':: d.fn(help='"ClusterIPs is a list of IP addresses assigned to this service, and are usually assigned randomly. If an address is specified manually, is in-range (as per system configuration), and is not in use, it will be allocated to the service; otherwise creation of the service will fail. This field may not be changed through updates unless the type field is also being changed to ExternalName (which requires this field to be empty) or the type field is being changed from ExternalName (in which case this field may optionally be specified, as describe above). Valid values are \\"None\\", empty string (\\"\\"), or a valid IP address. Setting this to \\"None\\" makes a \\"headless service\\" (no virtual IP), which is useful when direct endpoint connections are preferred and proxying is not required. Only applies to types ClusterIP, NodePort, and LoadBalancer. If this field is specified when creating a Service of type ExternalName, creation will fail. This field will be wiped when updating a Service to type ExternalName. If this field is not specified, it will be initialized from the clusterIP field. If this field is specified, clients must ensure that clusterIPs[0] and clusterIP have the same value.\\n\\nThis field may hold a maximum of two entries (dual-stack IPs, in either order). These IPs must correspond to the values of the ipFamilies field. 
Both clusterIPs and ipFamilies are governed by the ipFamilyPolicy field. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='clusterIPs', type=d.T.array)]), withClusterIPsMixin(clusterIPs): { clusterIPs+: if std.isArray(v=clusterIPs) then clusterIPs else [clusterIPs] }, '#withExternalIPs':: d.fn(help='"externalIPs is a list of IP addresses for which nodes in the cluster will also accept traffic for this service. These IPs are not managed by Kubernetes. The user is responsible for ensuring that traffic arrives at a node with this IP. A common example is external load-balancers that are not part of the Kubernetes system."', args=[d.arg(name='externalIPs', type=d.T.array)]), withExternalIPs(externalIPs): { externalIPs: if std.isArray(v=externalIPs) then externalIPs else [externalIPs] }, @@ -23,25 +23,25 @@ withExternalIPsMixin(externalIPs): { externalIPs+: if std.isArray(v=externalIPs) then externalIPs else [externalIPs] }, '#withExternalName':: d.fn(help='"externalName is the external reference that discovery mechanisms will return as an alias for this service (e.g. a DNS CNAME record). No proxying will be involved. Must be a lowercase RFC-1123 hostname (https://tools.ietf.org/html/rfc1123) and requires `type` to be \\"ExternalName\\"."', args=[d.arg(name='externalName', type=d.T.string)]), withExternalName(externalName): { externalName: externalName }, - '#withExternalTrafficPolicy':: d.fn(help='"externalTrafficPolicy denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints. \\"Local\\" preserves the client source IP and avoids a second hop for LoadBalancer and Nodeport type services, but risks potentially imbalanced traffic spreading. \\"Cluster\\" obscures the client source IP and may cause a second hop to another node, but should have good overall load-spreading."', args=[d.arg(name='externalTrafficPolicy', type=d.T.string)]), + '#withExternalTrafficPolicy':: d.fn(help="\"externalTrafficPolicy describes how nodes distribute service traffic they receive on one of the Service's \\\"externally-facing\\\" addresses (NodePorts, ExternalIPs, and LoadBalancer IPs). If set to \\\"Local\\\", the proxy will configure the service in a way that assumes that external load balancers will take care of balancing the service traffic between nodes, and so each node will deliver traffic only to the node-local endpoints of the service, without masquerading the client source IP. (Traffic mistakenly sent to a node with no endpoints will be dropped.) The default value, \\\"Cluster\\\", uses the standard behavior of routing to all endpoints evenly (possibly modified by topology and other features). Note that traffic sent to an External IP or LoadBalancer IP from within the cluster will always get \\\"Cluster\\\" semantics, but clients sending to a NodePort from within the cluster may need to take traffic policy into account when picking a node.\"", args=[d.arg(name='externalTrafficPolicy', type=d.T.string)]), withExternalTrafficPolicy(externalTrafficPolicy): { externalTrafficPolicy: externalTrafficPolicy }, - '#withHealthCheckNodePort':: d.fn(help='"healthCheckNodePort specifies the healthcheck nodePort for the service. This only applies when type is set to LoadBalancer and externalTrafficPolicy is set to Local. If a value is specified, is in-range, and is not in use, it will be used. 
If not specified, a value will be automatically allocated. External systems (e.g. load-balancers) can use this port to determine if a given node holds endpoints for this service or not. If this field is specified when creating a Service which does not need it, creation will fail. This field will be wiped when updating a Service to no longer need it (e.g. changing type)."', args=[d.arg(name='healthCheckNodePort', type=d.T.integer)]), + '#withHealthCheckNodePort':: d.fn(help='"healthCheckNodePort specifies the healthcheck nodePort for the service. This only applies when type is set to LoadBalancer and externalTrafficPolicy is set to Local. If a value is specified, is in-range, and is not in use, it will be used. If not specified, a value will be automatically allocated. External systems (e.g. load-balancers) can use this port to determine if a given node holds endpoints for this service or not. If this field is specified when creating a Service which does not need it, creation will fail. This field will be wiped when updating a Service to no longer need it (e.g. changing type). This field cannot be updated once set."', args=[d.arg(name='healthCheckNodePort', type=d.T.integer)]), withHealthCheckNodePort(healthCheckNodePort): { healthCheckNodePort: healthCheckNodePort }, - '#withInternalTrafficPolicy':: d.fn(help='"InternalTrafficPolicy specifies if the cluster internal traffic should be routed to all endpoints or node-local endpoints only. \\"Cluster\\" routes internal traffic to a Service to all endpoints. \\"Local\\" routes traffic to node-local endpoints only, traffic is dropped if no node-local endpoints are ready. The default value is \\"Cluster\\"."', args=[d.arg(name='internalTrafficPolicy', type=d.T.string)]), + '#withInternalTrafficPolicy':: d.fn(help='"InternalTrafficPolicy describes how nodes distribute service traffic they receive on the ClusterIP. If set to \\"Local\\", the proxy will assume that pods only want to talk to endpoints of the service on the same node as the pod, dropping the traffic if there are no local endpoints. The default value, \\"Cluster\\", uses the standard behavior of routing to all endpoints evenly (possibly modified by topology and other features)."', args=[d.arg(name='internalTrafficPolicy', type=d.T.string)]), withInternalTrafficPolicy(internalTrafficPolicy): { internalTrafficPolicy: internalTrafficPolicy }, - '#withIpFamilies':: d.fn(help='"IPFamilies is a list of IP families (e.g. IPv4, IPv6) assigned to this service, and is gated by the \\"IPv6DualStack\\" feature gate. This field is usually assigned automatically based on cluster configuration and the ipFamilyPolicy field. If this field is specified manually, the requested family is available in the cluster, and ipFamilyPolicy allows it, it will be used; otherwise creation of the service will fail. This field is conditionally mutable: it allows for adding or removing a secondary IP family, but it does not allow changing the primary IP family of the Service. Valid values are \\"IPv4\\" and \\"IPv6\\". This field only applies to Services of types ClusterIP, NodePort, and LoadBalancer, and does apply to \\"headless\\" services. This field will be wiped when updating a Service to type ExternalName.\\n\\nThis field may hold a maximum of two entries (dual-stack families, in either order). These families must correspond to the values of the clusterIPs field, if specified. 
Both clusterIPs and ipFamilies are governed by the ipFamilyPolicy field."', args=[d.arg(name='ipFamilies', type=d.T.array)]), + '#withIpFamilies':: d.fn(help='"IPFamilies is a list of IP families (e.g. IPv4, IPv6) assigned to this service. This field is usually assigned automatically based on cluster configuration and the ipFamilyPolicy field. If this field is specified manually, the requested family is available in the cluster, and ipFamilyPolicy allows it, it will be used; otherwise creation of the service will fail. This field is conditionally mutable: it allows for adding or removing a secondary IP family, but it does not allow changing the primary IP family of the Service. Valid values are \\"IPv4\\" and \\"IPv6\\". This field only applies to Services of types ClusterIP, NodePort, and LoadBalancer, and does apply to \\"headless\\" services. This field will be wiped when updating a Service to type ExternalName.\\n\\nThis field may hold a maximum of two entries (dual-stack families, in either order). These families must correspond to the values of the clusterIPs field, if specified. Both clusterIPs and ipFamilies are governed by the ipFamilyPolicy field."', args=[d.arg(name='ipFamilies', type=d.T.array)]), withIpFamilies(ipFamilies): { ipFamilies: if std.isArray(v=ipFamilies) then ipFamilies else [ipFamilies] }, - '#withIpFamiliesMixin':: d.fn(help='"IPFamilies is a list of IP families (e.g. IPv4, IPv6) assigned to this service, and is gated by the \\"IPv6DualStack\\" feature gate. This field is usually assigned automatically based on cluster configuration and the ipFamilyPolicy field. If this field is specified manually, the requested family is available in the cluster, and ipFamilyPolicy allows it, it will be used; otherwise creation of the service will fail. This field is conditionally mutable: it allows for adding or removing a secondary IP family, but it does not allow changing the primary IP family of the Service. Valid values are \\"IPv4\\" and \\"IPv6\\". This field only applies to Services of types ClusterIP, NodePort, and LoadBalancer, and does apply to \\"headless\\" services. This field will be wiped when updating a Service to type ExternalName.\\n\\nThis field may hold a maximum of two entries (dual-stack families, in either order). These families must correspond to the values of the clusterIPs field, if specified. Both clusterIPs and ipFamilies are governed by the ipFamilyPolicy field."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ipFamilies', type=d.T.array)]), + '#withIpFamiliesMixin':: d.fn(help='"IPFamilies is a list of IP families (e.g. IPv4, IPv6) assigned to this service. This field is usually assigned automatically based on cluster configuration and the ipFamilyPolicy field. If this field is specified manually, the requested family is available in the cluster, and ipFamilyPolicy allows it, it will be used; otherwise creation of the service will fail. This field is conditionally mutable: it allows for adding or removing a secondary IP family, but it does not allow changing the primary IP family of the Service. Valid values are \\"IPv4\\" and \\"IPv6\\". This field only applies to Services of types ClusterIP, NodePort, and LoadBalancer, and does apply to \\"headless\\" services. This field will be wiped when updating a Service to type ExternalName.\\n\\nThis field may hold a maximum of two entries (dual-stack families, in either order). These families must correspond to the values of the clusterIPs field, if specified. 
Both clusterIPs and ipFamilies are governed by the ipFamilyPolicy field."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ipFamilies', type=d.T.array)]), withIpFamiliesMixin(ipFamilies): { ipFamilies+: if std.isArray(v=ipFamilies) then ipFamilies else [ipFamilies] }, - '#withIpFamilyPolicy':: d.fn(help='"IPFamilyPolicy represents the dual-stack-ness requested or required by this Service, and is gated by the \\"IPv6DualStack\\" feature gate. If there is no value provided, then this field will be set to SingleStack. Services can be \\"SingleStack\\" (a single IP family), \\"PreferDualStack\\" (two IP families on dual-stack configured clusters or a single IP family on single-stack clusters), or \\"RequireDualStack\\" (two IP families on dual-stack configured clusters, otherwise fail). The ipFamilies and clusterIPs fields depend on the value of this field. This field will be wiped when updating a service to type ExternalName."', args=[d.arg(name='ipFamilyPolicy', type=d.T.string)]), + '#withIpFamilyPolicy':: d.fn(help='"IPFamilyPolicy represents the dual-stack-ness requested or required by this Service. If there is no value provided, then this field will be set to SingleStack. Services can be \\"SingleStack\\" (a single IP family), \\"PreferDualStack\\" (two IP families on dual-stack configured clusters or a single IP family on single-stack clusters), or \\"RequireDualStack\\" (two IP families on dual-stack configured clusters, otherwise fail). The ipFamilies and clusterIPs fields depend on the value of this field. This field will be wiped when updating a service to type ExternalName."', args=[d.arg(name='ipFamilyPolicy', type=d.T.string)]), withIpFamilyPolicy(ipFamilyPolicy): { ipFamilyPolicy: ipFamilyPolicy }, '#withLoadBalancerClass':: d.fn(help="\"loadBalancerClass is the class of the load balancer implementation this Service belongs to. If specified, the value of this field must be a label-style identifier, with an optional prefix, e.g. \\\"internal-vip\\\" or \\\"example.com/internal-vip\\\". Unprefixed names are reserved for end-users. This field can only be set when the Service type is 'LoadBalancer'. If not set, the default load balancer implementation is used, today this is typically done through the cloud provider integration, but should apply for any default implementation. If set, it is assumed that a load balancer implementation is watching for Services with a matching class. Any default load balancer implementation (e.g. cloud providers) should ignore Services that set this field. This field can only be set when creating or updating a Service to type 'LoadBalancer'. Once set, it can not be changed. This field will be wiped when a service is updated to a non 'LoadBalancer' type.\"", args=[d.arg(name='loadBalancerClass', type=d.T.string)]), withLoadBalancerClass(loadBalancerClass): { loadBalancerClass: loadBalancerClass }, - '#withLoadBalancerIP':: d.fn(help='"Only applies to Service Type: LoadBalancer LoadBalancer will get created with the IP specified in this field. This feature depends on whether the underlying cloud-provider supports specifying the loadBalancerIP when a load balancer is created. This field will be ignored if the cloud-provider does not support the feature."', args=[d.arg(name='loadBalancerIP', type=d.T.string)]), + '#withLoadBalancerIP':: d.fn(help='"Only applies to Service Type: LoadBalancer. 
This feature depends on whether the underlying cloud-provider supports specifying the loadBalancerIP when a load balancer is created. This field will be ignored if the cloud-provider does not support the feature. Deprecated: This field was under-specified and its meaning varies across implementations. Using it is non-portable and it may not support dual-stack. Users are encouraged to use implementation-specific annotations when available."', args=[d.arg(name='loadBalancerIP', type=d.T.string)]), withLoadBalancerIP(loadBalancerIP): { loadBalancerIP: loadBalancerIP }, - '#withLoadBalancerSourceRanges':: d.fn(help='"If specified and supported by the platform, this will restrict traffic through the cloud-provider load-balancer will be restricted to the specified client IPs. This field will be ignored if the cloud-provider does not support the feature.\\" More info: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/"', args=[d.arg(name='loadBalancerSourceRanges', type=d.T.array)]), + '#withLoadBalancerSourceRanges':: d.fn(help='"If specified and supported by the platform, this will restrict traffic through the cloud-provider load-balancer will be restricted to the specified client IPs. This field will be ignored if the cloud-provider does not support the feature.\\" More info: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/"', args=[d.arg(name='loadBalancerSourceRanges', type=d.T.array)]), withLoadBalancerSourceRanges(loadBalancerSourceRanges): { loadBalancerSourceRanges: if std.isArray(v=loadBalancerSourceRanges) then loadBalancerSourceRanges else [loadBalancerSourceRanges] }, - '#withLoadBalancerSourceRangesMixin':: d.fn(help='"If specified and supported by the platform, this will restrict traffic through the cloud-provider load-balancer will be restricted to the specified client IPs. This field will be ignored if the cloud-provider does not support the feature.\\" More info: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='loadBalancerSourceRanges', type=d.T.array)]), + '#withLoadBalancerSourceRangesMixin':: d.fn(help='"If specified and supported by the platform, this will restrict traffic through the cloud-provider load-balancer will be restricted to the specified client IPs. This field will be ignored if the cloud-provider does not support the feature.\\" More info: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='loadBalancerSourceRanges', type=d.T.array)]), withLoadBalancerSourceRangesMixin(loadBalancerSourceRanges): { loadBalancerSourceRanges+: if std.isArray(v=loadBalancerSourceRanges) then loadBalancerSourceRanges else [loadBalancerSourceRanges] }, '#withPorts':: d.fn(help='"The list of ports that are exposed by this service. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies"', args=[d.arg(name='ports', type=d.T.array)]), withPorts(ports): { ports: if std.isArray(v=ports) then ports else [ports] }, @@ -55,10 +55,6 @@ withSelectorMixin(selector): { selector+: selector }, '#withSessionAffinity':: d.fn(help='"Supports \\"ClientIP\\" and \\"None\\". Used to maintain session affinity. Enable client IP based session affinity. Must be ClientIP or None. Defaults to None. 
More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies"', args=[d.arg(name='sessionAffinity', type=d.T.string)]), withSessionAffinity(sessionAffinity): { sessionAffinity: sessionAffinity }, - '#withTopologyKeys':: d.fn(help='"topologyKeys is a preference-order list of topology keys which implementations of services should use to preferentially sort endpoints when accessing this Service, it can not be used at the same time as externalTrafficPolicy=Local. Topology keys must be valid label keys and at most 16 keys may be specified. Endpoints are chosen based on the first topology key with available backends. If this field is specified and all entries have no backends that match the topology of the client, the service has no backends for that client and connections should fail. The special value \\"*\\" may be used to mean \\"any topology\\". This catch-all value, if used, only makes sense as the last value in the list. If this is not specified or empty, no topology constraints will be applied. This field is alpha-level and is only honored by servers that enable the ServiceTopology feature. This field is deprecated and will be removed in a future version."', args=[d.arg(name='topologyKeys', type=d.T.array)]), - withTopologyKeys(topologyKeys): { topologyKeys: if std.isArray(v=topologyKeys) then topologyKeys else [topologyKeys] }, - '#withTopologyKeysMixin':: d.fn(help='"topologyKeys is a preference-order list of topology keys which implementations of services should use to preferentially sort endpoints when accessing this Service, it can not be used at the same time as externalTrafficPolicy=Local. Topology keys must be valid label keys and at most 16 keys may be specified. Endpoints are chosen based on the first topology key with available backends. If this field is specified and all entries have no backends that match the topology of the client, the service has no backends for that client and connections should fail. The special value \\"*\\" may be used to mean \\"any topology\\". This catch-all value, if used, only makes sense as the last value in the list. If this is not specified or empty, no topology constraints will be applied. This field is alpha-level and is only honored by servers that enable the ServiceTopology feature. This field is deprecated and will be removed in a future version."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='topologyKeys', type=d.T.array)]), - withTopologyKeysMixin(topologyKeys): { topologyKeys+: if std.isArray(v=topologyKeys) then topologyKeys else [topologyKeys] }, '#withType':: d.fn(help='"type determines how the Service is exposed. Defaults to ClusterIP. Valid options are ExternalName, ClusterIP, NodePort, and LoadBalancer. \\"ClusterIP\\" allocates a cluster-internal IP address for load-balancing to endpoints. Endpoints are determined by the selector or if that is not specified, by manual construction of an Endpoints object or EndpointSlice objects. If clusterIP is \\"None\\", no virtual IP is allocated and the endpoints are published as a set of endpoints rather than a virtual IP. \\"NodePort\\" builds on ClusterIP and allocates a port on every node which routes to the same endpoints as the clusterIP. \\"LoadBalancer\\" builds on NodePort and creates an external load-balancer (if supported in the current cloud) which routes to the same endpoints as the clusterIP. \\"ExternalName\\" aliases this service to the specified externalName. 
Several other fields do not apply to ExternalName services. More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types"', args=[d.arg(name='type', type=d.T.string)]), withType(type): { type: type }, '#mixin': 'ignore', diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/serviceStatus.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/serviceStatus.libsonnet similarity index 100% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/serviceStatus.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/serviceStatus.libsonnet diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/sessionAffinityConfig.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/sessionAffinityConfig.libsonnet similarity index 100% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/sessionAffinityConfig.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/sessionAffinityConfig.libsonnet diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/sleepAction.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/sleepAction.libsonnet new file mode 100644 index 00000000000..e863a06e935 --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/sleepAction.libsonnet @@ -0,0 +1,8 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='sleepAction', url='', help='"SleepAction describes a \\"sleep\\" action."'), + '#withSeconds':: d.fn(help='"Seconds is the number of seconds to sleep."', args=[d.arg(name='seconds', type=d.T.integer)]), + withSeconds(seconds): { seconds: seconds }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/storageOSPersistentVolumeSource.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/storageOSPersistentVolumeSource.libsonnet similarity index 85% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/storageOSPersistentVolumeSource.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/storageOSPersistentVolumeSource.libsonnet index b41c58aebfb..f200e9a0fac 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/storageOSPersistentVolumeSource.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/storageOSPersistentVolumeSource.libsonnet @@ -18,13 +18,13 @@ '#withUid':: d.fn(help='"UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { secretRef+: { uid: uid } }, }, - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". 
Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { fsType: fsType }, - '#withReadOnly':: d.fn(help='"Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { readOnly: readOnly }, - '#withVolumeName':: d.fn(help='"VolumeName is the human-readable name of the StorageOS volume. Volume names are only unique within a namespace."', args=[d.arg(name='volumeName', type=d.T.string)]), + '#withVolumeName':: d.fn(help='"volumeName is the human-readable name of the StorageOS volume. Volume names are only unique within a namespace."', args=[d.arg(name='volumeName', type=d.T.string)]), withVolumeName(volumeName): { volumeName: volumeName }, - '#withVolumeNamespace':: d.fn(help="\"VolumeNamespace specifies the scope of the volume within StorageOS. If no namespace is specified then the Pod's namespace will be used. This allows the Kubernetes name scoping to be mirrored within StorageOS for tighter integration. Set VolumeName to any name to override the default behaviour. Set to \\\"default\\\" if you are not using namespaces within StorageOS. Namespaces that do not pre-exist within StorageOS will be created.\"", args=[d.arg(name='volumeNamespace', type=d.T.string)]), + '#withVolumeNamespace':: d.fn(help="\"volumeNamespace specifies the scope of the volume within StorageOS. If no namespace is specified then the Pod's namespace will be used. This allows the Kubernetes name scoping to be mirrored within StorageOS for tighter integration. Set VolumeName to any name to override the default behaviour. Set to \\\"default\\\" if you are not using namespaces within StorageOS. Namespaces that do not pre-exist within StorageOS will be created.\"", args=[d.arg(name='volumeNamespace', type=d.T.string)]), withVolumeNamespace(volumeNamespace): { volumeNamespace: volumeNamespace }, '#mixin': 'ignore', mixin: self, diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/storageOSVolumeSource.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/storageOSVolumeSource.libsonnet similarity index 70% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/storageOSVolumeSource.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/storageOSVolumeSource.libsonnet index 2aa3947116a..5e2cbf6b593 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/storageOSVolumeSource.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/storageOSVolumeSource.libsonnet @@ -6,13 +6,13 @@ '#withName':: d.fn(help='"Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { secretRef+: { name: name } }, }, - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { fsType: fsType }, - '#withReadOnly':: d.fn(help='"Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { readOnly: readOnly }, - '#withVolumeName':: d.fn(help='"VolumeName is the human-readable name of the StorageOS volume. Volume names are only unique within a namespace."', args=[d.arg(name='volumeName', type=d.T.string)]), + '#withVolumeName':: d.fn(help='"volumeName is the human-readable name of the StorageOS volume. Volume names are only unique within a namespace."', args=[d.arg(name='volumeName', type=d.T.string)]), withVolumeName(volumeName): { volumeName: volumeName }, - '#withVolumeNamespace':: d.fn(help="\"VolumeNamespace specifies the scope of the volume within StorageOS. If no namespace is specified then the Pod's namespace will be used. This allows the Kubernetes name scoping to be mirrored within StorageOS for tighter integration. Set VolumeName to any name to override the default behaviour. Set to \\\"default\\\" if you are not using namespaces within StorageOS. Namespaces that do not pre-exist within StorageOS will be created.\"", args=[d.arg(name='volumeNamespace', type=d.T.string)]), + '#withVolumeNamespace':: d.fn(help="\"volumeNamespace specifies the scope of the volume within StorageOS. If no namespace is specified then the Pod's namespace will be used. This allows the Kubernetes name scoping to be mirrored within StorageOS for tighter integration. Set VolumeName to any name to override the default behaviour. Set to \\\"default\\\" if you are not using namespaces within StorageOS. 
Namespaces that do not pre-exist within StorageOS will be created.\"", args=[d.arg(name='volumeNamespace', type=d.T.string)]), withVolumeNamespace(volumeNamespace): { volumeNamespace: volumeNamespace }, '#mixin': 'ignore', mixin: self, diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/sysctl.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/sysctl.libsonnet similarity index 100% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/sysctl.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/sysctl.libsonnet diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/taint.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/taint.libsonnet similarity index 100% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/taint.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/taint.libsonnet diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/tcpSocketAction.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/tcpSocketAction.libsonnet similarity index 100% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/tcpSocketAction.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/tcpSocketAction.libsonnet diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/toleration.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/toleration.libsonnet similarity index 100% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/toleration.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/toleration.libsonnet diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/topologySelectorLabelRequirement.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/topologySelectorLabelRequirement.libsonnet similarity index 100% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/topologySelectorLabelRequirement.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/topologySelectorLabelRequirement.libsonnet diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/topologySelectorTerm.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/topologySelectorTerm.libsonnet similarity index 100% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/topologySelectorTerm.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/topologySelectorTerm.libsonnet diff --git 
a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/topologySpreadConstraint.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/topologySpreadConstraint.libsonnet new file mode 100644 index 00000000000..411dde629c7 --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/topologySpreadConstraint.libsonnet @@ -0,0 +1,33 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='topologySpreadConstraint', url='', help='"TopologySpreadConstraint specifies how to spread matching pods among the given topology."'), + '#labelSelector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), + labelSelector: { + '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressions(matchExpressions): { labelSelector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } }, + '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressionsMixin(matchExpressions): { labelSelector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } }, + '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabels(matchLabels): { labelSelector+: { matchLabels: matchLabels } }, + '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabelsMixin(matchLabels): { labelSelector+: { matchLabels+: matchLabels } }, + }, + '#withMatchLabelKeys':: d.fn(help="\"MatchLabelKeys is a set of pod label keys to select the pods over which spreading will be calculated. The keys are used to lookup values from the incoming pod labels, those key-value labels are ANDed with labelSelector to select the group of existing pods over which spreading will be calculated for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. MatchLabelKeys cannot be set when LabelSelector isn't set. Keys that don't exist in the incoming pod labels will be ignored. 
A null or empty list means only match against labelSelector.\\n\\nThis is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default).\"", args=[d.arg(name='matchLabelKeys', type=d.T.array)]), + withMatchLabelKeys(matchLabelKeys): { matchLabelKeys: if std.isArray(v=matchLabelKeys) then matchLabelKeys else [matchLabelKeys] }, + '#withMatchLabelKeysMixin':: d.fn(help="\"MatchLabelKeys is a set of pod label keys to select the pods over which spreading will be calculated. The keys are used to lookup values from the incoming pod labels, those key-value labels are ANDed with labelSelector to select the group of existing pods over which spreading will be calculated for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. MatchLabelKeys cannot be set when LabelSelector isn't set. Keys that don't exist in the incoming pod labels will be ignored. A null or empty list means only match against labelSelector.\\n\\nThis is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default).\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='matchLabelKeys', type=d.T.array)]), + withMatchLabelKeysMixin(matchLabelKeys): { matchLabelKeys+: if std.isArray(v=matchLabelKeys) then matchLabelKeys else [matchLabelKeys] }, + '#withMaxSkew':: d.fn(help="\"MaxSkew describes the degree to which pods may be unevenly distributed. When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference between the number of matching pods in the target topology and the global minimum. The global minimum is the minimum number of matching pods in an eligible domain or zero if the number of eligible domains is less than MinDomains. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 2/2/1: In this case, the global minimum is 1. | zone1 | zone2 | zone3 | | P P | P P | P | - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2; scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2) violate MaxSkew(1). - if MaxSkew is 2, incoming pod can be scheduled onto any zone. When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence to topologies that satisfy it. It's a required field. Default value is 1 and 0 is not allowed.\"", args=[d.arg(name='maxSkew', type=d.T.integer)]), + withMaxSkew(maxSkew): { maxSkew: maxSkew }, + '#withMinDomains':: d.fn(help="\"MinDomains indicates a minimum number of eligible domains. When the number of eligible domains with matching topology keys is less than minDomains, Pod Topology Spread treats \\\"global minimum\\\" as 0, and then the calculation of Skew is performed. And when the number of eligible domains with matching topology keys equals or greater than minDomains, this value has no effect on scheduling. As a result, when the number of eligible domains is less than minDomains, scheduler won't schedule more than maxSkew Pods to those domains. If value is nil, the constraint behaves as if MinDomains is equal to 1. Valid values are integers greater than 0. When value is not nil, WhenUnsatisfiable must be DoNotSchedule.\\n\\nFor example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same labelSelector spread as 2/2/2: | zone1 | zone2 | zone3 | | P P | P P | P P | The number of domains is less than 5(MinDomains), so \\\"global minimum\\\" is treated as 0. 
In this situation, new pod with the same labelSelector cannot be scheduled, because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, it will violate MaxSkew.\\n\\nThis is a beta field and requires the MinDomainsInPodTopologySpread feature gate to be enabled (enabled by default).\"", args=[d.arg(name='minDomains', type=d.T.integer)]), + withMinDomains(minDomains): { minDomains: minDomains }, + '#withNodeAffinityPolicy':: d.fn(help="\"NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector when calculating pod topology spread skew. Options are: - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations.\\n\\nIf this value is nil, the behavior is equivalent to the Honor policy. This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.\"", args=[d.arg(name='nodeAffinityPolicy', type=d.T.string)]), + withNodeAffinityPolicy(nodeAffinityPolicy): { nodeAffinityPolicy: nodeAffinityPolicy }, + '#withNodeTaintsPolicy':: d.fn(help='"NodeTaintsPolicy indicates how we will treat node taints when calculating pod topology spread skew. Options are: - Honor: nodes without taints, along with tainted nodes for which the incoming pod has a toleration, are included. - Ignore: node taints are ignored. All nodes are included.\\n\\nIf this value is nil, the behavior is equivalent to the Ignore policy. This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag."', args=[d.arg(name='nodeTaintsPolicy', type=d.T.string)]), + withNodeTaintsPolicy(nodeTaintsPolicy): { nodeTaintsPolicy: nodeTaintsPolicy }, + '#withTopologyKey':: d.fn(help="\"TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each \u003ckey, value\u003e as a \\\"bucket\\\", and try to put balanced number of pods into each bucket. We define a domain as a particular instance of a topology. Also, we define an eligible domain as a domain whose nodes meet the requirements of nodeAffinityPolicy and nodeTaintsPolicy. e.g. If TopologyKey is \\\"kubernetes.io/hostname\\\", each Node is a domain of that topology. And, if TopologyKey is \\\"topology.kubernetes.io/zone\\\", each zone is a domain of that topology. It's a required field.\"", args=[d.arg(name='topologyKey', type=d.T.string)]), + withTopologyKey(topologyKey): { topologyKey: topologyKey }, + '#withWhenUnsatisfiable':: d.fn(help="\"WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy the spread constraint. - DoNotSchedule (default) tells the scheduler not to schedule it. - ScheduleAnyway tells the scheduler to schedule the pod in any location,\\n but giving higher precedence to topologies that would help reduce the\\n skew.\\nA constraint is considered \\\"Unsatisfiable\\\" for an incoming pod if and only if every possible node assignment for that pod would violate \\\"MaxSkew\\\" on some topology. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 3/1/1: | zone1 | zone2 | zone3 | | P P P | P | P | If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler won't make it *more* imbalanced. 
It's a required field.\"", args=[d.arg(name='whenUnsatisfiable', type=d.T.string)]), + withWhenUnsatisfiable(whenUnsatisfiable): { whenUnsatisfiable: whenUnsatisfiable }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/typedLocalObjectReference.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/typedLocalObjectReference.libsonnet similarity index 100% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/typedLocalObjectReference.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/typedLocalObjectReference.libsonnet diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/typedObjectReference.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/typedObjectReference.libsonnet new file mode 100644 index 00000000000..5ab88f46f54 --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/typedObjectReference.libsonnet @@ -0,0 +1,14 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='typedObjectReference', url='', help=''), + '#withApiGroup':: d.fn(help='"APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required."', args=[d.arg(name='apiGroup', type=d.T.string)]), + withApiGroup(apiGroup): { apiGroup: apiGroup }, + '#withKind':: d.fn(help='"Kind is the type of resource being referenced"', args=[d.arg(name='kind', type=d.T.string)]), + withKind(kind): { kind: kind }, + '#withName':: d.fn(help='"Name is the name of resource being referenced"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { name: name }, + '#withNamespace':: d.fn(help="\"Namespace is the namespace of resource being referenced Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. 
(Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled.\"", args=[d.arg(name='namespace', type=d.T.string)]), + withNamespace(namespace): { namespace: namespace }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/volume.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/volume.libsonnet similarity index 57% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/volume.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/volume.libsonnet index bd3ec56b643..f78582150aa 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/volume.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/volume.libsonnet @@ -3,37 +3,37 @@ '#':: d.pkg(name='volume', url='', help='"Volume represents a named volume in a pod that may be accessed by any container in the pod."'), '#awsElasticBlockStore':: d.obj(help='"Represents a Persistent Disk resource in AWS.\\n\\nAn AWS EBS disk must exist before mounting to a container. The disk must also be in the same AWS zone as the kubelet. An AWS EBS disk can only be mounted as read/write once. AWS EBS volumes support ownership management and SELinux relabeling."'), awsElasticBlockStore: { - '#withFsType':: d.fn(help='"Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore"', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType is the filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore"', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { awsElasticBlockStore+: { fsType: fsType } }, - '#withPartition':: d.fn(help='"The partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \\"1\\". Similarly, the volume partition for /dev/sda is \\"0\\" (or you can leave the property empty)."', args=[d.arg(name='partition', type=d.T.integer)]), + '#withPartition':: d.fn(help='"partition is the partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \\"1\\". Similarly, the volume partition for /dev/sda is \\"0\\" (or you can leave the property empty)."', args=[d.arg(name='partition', type=d.T.integer)]), withPartition(partition): { awsElasticBlockStore+: { partition: partition } }, - '#withReadOnly':: d.fn(help='"Specify \\"true\\" to force and set the ReadOnly property in VolumeMounts to \\"true\\". If omitted, the default is \\"false\\". 
More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore"', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly value true will force the readOnly setting in VolumeMounts. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore"', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { awsElasticBlockStore+: { readOnly: readOnly } }, - '#withVolumeID':: d.fn(help='"Unique ID of the persistent disk resource in AWS (Amazon EBS volume). More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore"', args=[d.arg(name='volumeID', type=d.T.string)]), + '#withVolumeID':: d.fn(help='"volumeID is unique ID of the persistent disk resource in AWS (Amazon EBS volume). More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore"', args=[d.arg(name='volumeID', type=d.T.string)]), withVolumeID(volumeID): { awsElasticBlockStore+: { volumeID: volumeID } }, }, '#azureDisk':: d.obj(help='"AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod."'), azureDisk: { - '#withCachingMode':: d.fn(help='"Host Caching mode: None, Read Only, Read Write."', args=[d.arg(name='cachingMode', type=d.T.string)]), + '#withCachingMode':: d.fn(help='"cachingMode is the Host Caching mode: None, Read Only, Read Write."', args=[d.arg(name='cachingMode', type=d.T.string)]), withCachingMode(cachingMode): { azureDisk+: { cachingMode: cachingMode } }, - '#withDiskName':: d.fn(help='"The Name of the data disk in the blob storage"', args=[d.arg(name='diskName', type=d.T.string)]), + '#withDiskName':: d.fn(help='"diskName is the Name of the data disk in the blob storage"', args=[d.arg(name='diskName', type=d.T.string)]), withDiskName(diskName): { azureDisk+: { diskName: diskName } }, - '#withDiskURI':: d.fn(help='"The URI the data disk in the blob storage"', args=[d.arg(name='diskURI', type=d.T.string)]), + '#withDiskURI':: d.fn(help='"diskURI is the URI of data disk in the blob storage"', args=[d.arg(name='diskURI', type=d.T.string)]), withDiskURI(diskURI): { azureDisk+: { diskURI: diskURI } }, - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType is Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { azureDisk+: { fsType: fsType } }, - '#withKind':: d.fn(help='"Expected values Shared: multiple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared"', args=[d.arg(name='kind', type=d.T.string)]), + '#withKind':: d.fn(help='"kind expected values are Shared: multiple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared"', args=[d.arg(name='kind', type=d.T.string)]), withKind(kind): { azureDisk+: { kind: kind } }, - '#withReadOnly':: d.fn(help='"Defaults to false (read/write). 
ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { azureDisk+: { readOnly: readOnly } }, }, '#azureFile':: d.obj(help='"AzureFile represents an Azure File Service mount on the host and bind mount to the pod."'), azureFile: { - '#withReadOnly':: d.fn(help='"Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { azureFile+: { readOnly: readOnly } }, - '#withSecretName':: d.fn(help='"the name of secret that contains Azure Storage Account Name and Key"', args=[d.arg(name='secretName', type=d.T.string)]), + '#withSecretName':: d.fn(help='"secretName is the name of secret that contains Azure Storage Account Name and Key"', args=[d.arg(name='secretName', type=d.T.string)]), withSecretName(secretName): { azureFile+: { secretName: secretName } }, - '#withShareName':: d.fn(help='"Share Name"', args=[d.arg(name='shareName', type=d.T.string)]), + '#withShareName':: d.fn(help='"shareName is the azure share Name"', args=[d.arg(name='shareName', type=d.T.string)]), withShareName(shareName): { azureFile+: { shareName: shareName } }, }, '#cephfs':: d.obj(help='"Represents a Ceph Filesystem mount that lasts the lifetime of a pod Cephfs volumes do not support ownership management or SELinux relabeling."'), @@ -43,17 +43,17 @@ '#withName':: d.fn(help='"Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { cephfs+: { secretRef+: { name: name } } }, }, - '#withMonitors':: d.fn(help='"Required: Monitors is a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='monitors', type=d.T.array)]), + '#withMonitors':: d.fn(help='"monitors is Required: Monitors is a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='monitors', type=d.T.array)]), withMonitors(monitors): { cephfs+: { monitors: if std.isArray(v=monitors) then monitors else [monitors] } }, - '#withMonitorsMixin':: d.fn(help='"Required: Monitors is a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='monitors', type=d.T.array)]), + '#withMonitorsMixin':: d.fn(help='"monitors is Required: Monitors is a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='monitors', type=d.T.array)]), withMonitorsMixin(monitors): { cephfs+: { monitors+: if std.isArray(v=monitors) then monitors else [monitors] } }, - '#withPath':: d.fn(help='"Optional: Used as the mounted root, rather than the full Ceph tree, default is /"', args=[d.arg(name='path', type=d.T.string)]), + '#withPath':: d.fn(help='"path is Optional: Used as the mounted root, rather than the full Ceph tree, default is /"', args=[d.arg(name='path', type=d.T.string)]), withPath(path): { cephfs+: { path: path } }, - '#withReadOnly':: d.fn(help='"Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly is Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { cephfs+: { readOnly: readOnly } }, - '#withSecretFile':: d.fn(help='"Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='secretFile', type=d.T.string)]), + '#withSecretFile':: d.fn(help='"secretFile is Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='secretFile', type=d.T.string)]), withSecretFile(secretFile): { cephfs+: { secretFile: secretFile } }, - '#withUser':: d.fn(help='"Optional: User is the rados user name, default is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='user', type=d.T.string)]), + '#withUser':: d.fn(help='"user is optional: User is the rados user name, default is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='user', type=d.T.string)]), withUser(user): { cephfs+: { user: user } }, }, '#cinder':: d.obj(help='"Represents a cinder volume resource in Openstack. 
A Cinder volume must exist before mounting to a container. The volume must also be in the same region as the kubelet. Cinder volumes support ownership management and SELinux relabeling."'), @@ -63,24 +63,24 @@ '#withName':: d.fn(help='"Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { cinder+: { secretRef+: { name: name } } }, }, - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://examples.k8s.io/mysql-cinder-pd/README.md"', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://examples.k8s.io/mysql-cinder-pd/README.md"', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { cinder+: { fsType: fsType } }, - '#withReadOnly':: d.fn(help='"Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/mysql-cinder-pd/README.md"', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/mysql-cinder-pd/README.md"', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { cinder+: { readOnly: readOnly } }, - '#withVolumeID':: d.fn(help='"volume id used to identify the volume in cinder. More info: https://examples.k8s.io/mysql-cinder-pd/README.md"', args=[d.arg(name='volumeID', type=d.T.string)]), + '#withVolumeID':: d.fn(help='"volumeID used to identify the volume in cinder. More info: https://examples.k8s.io/mysql-cinder-pd/README.md"', args=[d.arg(name='volumeID', type=d.T.string)]), withVolumeID(volumeID): { cinder+: { volumeID: volumeID } }, }, '#configMap':: d.obj(help="\"Adapts a ConfigMap into a volume.\\n\\nThe contents of the target ConfigMap's Data field will be presented in a volume as files using the keys in the Data field as the file names, unless the items element is populated with specific mappings of keys to paths. ConfigMap volumes support ownership management and SELinux relabeling.\""), configMap: { - '#withDefaultMode':: d.fn(help='"Optional: mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set."', args=[d.arg(name='defaultMode', type=d.T.integer)]), + '#withDefaultMode':: d.fn(help='"defaultMode is optional: mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Defaults to 0644. Directories within the path are not affected by this setting. 
This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set."', args=[d.arg(name='defaultMode', type=d.T.integer)]), withDefaultMode(defaultMode): { configMap+: { defaultMode: defaultMode } }, - '#withItems':: d.fn(help="\"If unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.\"", args=[d.arg(name='items', type=d.T.array)]), + '#withItems':: d.fn(help="\"items if unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.\"", args=[d.arg(name='items', type=d.T.array)]), withItems(items): { configMap+: { items: if std.isArray(v=items) then items else [items] } }, - '#withItemsMixin':: d.fn(help="\"If unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='items', type=d.T.array)]), + '#withItemsMixin':: d.fn(help="\"items if unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='items', type=d.T.array)]), withItemsMixin(items): { configMap+: { items+: if std.isArray(v=items) then items else [items] } }, '#withName':: d.fn(help='"Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { configMap+: { name: name } }, - '#withOptional':: d.fn(help='"Specify whether the ConfigMap or its keys must be defined"', args=[d.arg(name='optional', type=d.T.boolean)]), + '#withOptional':: d.fn(help='"optional specify whether the ConfigMap or its keys must be defined"', args=[d.arg(name='optional', type=d.T.boolean)]), withOptional(optional): { configMap+: { optional: optional } }, }, '#csi':: d.obj(help='"Represents a source location of a volume to mount, managed by an external CSI driver"'), @@ -90,15 +90,15 @@ '#withName':: d.fn(help='"Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { csi+: { nodePublishSecretRef+: { name: name } } }, }, - '#withDriver':: d.fn(help='"Driver is the name of the CSI driver that handles this volume. Consult with your admin for the correct name as registered in the cluster."', args=[d.arg(name='driver', type=d.T.string)]), + '#withDriver':: d.fn(help='"driver is the name of the CSI driver that handles this volume. Consult with your admin for the correct name as registered in the cluster."', args=[d.arg(name='driver', type=d.T.string)]), withDriver(driver): { csi+: { driver: driver } }, - '#withFsType':: d.fn(help='"Filesystem type to mount. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". If not provided, the empty value is passed to the associated CSI driver which will determine the default filesystem to apply."', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType to mount. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". If not provided, the empty value is passed to the associated CSI driver which will determine the default filesystem to apply."', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { csi+: { fsType: fsType } }, - '#withReadOnly':: d.fn(help='"Specifies a read-only configuration for the volume. Defaults to false (read/write)."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly specifies a read-only configuration for the volume. Defaults to false (read/write)."', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { csi+: { readOnly: readOnly } }, - '#withVolumeAttributes':: d.fn(help="\"VolumeAttributes stores driver-specific properties that are passed to the CSI driver. Consult your driver's documentation for supported values.\"", args=[d.arg(name='volumeAttributes', type=d.T.object)]), + '#withVolumeAttributes':: d.fn(help="\"volumeAttributes stores driver-specific properties that are passed to the CSI driver. Consult your driver's documentation for supported values.\"", args=[d.arg(name='volumeAttributes', type=d.T.object)]), withVolumeAttributes(volumeAttributes): { csi+: { volumeAttributes: volumeAttributes } }, - '#withVolumeAttributesMixin':: d.fn(help="\"VolumeAttributes stores driver-specific properties that are passed to the CSI driver. Consult your driver's documentation for supported values.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='volumeAttributes', type=d.T.object)]), + '#withVolumeAttributesMixin':: d.fn(help="\"volumeAttributes stores driver-specific properties that are passed to the CSI driver. 
Consult your driver's documentation for supported values.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='volumeAttributes', type=d.T.object)]), withVolumeAttributesMixin(volumeAttributes): { csi+: { volumeAttributes+: volumeAttributes } }, }, '#downwardAPI':: d.obj(help='"DownwardAPIVolumeSource represents a volume containing downward API info. Downward API volumes support ownership management and SELinux relabeling."'), @@ -112,9 +112,9 @@ }, '#emptyDir':: d.obj(help='"Represents an empty directory for a pod. Empty directory volumes support ownership management and SELinux relabeling."'), emptyDir: { - '#withMedium':: d.fn(help="\"What type of storage medium should back this directory. The default is \\\"\\\" which means to use the node's default medium. Must be an empty string (default) or Memory. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir\"", args=[d.arg(name='medium', type=d.T.string)]), + '#withMedium':: d.fn(help="\"medium represents what type of storage medium should back this directory. The default is \\\"\\\" which means to use the node's default medium. Must be an empty string (default) or Memory. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir\"", args=[d.arg(name='medium', type=d.T.string)]), withMedium(medium): { emptyDir+: { medium: medium } }, - '#withSizeLimit':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. 
The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='sizeLimit', type=d.T.string)]), + '#withSizeLimit':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n``` \u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n\\n\\t(Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n\\n\\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n\\n\\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e ```\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n\\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\\n\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n\\n- 1.5 will be serialized as \\\"1500m\\\" - 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. 
(So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='sizeLimit', type=d.T.string)]), withSizeLimit(sizeLimit): { emptyDir+: { sizeLimit: sizeLimit } }, }, '#ephemeral':: d.obj(help='"Represents an ephemeral volume that is handled by a normal storage driver."'), @@ -123,12 +123,10 @@ volumeClaimTemplate: { '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { ephemeral+: { volumeClaimTemplate+: { metadata+: { annotations: annotations } } } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { ephemeral+: { volumeClaimTemplate+: { metadata+: { annotations+: annotations } } } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { ephemeral+: { volumeClaimTemplate+: { metadata+: { clusterName: clusterName } } } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. 
Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { ephemeral+: { volumeClaimTemplate+: { metadata+: { creationTimestamp: creationTimestamp } } } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -139,21 +137,21 @@ withFinalizers(finalizers): { ephemeral+: { volumeClaimTemplate+: { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } } } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { ephemeral+: { volumeClaimTemplate+: { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } } } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. 
The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { ephemeral+: { volumeClaimTemplate+: { metadata+: { generateName: generateName } } } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { ephemeral+: { volumeClaimTemplate+: { metadata+: { generation: generation } } } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { ephemeral+: { volumeClaimTemplate+: { metadata+: { labels: labels } } } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { ephemeral+: { volumeClaimTemplate+: { metadata+: { labels+: labels } } } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { ephemeral+: { volumeClaimTemplate+: { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } } } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". 
The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { ephemeral+: { volumeClaimTemplate+: { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } } } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { ephemeral+: { volumeClaimTemplate+: { metadata+: { name: name } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { ephemeral+: { volumeClaimTemplate+: { metadata+: { namespace: namespace } } } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { ephemeral+: { volumeClaimTemplate+: { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } } } }, @@ -161,9 +159,9 @@ withOwnerReferencesMixin(ownerReferences): { ephemeral+: { volumeClaimTemplate+: { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } } } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. 
May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { ephemeral+: { volumeClaimTemplate+: { metadata+: { resourceVersion: resourceVersion } } } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { ephemeral+: { volumeClaimTemplate+: { metadata+: { selfLink: selfLink } } } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { ephemeral+: { volumeClaimTemplate+: { metadata+: { uid: uid } } } }, }, '#spec':: d.obj(help='"PersistentVolumeClaimSpec describes the common attributes of storage devices and allows a Source for provider-specific attributes"'), @@ -177,15 +175,26 @@ '#withName':: d.fn(help='"Name is the name of resource being referenced"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { ephemeral+: { volumeClaimTemplate+: { spec+: { dataSource+: { name: name } } } } }, }, - '#resources':: d.obj(help='"ResourceRequirements describes the compute resource requirements."'), + '#dataSourceRef':: d.obj(help=''), + dataSourceRef: { + '#withApiGroup':: d.fn(help='"APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. 
For any other third-party types, APIGroup is required."', args=[d.arg(name='apiGroup', type=d.T.string)]), + withApiGroup(apiGroup): { ephemeral+: { volumeClaimTemplate+: { spec+: { dataSourceRef+: { apiGroup: apiGroup } } } } }, + '#withKind':: d.fn(help='"Kind is the type of resource being referenced"', args=[d.arg(name='kind', type=d.T.string)]), + withKind(kind): { ephemeral+: { volumeClaimTemplate+: { spec+: { dataSourceRef+: { kind: kind } } } } }, + '#withName':: d.fn(help='"Name is the name of resource being referenced"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { ephemeral+: { volumeClaimTemplate+: { spec+: { dataSourceRef+: { name: name } } } } }, + '#withNamespace':: d.fn(help="\"Namespace is the namespace of resource being referenced Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled.\"", args=[d.arg(name='namespace', type=d.T.string)]), + withNamespace(namespace): { ephemeral+: { volumeClaimTemplate+: { spec+: { dataSourceRef+: { namespace: namespace } } } } }, + }, + '#resources':: d.obj(help='"VolumeResourceRequirements describes the storage resource requirements for a volume."'), resources: { '#withLimits':: d.fn(help='"Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/"', args=[d.arg(name='limits', type=d.T.object)]), withLimits(limits): { ephemeral+: { volumeClaimTemplate+: { spec+: { resources+: { limits: limits } } } } }, '#withLimitsMixin':: d.fn(help='"Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='limits', type=d.T.object)]), withLimitsMixin(limits): { ephemeral+: { volumeClaimTemplate+: { spec+: { resources+: { limits+: limits } } } } }, - '#withRequests':: d.fn(help='"Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/"', args=[d.arg(name='requests', type=d.T.object)]), + '#withRequests':: d.fn(help='"Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/"', args=[d.arg(name='requests', type=d.T.object)]), withRequests(requests): { ephemeral+: { volumeClaimTemplate+: { spec+: { resources+: { requests: requests } } } } }, - '#withRequestsMixin':: d.fn(help='"Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='requests', type=d.T.object)]), + '#withRequestsMixin':: d.fn(help='"Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='requests', type=d.T.object)]), withRequestsMixin(requests): { ephemeral+: { volumeClaimTemplate+: { spec+: { resources+: { requests+: requests } } } } }, }, '#selector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), @@ -199,34 +208,36 @@ '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), withMatchLabelsMixin(matchLabels): { ephemeral+: { volumeClaimTemplate+: { spec+: { selector+: { matchLabels+: matchLabels } } } } }, }, - '#withAccessModes':: d.fn(help='"AccessModes contains the desired access modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1"', args=[d.arg(name='accessModes', type=d.T.array)]), + '#withAccessModes':: d.fn(help='"accessModes contains the desired access modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1"', args=[d.arg(name='accessModes', type=d.T.array)]), withAccessModes(accessModes): { ephemeral+: { volumeClaimTemplate+: { spec+: { accessModes: if std.isArray(v=accessModes) then accessModes else [accessModes] } } } }, - '#withAccessModesMixin':: d.fn(help='"AccessModes contains the desired access modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='accessModes', type=d.T.array)]), + '#withAccessModesMixin':: d.fn(help='"accessModes contains the desired access modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='accessModes', type=d.T.array)]), withAccessModesMixin(accessModes): { ephemeral+: { volumeClaimTemplate+: { spec+: { accessModes+: if std.isArray(v=accessModes) then accessModes else [accessModes] } } } }, - '#withStorageClassName':: d.fn(help='"Name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1"', args=[d.arg(name='storageClassName', type=d.T.string)]), + '#withStorageClassName':: d.fn(help='"storageClassName is the name of the StorageClass required by the claim. 
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1"', args=[d.arg(name='storageClassName', type=d.T.string)]), withStorageClassName(storageClassName): { ephemeral+: { volumeClaimTemplate+: { spec+: { storageClassName: storageClassName } } } }, + '#withVolumeAttributesClassName':: d.fn(help="\"volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. If specified, the CSI driver will create or update the volume with the attributes defined in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass will be applied to the claim but it's not allowed to reset this field to empty string once it is set. If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass will be set by the persistentvolume controller if it exists. If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource exists. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#volumeattributesclass (Alpha) Using this field requires the VolumeAttributesClass feature gate to be enabled.\"", args=[d.arg(name='volumeAttributesClassName', type=d.T.string)]), + withVolumeAttributesClassName(volumeAttributesClassName): { ephemeral+: { volumeClaimTemplate+: { spec+: { volumeAttributesClassName: volumeAttributesClassName } } } }, '#withVolumeMode':: d.fn(help='"volumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec."', args=[d.arg(name='volumeMode', type=d.T.string)]), withVolumeMode(volumeMode): { ephemeral+: { volumeClaimTemplate+: { spec+: { volumeMode: volumeMode } } } }, - '#withVolumeName':: d.fn(help='"VolumeName is the binding reference to the PersistentVolume backing this claim."', args=[d.arg(name='volumeName', type=d.T.string)]), + '#withVolumeName':: d.fn(help='"volumeName is the binding reference to the PersistentVolume backing this claim."', args=[d.arg(name='volumeName', type=d.T.string)]), withVolumeName(volumeName): { ephemeral+: { volumeClaimTemplate+: { spec+: { volumeName: volumeName } } } }, }, }, }, '#fc':: d.obj(help='"Represents a Fibre Channel volume. Fibre Channel volumes can only be mounted as read/write once. Fibre Channel volumes support ownership management and SELinux relabeling."'), fc: { - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { fc+: { fsType: fsType } }, - '#withLun':: d.fn(help='"Optional: FC target lun number"', args=[d.arg(name='lun', type=d.T.integer)]), + '#withLun':: d.fn(help='"lun is Optional: FC target lun number"', args=[d.arg(name='lun', type=d.T.integer)]), withLun(lun): { fc+: { lun: lun } }, - '#withReadOnly':: d.fn(help='"Optional: Defaults to false (read/write). 
ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly is Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { fc+: { readOnly: readOnly } }, - '#withTargetWWNs':: d.fn(help='"Optional: FC target worldwide names (WWNs)"', args=[d.arg(name='targetWWNs', type=d.T.array)]), + '#withTargetWWNs':: d.fn(help='"targetWWNs is Optional: FC target worldwide names (WWNs)"', args=[d.arg(name='targetWWNs', type=d.T.array)]), withTargetWWNs(targetWWNs): { fc+: { targetWWNs: if std.isArray(v=targetWWNs) then targetWWNs else [targetWWNs] } }, - '#withTargetWWNsMixin':: d.fn(help='"Optional: FC target worldwide names (WWNs)"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='targetWWNs', type=d.T.array)]), + '#withTargetWWNsMixin':: d.fn(help='"targetWWNs is Optional: FC target worldwide names (WWNs)"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='targetWWNs', type=d.T.array)]), withTargetWWNsMixin(targetWWNs): { fc+: { targetWWNs+: if std.isArray(v=targetWWNs) then targetWWNs else [targetWWNs] } }, - '#withWwids':: d.fn(help='"Optional: FC volume world wide identifiers (wwids) Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously."', args=[d.arg(name='wwids', type=d.T.array)]), + '#withWwids':: d.fn(help='"wwids Optional: FC volume world wide identifiers (wwids) Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously."', args=[d.arg(name='wwids', type=d.T.array)]), withWwids(wwids): { fc+: { wwids: if std.isArray(v=wwids) then wwids else [wwids] } }, - '#withWwidsMixin':: d.fn(help='"Optional: FC volume world wide identifiers (wwids) Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='wwids', type=d.T.array)]), + '#withWwidsMixin':: d.fn(help='"wwids Optional: FC volume world wide identifiers (wwids) Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='wwids', type=d.T.array)]), withWwidsMixin(wwids): { fc+: { wwids+: if std.isArray(v=wwids) then wwids else [wwids] } }, }, '#flexVolume':: d.obj(help='"FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin."'), @@ -236,58 +247,58 @@ '#withName':: d.fn(help='"Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { flexVolume+: { secretRef+: { name: name } } }, }, - '#withDriver':: d.fn(help='"Driver is the name of the driver to use for this volume."', args=[d.arg(name='driver', type=d.T.string)]), + '#withDriver':: d.fn(help='"driver is the name of the driver to use for this volume."', args=[d.arg(name='driver', type=d.T.string)]), withDriver(driver): { flexVolume+: { driver: driver } }, - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". 
The default filesystem depends on FlexVolume script."', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". The default filesystem depends on FlexVolume script."', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { flexVolume+: { fsType: fsType } }, - '#withOptions':: d.fn(help='"Optional: Extra command options if any."', args=[d.arg(name='options', type=d.T.object)]), + '#withOptions':: d.fn(help='"options is Optional: this field holds extra command options if any."', args=[d.arg(name='options', type=d.T.object)]), withOptions(options): { flexVolume+: { options: options } }, - '#withOptionsMixin':: d.fn(help='"Optional: Extra command options if any."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='options', type=d.T.object)]), + '#withOptionsMixin':: d.fn(help='"options is Optional: this field holds extra command options if any."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='options', type=d.T.object)]), withOptionsMixin(options): { flexVolume+: { options+: options } }, - '#withReadOnly':: d.fn(help='"Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly is Optional: defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { flexVolume+: { readOnly: readOnly } }, }, '#flocker':: d.obj(help='"Represents a Flocker volume mounted by the Flocker agent. One and only one of datasetName and datasetUUID should be set. Flocker volumes do not support ownership management or SELinux relabeling."'), flocker: { - '#withDatasetName':: d.fn(help='"Name of the dataset stored as metadata -> name on the dataset for Flocker should be considered as deprecated"', args=[d.arg(name='datasetName', type=d.T.string)]), + '#withDatasetName':: d.fn(help='"datasetName is Name of the dataset stored as metadata -> name on the dataset for Flocker should be considered as deprecated"', args=[d.arg(name='datasetName', type=d.T.string)]), withDatasetName(datasetName): { flocker+: { datasetName: datasetName } }, - '#withDatasetUUID':: d.fn(help='"UUID of the dataset. This is unique identifier of a Flocker dataset"', args=[d.arg(name='datasetUUID', type=d.T.string)]), + '#withDatasetUUID':: d.fn(help='"datasetUUID is the UUID of the dataset. This is unique identifier of a Flocker dataset"', args=[d.arg(name='datasetUUID', type=d.T.string)]), withDatasetUUID(datasetUUID): { flocker+: { datasetUUID: datasetUUID } }, }, '#gcePersistentDisk':: d.obj(help='"Represents a Persistent Disk resource in Google Compute Engine.\\n\\nA GCE PD must exist before mounting to a container. The disk must also be in the same GCE project and zone as the kubelet. A GCE PD can only be mounted as read/write once or read-only many times. GCE PDs support ownership management and SELinux relabeling."'), gcePersistentDisk: { - '#withFsType':: d.fn(help='"Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType is filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { gcePersistentDisk+: { fsType: fsType } }, - '#withPartition':: d.fn(help='"The partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \\"1\\". Similarly, the volume partition for /dev/sda is \\"0\\" (or you can leave the property empty). More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='partition', type=d.T.integer)]), + '#withPartition':: d.fn(help='"partition is the partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \\"1\\". Similarly, the volume partition for /dev/sda is \\"0\\" (or you can leave the property empty). More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='partition', type=d.T.integer)]), withPartition(partition): { gcePersistentDisk+: { partition: partition } }, - '#withPdName':: d.fn(help='"Unique name of the PD resource in GCE. Used to identify the disk in GCE. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='pdName', type=d.T.string)]), + '#withPdName':: d.fn(help='"pdName is unique name of the PD resource in GCE. Used to identify the disk in GCE. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='pdName', type=d.T.string)]), withPdName(pdName): { gcePersistentDisk+: { pdName: pdName } }, - '#withReadOnly':: d.fn(help='"ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { gcePersistentDisk+: { readOnly: readOnly } }, }, '#gitRepo':: d.obj(help="\"Represents a volume that is populated with the contents of a git repository. Git repo volumes do not support ownership management. Git repo volumes support SELinux relabeling.\\n\\nDEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod's container.\""), gitRepo: { - '#withDirectory':: d.fn(help="\"Target directory name. Must not contain or start with '..'. If '.' is supplied, the volume directory will be the git repository. Otherwise, if specified, the volume will contain the git repository in the subdirectory with the given name.\"", args=[d.arg(name='directory', type=d.T.string)]), + '#withDirectory':: d.fn(help="\"directory is the target directory name. Must not contain or start with '..'. If '.' 
is supplied, the volume directory will be the git repository. Otherwise, if specified, the volume will contain the git repository in the subdirectory with the given name.\"", args=[d.arg(name='directory', type=d.T.string)]), withDirectory(directory): { gitRepo+: { directory: directory } }, - '#withRepository':: d.fn(help='"Repository URL"', args=[d.arg(name='repository', type=d.T.string)]), + '#withRepository':: d.fn(help='"repository is the URL"', args=[d.arg(name='repository', type=d.T.string)]), withRepository(repository): { gitRepo+: { repository: repository } }, - '#withRevision':: d.fn(help='"Commit hash for the specified revision."', args=[d.arg(name='revision', type=d.T.string)]), + '#withRevision':: d.fn(help='"revision is the commit hash for the specified revision."', args=[d.arg(name='revision', type=d.T.string)]), withRevision(revision): { gitRepo+: { revision: revision } }, }, '#glusterfs':: d.obj(help='"Represents a Glusterfs mount that lasts the lifetime of a pod. Glusterfs volumes do not support ownership management or SELinux relabeling."'), glusterfs: { - '#withEndpoints':: d.fn(help='"EndpointsName is the endpoint name that details Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='endpoints', type=d.T.string)]), + '#withEndpoints':: d.fn(help='"endpoints is the endpoint name that details Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='endpoints', type=d.T.string)]), withEndpoints(endpoints): { glusterfs+: { endpoints: endpoints } }, - '#withPath':: d.fn(help='"Path is the Glusterfs volume path. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='path', type=d.T.string)]), + '#withPath':: d.fn(help='"path is the Glusterfs volume path. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='path', type=d.T.string)]), withPath(path): { glusterfs+: { path: path } }, - '#withReadOnly':: d.fn(help='"ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions. Defaults to false. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly here will force the Glusterfs volume to be mounted with read-only permissions. Defaults to false. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { glusterfs+: { readOnly: readOnly } }, }, '#hostPath':: d.obj(help='"Represents a host path mapped into a pod. Host path volumes do not support ownership management or SELinux relabeling."'), hostPath: { - '#withPath':: d.fn(help='"Path of the directory on the host. If the path is a symlink, it will follow the link to the real path. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath"', args=[d.arg(name='path', type=d.T.string)]), + '#withPath':: d.fn(help='"path of the directory on the host. If the path is a symlink, it will follow the link to the real path. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath"', args=[d.arg(name='path', type=d.T.string)]), withPath(path): { hostPath+: { path: path } }, - '#withType':: d.fn(help='"Type for HostPath Volume Defaults to \\"\\" More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath"', args=[d.arg(name='type', type=d.T.string)]), + '#withType':: d.fn(help='"type for HostPath Volume Defaults to \\"\\" More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath"', args=[d.arg(name='type', type=d.T.string)]), withType(type): { hostPath+: { type: type } }, }, '#iscsi':: d.obj(help='"Represents an ISCSI disk. ISCSI volumes can only be mounted as read/write once. ISCSI volumes support ownership management and SELinux relabeling."'), @@ -297,83 +308,83 @@ '#withName':: d.fn(help='"Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { iscsi+: { secretRef+: { name: name } } }, }, - '#withChapAuthDiscovery':: d.fn(help='"whether support iSCSI Discovery CHAP authentication"', args=[d.arg(name='chapAuthDiscovery', type=d.T.boolean)]), + '#withChapAuthDiscovery':: d.fn(help='"chapAuthDiscovery defines whether support iSCSI Discovery CHAP authentication"', args=[d.arg(name='chapAuthDiscovery', type=d.T.boolean)]), withChapAuthDiscovery(chapAuthDiscovery): { iscsi+: { chapAuthDiscovery: chapAuthDiscovery } }, - '#withChapAuthSession':: d.fn(help='"whether support iSCSI Session CHAP authentication"', args=[d.arg(name='chapAuthSession', type=d.T.boolean)]), + '#withChapAuthSession':: d.fn(help='"chapAuthSession defines whether support iSCSI Session CHAP authentication"', args=[d.arg(name='chapAuthSession', type=d.T.boolean)]), withChapAuthSession(chapAuthSession): { iscsi+: { chapAuthSession: chapAuthSession } }, - '#withFsType':: d.fn(help='"Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi"', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType is the filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi"', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { iscsi+: { fsType: fsType } }, - '#withInitiatorName':: d.fn(help='"Custom iSCSI Initiator Name. If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface : will be created for the connection."', args=[d.arg(name='initiatorName', type=d.T.string)]), + '#withInitiatorName':: d.fn(help='"initiatorName is the custom iSCSI Initiator Name. 
If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface : will be created for the connection."', args=[d.arg(name='initiatorName', type=d.T.string)]), withInitiatorName(initiatorName): { iscsi+: { initiatorName: initiatorName } }, - '#withIqn':: d.fn(help='"Target iSCSI Qualified Name."', args=[d.arg(name='iqn', type=d.T.string)]), + '#withIqn':: d.fn(help='"iqn is the target iSCSI Qualified Name."', args=[d.arg(name='iqn', type=d.T.string)]), withIqn(iqn): { iscsi+: { iqn: iqn } }, - '#withIscsiInterface':: d.fn(help="\"iSCSI Interface Name that uses an iSCSI transport. Defaults to 'default' (tcp).\"", args=[d.arg(name='iscsiInterface', type=d.T.string)]), + '#withIscsiInterface':: d.fn(help="\"iscsiInterface is the interface Name that uses an iSCSI transport. Defaults to 'default' (tcp).\"", args=[d.arg(name='iscsiInterface', type=d.T.string)]), withIscsiInterface(iscsiInterface): { iscsi+: { iscsiInterface: iscsiInterface } }, - '#withLun':: d.fn(help='"iSCSI Target Lun number."', args=[d.arg(name='lun', type=d.T.integer)]), + '#withLun':: d.fn(help='"lun represents iSCSI Target Lun number."', args=[d.arg(name='lun', type=d.T.integer)]), withLun(lun): { iscsi+: { lun: lun } }, - '#withPortals':: d.fn(help='"iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260)."', args=[d.arg(name='portals', type=d.T.array)]), + '#withPortals':: d.fn(help='"portals is the iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260)."', args=[d.arg(name='portals', type=d.T.array)]), withPortals(portals): { iscsi+: { portals: if std.isArray(v=portals) then portals else [portals] } }, - '#withPortalsMixin':: d.fn(help='"iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260)."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='portals', type=d.T.array)]), + '#withPortalsMixin':: d.fn(help='"portals is the iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260)."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='portals', type=d.T.array)]), withPortalsMixin(portals): { iscsi+: { portals+: if std.isArray(v=portals) then portals else [portals] } }, - '#withReadOnly':: d.fn(help='"ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false."', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { iscsi+: { readOnly: readOnly } }, - '#withTargetPortal':: d.fn(help='"iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260)."', args=[d.arg(name='targetPortal', type=d.T.string)]), + '#withTargetPortal':: d.fn(help='"targetPortal is iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260)."', args=[d.arg(name='targetPortal', type=d.T.string)]), withTargetPortal(targetPortal): { iscsi+: { targetPortal: targetPortal } }, }, '#nfs':: d.obj(help='"Represents an NFS mount that lasts the lifetime of a pod. 
NFS volumes do not support ownership management or SELinux relabeling."'), nfs: { - '#withPath':: d.fn(help='"Path that is exported by the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs"', args=[d.arg(name='path', type=d.T.string)]), + '#withPath':: d.fn(help='"path that is exported by the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs"', args=[d.arg(name='path', type=d.T.string)]), withPath(path): { nfs+: { path: path } }, - '#withReadOnly':: d.fn(help='"ReadOnly here will force the NFS export to be mounted with read-only permissions. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs"', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly here will force the NFS export to be mounted with read-only permissions. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs"', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { nfs+: { readOnly: readOnly } }, - '#withServer':: d.fn(help='"Server is the hostname or IP address of the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs"', args=[d.arg(name='server', type=d.T.string)]), + '#withServer':: d.fn(help='"server is the hostname or IP address of the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs"', args=[d.arg(name='server', type=d.T.string)]), withServer(server): { nfs+: { server: server } }, }, '#persistentVolumeClaim':: d.obj(help="\"PersistentVolumeClaimVolumeSource references the user's PVC in the same namespace. This volume finds the bound PV and mounts that volume for the pod. A PersistentVolumeClaimVolumeSource is, essentially, a wrapper around another type of volume that is owned by someone else (the system).\""), persistentVolumeClaim: { - '#withClaimName':: d.fn(help='"ClaimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims"', args=[d.arg(name='claimName', type=d.T.string)]), + '#withClaimName':: d.fn(help='"claimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims"', args=[d.arg(name='claimName', type=d.T.string)]), withClaimName(claimName): { persistentVolumeClaim+: { claimName: claimName } }, - '#withReadOnly':: d.fn(help='"Will force the ReadOnly setting in VolumeMounts. Default false."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly Will force the ReadOnly setting in VolumeMounts. Default false."', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { persistentVolumeClaim+: { readOnly: readOnly } }, }, '#photonPersistentDisk':: d.obj(help='"Represents a Photon Controller persistent disk resource."'), photonPersistentDisk: { - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". 
Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { photonPersistentDisk+: { fsType: fsType } }, - '#withPdID':: d.fn(help='"ID that identifies Photon Controller persistent disk"', args=[d.arg(name='pdID', type=d.T.string)]), + '#withPdID':: d.fn(help='"pdID is the ID that identifies Photon Controller persistent disk"', args=[d.arg(name='pdID', type=d.T.string)]), withPdID(pdID): { photonPersistentDisk+: { pdID: pdID } }, }, '#portworxVolume':: d.obj(help='"PortworxVolumeSource represents a Portworx volume resource."'), portworxVolume: { - '#withFsType':: d.fn(help='"FSType represents the filesystem type to mount Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fSType represents the filesystem type to mount Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { portworxVolume+: { fsType: fsType } }, - '#withReadOnly':: d.fn(help='"Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { portworxVolume+: { readOnly: readOnly } }, - '#withVolumeID':: d.fn(help='"VolumeID uniquely identifies a Portworx volume"', args=[d.arg(name='volumeID', type=d.T.string)]), + '#withVolumeID':: d.fn(help='"volumeID uniquely identifies a Portworx volume"', args=[d.arg(name='volumeID', type=d.T.string)]), withVolumeID(volumeID): { portworxVolume+: { volumeID: volumeID } }, }, '#projected':: d.obj(help='"Represents a projected volume source"'), projected: { - '#withDefaultMode':: d.fn(help='"Mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set."', args=[d.arg(name='defaultMode', type=d.T.integer)]), + '#withDefaultMode':: d.fn(help='"defaultMode are the mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Directories within the path are not affected by this setting. 
This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set."', args=[d.arg(name='defaultMode', type=d.T.integer)]), withDefaultMode(defaultMode): { projected+: { defaultMode: defaultMode } }, - '#withSources':: d.fn(help='"list of volume projections"', args=[d.arg(name='sources', type=d.T.array)]), + '#withSources':: d.fn(help='"sources is the list of volume projections"', args=[d.arg(name='sources', type=d.T.array)]), withSources(sources): { projected+: { sources: if std.isArray(v=sources) then sources else [sources] } }, - '#withSourcesMixin':: d.fn(help='"list of volume projections"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='sources', type=d.T.array)]), + '#withSourcesMixin':: d.fn(help='"sources is the list of volume projections"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='sources', type=d.T.array)]), withSourcesMixin(sources): { projected+: { sources+: if std.isArray(v=sources) then sources else [sources] } }, }, '#quobyte':: d.obj(help='"Represents a Quobyte mount that lasts the lifetime of a pod. Quobyte volumes do not support ownership management or SELinux relabeling."'), quobyte: { - '#withGroup':: d.fn(help='"Group to map volume access to Default is no group"', args=[d.arg(name='group', type=d.T.string)]), + '#withGroup':: d.fn(help='"group to map volume access to Default is no group"', args=[d.arg(name='group', type=d.T.string)]), withGroup(group): { quobyte+: { group: group } }, - '#withReadOnly':: d.fn(help='"ReadOnly here will force the Quobyte volume to be mounted with read-only permissions. Defaults to false."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly here will force the Quobyte volume to be mounted with read-only permissions. 
Defaults to false."', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { quobyte+: { readOnly: readOnly } }, - '#withRegistry':: d.fn(help='"Registry represents a single or multiple Quobyte Registry services specified as a string as host:port pair (multiple entries are separated with commas) which acts as the central registry for volumes"', args=[d.arg(name='registry', type=d.T.string)]), + '#withRegistry':: d.fn(help='"registry represents a single or multiple Quobyte Registry services specified as a string as host:port pair (multiple entries are separated with commas) which acts as the central registry for volumes"', args=[d.arg(name='registry', type=d.T.string)]), withRegistry(registry): { quobyte+: { registry: registry } }, - '#withTenant':: d.fn(help='"Tenant owning the given Quobyte volume in the Backend Used with dynamically provisioned Quobyte volumes, value is set by the plugin"', args=[d.arg(name='tenant', type=d.T.string)]), + '#withTenant':: d.fn(help='"tenant owning the given Quobyte volume in the Backend Used with dynamically provisioned Quobyte volumes, value is set by the plugin"', args=[d.arg(name='tenant', type=d.T.string)]), withTenant(tenant): { quobyte+: { tenant: tenant } }, - '#withUser':: d.fn(help='"User to map volume access to Defaults to serivceaccount user"', args=[d.arg(name='user', type=d.T.string)]), + '#withUser':: d.fn(help='"user to map volume access to Defaults to serivceaccount user"', args=[d.arg(name='user', type=d.T.string)]), withUser(user): { quobyte+: { user: user } }, - '#withVolume':: d.fn(help='"Volume is a string that references an already created Quobyte volume by name."', args=[d.arg(name='volume', type=d.T.string)]), + '#withVolume':: d.fn(help='"volume is a string that references an already created Quobyte volume by name."', args=[d.arg(name='volume', type=d.T.string)]), withVolume(volume): { quobyte+: { volume: volume } }, }, '#rbd':: d.obj(help='"Represents a Rados Block Device mount that lasts the lifetime of a pod. RBD volumes support ownership management and SELinux relabeling."'), @@ -383,21 +394,21 @@ '#withName':: d.fn(help='"Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { rbd+: { secretRef+: { name: name } } }, }, - '#withFsType':: d.fn(help='"Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd"', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType is the filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd"', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { rbd+: { fsType: fsType } }, - '#withImage':: d.fn(help='"The rados image name. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='image', type=d.T.string)]), + '#withImage':: d.fn(help='"image is the rados image name. 
More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='image', type=d.T.string)]), withImage(image): { rbd+: { image: image } }, - '#withKeyring':: d.fn(help='"Keyring is the path to key ring for RBDUser. Default is /etc/ceph/keyring. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='keyring', type=d.T.string)]), + '#withKeyring':: d.fn(help='"keyring is the path to key ring for RBDUser. Default is /etc/ceph/keyring. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='keyring', type=d.T.string)]), withKeyring(keyring): { rbd+: { keyring: keyring } }, - '#withMonitors':: d.fn(help='"A collection of Ceph monitors. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='monitors', type=d.T.array)]), + '#withMonitors':: d.fn(help='"monitors is a collection of Ceph monitors. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='monitors', type=d.T.array)]), withMonitors(monitors): { rbd+: { monitors: if std.isArray(v=monitors) then monitors else [monitors] } }, - '#withMonitorsMixin':: d.fn(help='"A collection of Ceph monitors. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='monitors', type=d.T.array)]), + '#withMonitorsMixin':: d.fn(help='"monitors is a collection of Ceph monitors. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='monitors', type=d.T.array)]), withMonitorsMixin(monitors): { rbd+: { monitors+: if std.isArray(v=monitors) then monitors else [monitors] } }, - '#withPool':: d.fn(help='"The rados pool name. Default is rbd. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='pool', type=d.T.string)]), + '#withPool':: d.fn(help='"pool is the rados pool name. Default is rbd. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='pool', type=d.T.string)]), withPool(pool): { rbd+: { pool: pool } }, - '#withReadOnly':: d.fn(help='"ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { rbd+: { readOnly: readOnly } }, - '#withUser':: d.fn(help='"The rados user name. Default is admin. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='user', type=d.T.string)]), + '#withUser':: d.fn(help='"user is the rados user name. Default is admin. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='user', type=d.T.string)]), withUser(user): { rbd+: { user: user } }, }, '#scaleIO':: d.obj(help='"ScaleIOVolumeSource represents a persistent ScaleIO volume"'), @@ -407,36 +418,36 @@ '#withName':: d.fn(help='"Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { scaleIO+: { secretRef+: { name: name } } }, }, - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Default is \\"xfs\\"."', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Default is \\"xfs\\"."', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { scaleIO+: { fsType: fsType } }, - '#withGateway':: d.fn(help='"The host address of the ScaleIO API Gateway."', args=[d.arg(name='gateway', type=d.T.string)]), + '#withGateway':: d.fn(help='"gateway is the host address of the ScaleIO API Gateway."', args=[d.arg(name='gateway', type=d.T.string)]), withGateway(gateway): { scaleIO+: { gateway: gateway } }, - '#withProtectionDomain':: d.fn(help='"The name of the ScaleIO Protection Domain for the configured storage."', args=[d.arg(name='protectionDomain', type=d.T.string)]), + '#withProtectionDomain':: d.fn(help='"protectionDomain is the name of the ScaleIO Protection Domain for the configured storage."', args=[d.arg(name='protectionDomain', type=d.T.string)]), withProtectionDomain(protectionDomain): { scaleIO+: { protectionDomain: protectionDomain } }, - '#withReadOnly':: d.fn(help='"Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { scaleIO+: { readOnly: readOnly } }, - '#withSslEnabled':: d.fn(help='"Flag to enable/disable SSL communication with Gateway, default false"', args=[d.arg(name='sslEnabled', type=d.T.boolean)]), + '#withSslEnabled':: d.fn(help='"sslEnabled Flag enable/disable SSL communication with Gateway, default false"', args=[d.arg(name='sslEnabled', type=d.T.boolean)]), withSslEnabled(sslEnabled): { scaleIO+: { sslEnabled: sslEnabled } }, - '#withStorageMode':: d.fn(help='"Indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. Default is ThinProvisioned."', args=[d.arg(name='storageMode', type=d.T.string)]), + '#withStorageMode':: d.fn(help='"storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. 
Default is ThinProvisioned."', args=[d.arg(name='storageMode', type=d.T.string)]), withStorageMode(storageMode): { scaleIO+: { storageMode: storageMode } }, - '#withStoragePool':: d.fn(help='"The ScaleIO Storage Pool associated with the protection domain."', args=[d.arg(name='storagePool', type=d.T.string)]), + '#withStoragePool':: d.fn(help='"storagePool is the ScaleIO Storage Pool associated with the protection domain."', args=[d.arg(name='storagePool', type=d.T.string)]), withStoragePool(storagePool): { scaleIO+: { storagePool: storagePool } }, - '#withSystem':: d.fn(help='"The name of the storage system as configured in ScaleIO."', args=[d.arg(name='system', type=d.T.string)]), + '#withSystem':: d.fn(help='"system is the name of the storage system as configured in ScaleIO."', args=[d.arg(name='system', type=d.T.string)]), withSystem(system): { scaleIO+: { system: system } }, - '#withVolumeName':: d.fn(help='"The name of a volume already created in the ScaleIO system that is associated with this volume source."', args=[d.arg(name='volumeName', type=d.T.string)]), + '#withVolumeName':: d.fn(help='"volumeName is the name of a volume already created in the ScaleIO system that is associated with this volume source."', args=[d.arg(name='volumeName', type=d.T.string)]), withVolumeName(volumeName): { scaleIO+: { volumeName: volumeName } }, }, '#secret':: d.obj(help="\"Adapts a Secret into a volume.\\n\\nThe contents of the target Secret's Data field will be presented in a volume as files using the keys in the Data field as the file names. Secret volumes support ownership management and SELinux relabeling.\""), secret: { - '#withDefaultMode':: d.fn(help='"Optional: mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set."', args=[d.arg(name='defaultMode', type=d.T.integer)]), + '#withDefaultMode':: d.fn(help='"defaultMode is Optional: mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set."', args=[d.arg(name='defaultMode', type=d.T.integer)]), withDefaultMode(defaultMode): { secret+: { defaultMode: defaultMode } }, - '#withItems':: d.fn(help="\"If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' 
path or start with '..'.\"", args=[d.arg(name='items', type=d.T.array)]), + '#withItems':: d.fn(help="\"items If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.\"", args=[d.arg(name='items', type=d.T.array)]), withItems(items): { secret+: { items: if std.isArray(v=items) then items else [items] } }, - '#withItemsMixin':: d.fn(help="\"If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='items', type=d.T.array)]), + '#withItemsMixin':: d.fn(help="\"items If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='items', type=d.T.array)]), withItemsMixin(items): { secret+: { items+: if std.isArray(v=items) then items else [items] } }, - '#withOptional':: d.fn(help='"Specify whether the Secret or its keys must be defined"', args=[d.arg(name='optional', type=d.T.boolean)]), + '#withOptional':: d.fn(help='"optional field specify whether the Secret or its keys must be defined"', args=[d.arg(name='optional', type=d.T.boolean)]), withOptional(optional): { secret+: { optional: optional } }, - '#withSecretName':: d.fn(help="\"Name of the secret in the pod's namespace to use. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret\"", args=[d.arg(name='secretName', type=d.T.string)]), + '#withSecretName':: d.fn(help="\"secretName is the name of the secret in the pod's namespace to use. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret\"", args=[d.arg(name='secretName', type=d.T.string)]), withSecretName(secretName): { secret+: { secretName: secretName } }, }, '#storageos':: d.obj(help='"Represents a StorageOS persistent volume resource."'), @@ -446,27 +457,27 @@ '#withName':: d.fn(help='"Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { storageos+: { secretRef+: { name: name } } }, }, - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". 
Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { storageos+: { fsType: fsType } }, - '#withReadOnly':: d.fn(help='"Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { storageos+: { readOnly: readOnly } }, - '#withVolumeName':: d.fn(help='"VolumeName is the human-readable name of the StorageOS volume. Volume names are only unique within a namespace."', args=[d.arg(name='volumeName', type=d.T.string)]), + '#withVolumeName':: d.fn(help='"volumeName is the human-readable name of the StorageOS volume. Volume names are only unique within a namespace."', args=[d.arg(name='volumeName', type=d.T.string)]), withVolumeName(volumeName): { storageos+: { volumeName: volumeName } }, - '#withVolumeNamespace':: d.fn(help="\"VolumeNamespace specifies the scope of the volume within StorageOS. If no namespace is specified then the Pod's namespace will be used. This allows the Kubernetes name scoping to be mirrored within StorageOS for tighter integration. Set VolumeName to any name to override the default behaviour. Set to \\\"default\\\" if you are not using namespaces within StorageOS. Namespaces that do not pre-exist within StorageOS will be created.\"", args=[d.arg(name='volumeNamespace', type=d.T.string)]), + '#withVolumeNamespace':: d.fn(help="\"volumeNamespace specifies the scope of the volume within StorageOS. If no namespace is specified then the Pod's namespace will be used. This allows the Kubernetes name scoping to be mirrored within StorageOS for tighter integration. Set VolumeName to any name to override the default behaviour. Set to \\\"default\\\" if you are not using namespaces within StorageOS. Namespaces that do not pre-exist within StorageOS will be created.\"", args=[d.arg(name='volumeNamespace', type=d.T.string)]), withVolumeNamespace(volumeNamespace): { storageos+: { volumeNamespace: volumeNamespace } }, }, '#vsphereVolume':: d.obj(help='"Represents a vSphere volume resource."'), vsphereVolume: { - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType is filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". 
Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { vsphereVolume+: { fsType: fsType } }, - '#withStoragePolicyID':: d.fn(help='"Storage Policy Based Management (SPBM) profile ID associated with the StoragePolicyName."', args=[d.arg(name='storagePolicyID', type=d.T.string)]), + '#withStoragePolicyID':: d.fn(help='"storagePolicyID is the storage Policy Based Management (SPBM) profile ID associated with the StoragePolicyName."', args=[d.arg(name='storagePolicyID', type=d.T.string)]), withStoragePolicyID(storagePolicyID): { vsphereVolume+: { storagePolicyID: storagePolicyID } }, - '#withStoragePolicyName':: d.fn(help='"Storage Policy Based Management (SPBM) profile name."', args=[d.arg(name='storagePolicyName', type=d.T.string)]), + '#withStoragePolicyName':: d.fn(help='"storagePolicyName is the storage Policy Based Management (SPBM) profile name."', args=[d.arg(name='storagePolicyName', type=d.T.string)]), withStoragePolicyName(storagePolicyName): { vsphereVolume+: { storagePolicyName: storagePolicyName } }, - '#withVolumePath':: d.fn(help='"Path that identifies vSphere volume vmdk"', args=[d.arg(name='volumePath', type=d.T.string)]), + '#withVolumePath':: d.fn(help='"volumePath is the path that identifies vSphere volume vmdk"', args=[d.arg(name='volumePath', type=d.T.string)]), withVolumePath(volumePath): { vsphereVolume+: { volumePath: volumePath } }, }, - '#withName':: d.fn(help="\"Volume's name. Must be a DNS_LABEL and unique within the pod. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\"", args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"name of the volume. Must be a DNS_LABEL and unique within the pod. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { name: name }, '#mixin': 'ignore', mixin: self, diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/volumeDevice.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/volumeDevice.libsonnet similarity index 100% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/volumeDevice.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/volumeDevice.libsonnet diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/volumeMount.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/volumeMount.libsonnet similarity index 100% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/volumeMount.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/volumeMount.libsonnet diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/volumeNodeAffinity.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/volumeNodeAffinity.libsonnet similarity index 100% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/volumeNodeAffinity.libsonnet rename to 
operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/volumeNodeAffinity.libsonnet diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/volumeProjection.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/volumeProjection.libsonnet new file mode 100644 index 00000000000..80ce06af152 --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/volumeProjection.libsonnet @@ -0,0 +1,66 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='volumeProjection', url='', help='"Projection that may be projected along with other supported volume types"'), + '#clusterTrustBundle':: d.obj(help='"ClusterTrustBundleProjection describes how to select a set of ClusterTrustBundle objects and project their contents into the pod filesystem."'), + clusterTrustBundle: { + '#labelSelector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), + labelSelector: { + '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressions(matchExpressions): { clusterTrustBundle+: { labelSelector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } }, + '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressionsMixin(matchExpressions): { clusterTrustBundle+: { labelSelector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } } }, + '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabels(matchLabels): { clusterTrustBundle+: { labelSelector+: { matchLabels: matchLabels } } }, + '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabelsMixin(matchLabels): { clusterTrustBundle+: { labelSelector+: { matchLabels+: matchLabels } } }, + }, + '#withName':: d.fn(help='"Select a single ClusterTrustBundle by object name. Mutually-exclusive with signerName and labelSelector."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { clusterTrustBundle+: { name: name } }, + '#withOptional':: d.fn(help="\"If true, don't block pod startup if the referenced ClusterTrustBundle(s) aren't available. If using name, then the named ClusterTrustBundle is allowed not to exist. 
If using signerName, then the combination of signerName and labelSelector is allowed to match zero ClusterTrustBundles.\"", args=[d.arg(name='optional', type=d.T.boolean)]), + withOptional(optional): { clusterTrustBundle+: { optional: optional } }, + '#withPath':: d.fn(help='"Relative path from the volume root to write the bundle."', args=[d.arg(name='path', type=d.T.string)]), + withPath(path): { clusterTrustBundle+: { path: path } }, + '#withSignerName':: d.fn(help='"Select all ClusterTrustBundles that match this signer name. Mutually-exclusive with name. The contents of all selected ClusterTrustBundles will be unified and deduplicated."', args=[d.arg(name='signerName', type=d.T.string)]), + withSignerName(signerName): { clusterTrustBundle+: { signerName: signerName } }, + }, + '#configMap':: d.obj(help="\"Adapts a ConfigMap into a projected volume.\\n\\nThe contents of the target ConfigMap's Data field will be presented in a projected volume as files using the keys in the Data field as the file names, unless the items element is populated with specific mappings of keys to paths. Note that this is identical to a configmap volume source without the default mode.\""), + configMap: { + '#withItems':: d.fn(help="\"items if unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.\"", args=[d.arg(name='items', type=d.T.array)]), + withItems(items): { configMap+: { items: if std.isArray(v=items) then items else [items] } }, + '#withItemsMixin':: d.fn(help="\"items if unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='items', type=d.T.array)]), + withItemsMixin(items): { configMap+: { items+: if std.isArray(v=items) then items else [items] } }, + '#withName':: d.fn(help='"Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { configMap+: { name: name } }, + '#withOptional':: d.fn(help='"optional specify whether the ConfigMap or its keys must be defined"', args=[d.arg(name='optional', type=d.T.boolean)]), + withOptional(optional): { configMap+: { optional: optional } }, + }, + '#downwardAPI':: d.obj(help='"Represents downward API info for projecting into a projected volume. 
Note that this is identical to a downwardAPI volume source without the default mode."'), + downwardAPI: { + '#withItems':: d.fn(help='"Items is a list of DownwardAPIVolume file"', args=[d.arg(name='items', type=d.T.array)]), + withItems(items): { downwardAPI+: { items: if std.isArray(v=items) then items else [items] } }, + '#withItemsMixin':: d.fn(help='"Items is a list of DownwardAPIVolume file"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='items', type=d.T.array)]), + withItemsMixin(items): { downwardAPI+: { items+: if std.isArray(v=items) then items else [items] } }, + }, + '#secret':: d.obj(help="\"Adapts a secret into a projected volume.\\n\\nThe contents of the target Secret's Data field will be presented in a projected volume as files using the keys in the Data field as the file names. Note that this is identical to a secret volume source without the default mode.\""), + secret: { + '#withItems':: d.fn(help="\"items if unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.\"", args=[d.arg(name='items', type=d.T.array)]), + withItems(items): { secret+: { items: if std.isArray(v=items) then items else [items] } }, + '#withItemsMixin':: d.fn(help="\"items if unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='items', type=d.T.array)]), + withItemsMixin(items): { secret+: { items+: if std.isArray(v=items) then items else [items] } }, + '#withName':: d.fn(help='"Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { secret+: { name: name } }, + '#withOptional':: d.fn(help='"optional field specify whether the Secret or its key must be defined"', args=[d.arg(name='optional', type=d.T.boolean)]), + withOptional(optional): { secret+: { optional: optional } }, + }, + '#serviceAccountToken':: d.obj(help='"ServiceAccountTokenProjection represents a projected service account token volume. This projection can be used to insert a service account token into the pods runtime filesystem for use against APIs (Kubernetes API Server or otherwise)."'), + serviceAccountToken: { + '#withAudience':: d.fn(help='"audience is the intended audience of the token. A recipient of a token must identify itself with an identifier specified in the audience of the token, and otherwise should reject the token. 
The audience defaults to the identifier of the apiserver."', args=[d.arg(name='audience', type=d.T.string)]), + withAudience(audience): { serviceAccountToken+: { audience: audience } }, + '#withExpirationSeconds':: d.fn(help='"expirationSeconds is the requested duration of validity of the service account token. As the token approaches expiration, the kubelet volume plugin will proactively rotate the service account token. The kubelet will start trying to rotate the token if the token is older than 80 percent of its time to live or if the token is older than 24 hours.Defaults to 1 hour and must be at least 10 minutes."', args=[d.arg(name='expirationSeconds', type=d.T.integer)]), + withExpirationSeconds(expirationSeconds): { serviceAccountToken+: { expirationSeconds: expirationSeconds } }, + '#withPath':: d.fn(help='"path is the path relative to the mount point of the file to project the token into."', args=[d.arg(name='path', type=d.T.string)]), + withPath(path): { serviceAccountToken+: { path: path } }, + }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/resourceRequirements.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/volumeResourceRequirements.libsonnet similarity index 64% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/resourceRequirements.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/volumeResourceRequirements.libsonnet index 4a585a66da2..63bede95a8c 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/resourceRequirements.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/volumeResourceRequirements.libsonnet @@ -1,13 +1,13 @@ { local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='resourceRequirements', url='', help='"ResourceRequirements describes the compute resource requirements."'), + '#':: d.pkg(name='volumeResourceRequirements', url='', help='"VolumeResourceRequirements describes the storage resource requirements for a volume."'), '#withLimits':: d.fn(help='"Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/"', args=[d.arg(name='limits', type=d.T.object)]), withLimits(limits): { limits: limits }, '#withLimitsMixin':: d.fn(help='"Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='limits', type=d.T.object)]), withLimitsMixin(limits): { limits+: limits }, - '#withRequests':: d.fn(help='"Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/"', args=[d.arg(name='requests', type=d.T.object)]), + '#withRequests':: d.fn(help='"Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. 
Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/"', args=[d.arg(name='requests', type=d.T.object)]), withRequests(requests): { requests: requests }, - '#withRequestsMixin':: d.fn(help='"Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='requests', type=d.T.object)]), + '#withRequestsMixin':: d.fn(help='"Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='requests', type=d.T.object)]), withRequestsMixin(requests): { requests+: requests }, '#mixin': 'ignore', mixin: self, diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/vsphereVirtualDiskVolumeSource.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/vsphereVirtualDiskVolumeSource.libsonnet new file mode 100644 index 00000000000..61cb5cfde2b --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/vsphereVirtualDiskVolumeSource.libsonnet @@ -0,0 +1,14 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='vsphereVirtualDiskVolumeSource', url='', help='"Represents a vSphere volume resource."'), + '#withFsType':: d.fn(help='"fsType is filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". 
Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), + withFsType(fsType): { fsType: fsType }, + '#withStoragePolicyID':: d.fn(help='"storagePolicyID is the storage Policy Based Management (SPBM) profile ID associated with the StoragePolicyName."', args=[d.arg(name='storagePolicyID', type=d.T.string)]), + withStoragePolicyID(storagePolicyID): { storagePolicyID: storagePolicyID }, + '#withStoragePolicyName':: d.fn(help='"storagePolicyName is the storage Policy Based Management (SPBM) profile name."', args=[d.arg(name='storagePolicyName', type=d.T.string)]), + withStoragePolicyName(storagePolicyName): { storagePolicyName: storagePolicyName }, + '#withVolumePath':: d.fn(help='"volumePath is the path that identifies vSphere volume vmdk"', args=[d.arg(name='volumePath', type=d.T.string)]), + withVolumePath(volumePath): { volumePath: volumePath }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/weightedPodAffinityTerm.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/weightedPodAffinityTerm.libsonnet similarity index 61% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/weightedPodAffinityTerm.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/weightedPodAffinityTerm.libsonnet index 3e01af44be1..f1bc1092cd2 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/weightedPodAffinityTerm.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/weightedPodAffinityTerm.libsonnet @@ -25,9 +25,17 @@ '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), withMatchLabelsMixin(matchLabels): { podAffinityTerm+: { namespaceSelector+: { matchLabels+: matchLabels } } }, }, - '#withNamespaces':: d.fn(help="\"namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means \\\"this pod's namespace\\", args=[d.arg(name='namespaces', type=d.T.array)]), + '#withMatchLabelKeys':: d.fn(help="\"MatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. Also, MatchLabelKeys cannot be set when LabelSelector isn't set. 
This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.\"", args=[d.arg(name='matchLabelKeys', type=d.T.array)]), + withMatchLabelKeys(matchLabelKeys): { podAffinityTerm+: { matchLabelKeys: if std.isArray(v=matchLabelKeys) then matchLabelKeys else [matchLabelKeys] } }, + '#withMatchLabelKeysMixin':: d.fn(help="\"MatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. Also, MatchLabelKeys cannot be set when LabelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='matchLabelKeys', type=d.T.array)]), + withMatchLabelKeysMixin(matchLabelKeys): { podAffinityTerm+: { matchLabelKeys+: if std.isArray(v=matchLabelKeys) then matchLabelKeys else [matchLabelKeys] } }, + '#withMismatchLabelKeys':: d.fn(help="\"MismatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.\"", args=[d.arg(name='mismatchLabelKeys', type=d.T.array)]), + withMismatchLabelKeys(mismatchLabelKeys): { podAffinityTerm+: { mismatchLabelKeys: if std.isArray(v=mismatchLabelKeys) then mismatchLabelKeys else [mismatchLabelKeys] } }, + '#withMismatchLabelKeysMixin':: d.fn(help="\"MismatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='mismatchLabelKeys', type=d.T.array)]), + withMismatchLabelKeysMixin(mismatchLabelKeys): { podAffinityTerm+: { mismatchLabelKeys+: if std.isArray(v=mismatchLabelKeys) then mismatchLabelKeys else [mismatchLabelKeys] } }, + '#withNamespaces':: d.fn(help="\"namespaces specifies a static list of namespace names that the term applies to. 
The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means \\\"this pod's namespace\\\".\"", args=[d.arg(name='namespaces', type=d.T.array)]), withNamespaces(namespaces): { podAffinityTerm+: { namespaces: if std.isArray(v=namespaces) then namespaces else [namespaces] } }, - '#withNamespacesMixin':: d.fn(help="\"namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means \\\"this pod's namespace\\\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='namespaces', type=d.T.array)]), + '#withNamespacesMixin':: d.fn(help="\"namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means \\\"this pod's namespace\\\".\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='namespaces', type=d.T.array)]), withNamespacesMixin(namespaces): { podAffinityTerm+: { namespaces+: if std.isArray(v=namespaces) then namespaces else [namespaces] } }, '#withTopologyKey':: d.fn(help='"This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed."', args=[d.arg(name='topologyKey', type=d.T.string)]), withTopologyKey(topologyKey): { podAffinityTerm+: { topologyKey: topologyKey } }, diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/windowsSecurityContextOptions.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/windowsSecurityContextOptions.libsonnet similarity index 74% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/windowsSecurityContextOptions.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/windowsSecurityContextOptions.libsonnet index b828906f099..431c79aa82b 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/core/v1/windowsSecurityContextOptions.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/core/v1/windowsSecurityContextOptions.libsonnet @@ -5,6 +5,8 @@ withGmsaCredentialSpec(gmsaCredentialSpec): { gmsaCredentialSpec: gmsaCredentialSpec }, '#withGmsaCredentialSpecName':: d.fn(help='"GMSACredentialSpecName is the name of the GMSA credential spec to use."', args=[d.arg(name='gmsaCredentialSpecName', type=d.T.string)]), withGmsaCredentialSpecName(gmsaCredentialSpecName): { gmsaCredentialSpecName: gmsaCredentialSpecName }, + '#withHostProcess':: d.fn(help="\"HostProcess determines if a container should be run as a 'Host Process' container. All of a Pod's containers must have the same effective HostProcess value (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). 
In addition, if HostProcess is true then HostNetwork must also be set to true.\"", args=[d.arg(name='hostProcess', type=d.T.boolean)]), + withHostProcess(hostProcess): { hostProcess: hostProcess }, '#withRunAsUserName':: d.fn(help='"The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence."', args=[d.arg(name='runAsUserName', type=d.T.string)]), withRunAsUserName(runAsUserName): { runAsUserName: runAsUserName }, '#mixin': 'ignore', diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/discovery/main.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/discovery/main.libsonnet similarity index 75% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/discovery/main.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/discovery/main.libsonnet index 1b7f8b0952e..166970c3673 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/discovery/main.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/discovery/main.libsonnet @@ -2,5 +2,4 @@ local d = (import 'doc-util/main.libsonnet'), '#':: d.pkg(name='discovery', url='', help=''), v1: (import 'v1/main.libsonnet'), - v1beta1: (import 'v1beta1/main.libsonnet'), } diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/discovery/v1/endpoint.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/discovery/v1/endpoint.libsonnet similarity index 89% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/discovery/v1/endpoint.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/discovery/v1/endpoint.libsonnet index 0eff672d68a..7b977662d75 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/discovery/v1/endpoint.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/discovery/v1/endpoint.libsonnet @@ -3,11 +3,11 @@ '#':: d.pkg(name='endpoint', url='', help='"Endpoint represents a single logical \\"backend\\" implementing a service."'), '#conditions':: d.obj(help='"EndpointConditions represents the current condition of an endpoint."'), conditions: { - '#withReady':: d.fn(help='"ready indicates that this endpoint is prepared to receive traffic, according to whatever system is managing the endpoint. A nil value indicates an unknown state. In most cases consumers should interpret this unknown state as ready. For compatibility reasons, ready should never be \\"true\\" for terminating endpoints."', args=[d.arg(name='ready', type=d.T.boolean)]), + '#withReady':: d.fn(help='"ready indicates that this endpoint is prepared to receive traffic, according to whatever system is managing the endpoint. A nil value indicates an unknown state. In most cases consumers should interpret this unknown state as ready. 
For compatibility reasons, ready should never be \\"true\\" for terminating endpoints, except when the normal readiness behavior is being explicitly overridden, for example when the associated Service has set the publishNotReadyAddresses flag."', args=[d.arg(name='ready', type=d.T.boolean)]), withReady(ready): { conditions+: { ready: ready } }, - '#withServing':: d.fn(help='"serving is identical to ready except that it is set regardless of the terminating state of endpoints. This condition should be set to true for a ready endpoint that is terminating. If nil, consumers should defer to the ready condition. This field can be enabled with the EndpointSliceTerminatingCondition feature gate."', args=[d.arg(name='serving', type=d.T.boolean)]), + '#withServing':: d.fn(help='"serving is identical to ready except that it is set regardless of the terminating state of endpoints. This condition should be set to true for a ready endpoint that is terminating. If nil, consumers should defer to the ready condition."', args=[d.arg(name='serving', type=d.T.boolean)]), withServing(serving): { conditions+: { serving: serving } }, - '#withTerminating':: d.fn(help='"terminating indicates that this endpoint is terminating. A nil value indicates an unknown state. Consumers should interpret this unknown state to mean that the endpoint is not terminating. This field can be enabled with the EndpointSliceTerminatingCondition feature gate."', args=[d.arg(name='terminating', type=d.T.boolean)]), + '#withTerminating':: d.fn(help='"terminating indicates that this endpoint is terminating. A nil value indicates an unknown state. Consumers should interpret this unknown state to mean that the endpoint is not terminating."', args=[d.arg(name='terminating', type=d.T.boolean)]), withTerminating(terminating): { conditions+: { terminating: terminating } }, }, '#hints':: d.obj(help='"EndpointHints provides hints describing how an endpoint should be consumed."'), @@ -34,9 +34,9 @@ '#withUid':: d.fn(help='"UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { targetRef+: { uid: uid } }, }, - '#withAddresses':: d.fn(help='"addresses of this endpoint. The contents of this field are interpreted according to the corresponding EndpointSlice addressType field. Consumers must handle different types of addresses in the context of their own capabilities. This must contain at least one address but no more than 100."', args=[d.arg(name='addresses', type=d.T.array)]), + '#withAddresses':: d.fn(help='"addresses of this endpoint. The contents of this field are interpreted according to the corresponding EndpointSlice addressType field. Consumers must handle different types of addresses in the context of their own capabilities. This must contain at least one address but no more than 100. These are all assumed to be fungible and clients may choose to only use the first element. Refer to: https://issue.k8s.io/106267"', args=[d.arg(name='addresses', type=d.T.array)]), withAddresses(addresses): { addresses: if std.isArray(v=addresses) then addresses else [addresses] }, - '#withAddressesMixin':: d.fn(help='"addresses of this endpoint. The contents of this field are interpreted according to the corresponding EndpointSlice addressType field. Consumers must handle different types of addresses in the context of their own capabilities. 
This must contain at least one address but no more than 100."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='addresses', type=d.T.array)]), + '#withAddressesMixin':: d.fn(help='"addresses of this endpoint. The contents of this field are interpreted according to the corresponding EndpointSlice addressType field. Consumers must handle different types of addresses in the context of their own capabilities. This must contain at least one address but no more than 100. These are all assumed to be fungible and clients may choose to only use the first element. Refer to: https://issue.k8s.io/106267"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='addresses', type=d.T.array)]), withAddressesMixin(addresses): { addresses+: if std.isArray(v=addresses) then addresses else [addresses] }, '#withDeprecatedTopology':: d.fn(help='"deprecatedTopology contains topology information part of the v1beta1 API. This field is deprecated, and will be removed when the v1beta1 API is removed (no sooner than kubernetes v1.24). While this field can hold values, it is not writable through the v1 API, and any attempts to write to it will be silently ignored. Topology information can be found in the zone and nodeName fields instead."', args=[d.arg(name='deprecatedTopology', type=d.T.object)]), withDeprecatedTopology(deprecatedTopology): { deprecatedTopology: deprecatedTopology }, @@ -44,7 +44,7 @@ withDeprecatedTopologyMixin(deprecatedTopology): { deprecatedTopology+: deprecatedTopology }, '#withHostname':: d.fn(help='"hostname of this endpoint. This field may be used by consumers of endpoints to distinguish endpoints from each other (e.g. in DNS names). Multiple endpoints which use the same hostname should be considered fungible (e.g. multiple A values in DNS). Must be lowercase and pass DNS Label (RFC 1123) validation."', args=[d.arg(name='hostname', type=d.T.string)]), withHostname(hostname): { hostname: hostname }, - '#withNodeName':: d.fn(help='"nodeName represents the name of the Node hosting this endpoint. This can be used to determine endpoints local to a Node. This field can be enabled with the EndpointSliceNodeName feature gate."', args=[d.arg(name='nodeName', type=d.T.string)]), + '#withNodeName':: d.fn(help='"nodeName represents the name of the Node hosting this endpoint. 
This can be used to determine endpoints local to a Node."', args=[d.arg(name='nodeName', type=d.T.string)]), withNodeName(nodeName): { nodeName: nodeName }, '#withZone':: d.fn(help='"zone is the name of the Zone this endpoint exists in."', args=[d.arg(name='zone', type=d.T.string)]), withZone(zone): { zone: zone }, diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/discovery/v1beta1/endpointConditions.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/discovery/v1/endpointConditions.libsonnet similarity index 74% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/discovery/v1beta1/endpointConditions.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/discovery/v1/endpointConditions.libsonnet index 9a99830684d..fc7c80f2663 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/discovery/v1beta1/endpointConditions.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/discovery/v1/endpointConditions.libsonnet @@ -1,11 +1,11 @@ { local d = (import 'doc-util/main.libsonnet'), '#':: d.pkg(name='endpointConditions', url='', help='"EndpointConditions represents the current condition of an endpoint."'), - '#withReady':: d.fn(help='"ready indicates that this endpoint is prepared to receive traffic, according to whatever system is managing the endpoint. A nil value indicates an unknown state. In most cases consumers should interpret this unknown state as ready. For compatibility reasons, ready should never be \\"true\\" for terminating endpoints."', args=[d.arg(name='ready', type=d.T.boolean)]), + '#withReady':: d.fn(help='"ready indicates that this endpoint is prepared to receive traffic, according to whatever system is managing the endpoint. A nil value indicates an unknown state. In most cases consumers should interpret this unknown state as ready. For compatibility reasons, ready should never be \\"true\\" for terminating endpoints, except when the normal readiness behavior is being explicitly overridden, for example when the associated Service has set the publishNotReadyAddresses flag."', args=[d.arg(name='ready', type=d.T.boolean)]), withReady(ready): { ready: ready }, - '#withServing':: d.fn(help='"serving is identical to ready except that it is set regardless of the terminating state of endpoints. This condition should be set to true for a ready endpoint that is terminating. If nil, consumers should defer to the ready condition. This field can be enabled with the EndpointSliceTerminatingCondition feature gate."', args=[d.arg(name='serving', type=d.T.boolean)]), + '#withServing':: d.fn(help='"serving is identical to ready except that it is set regardless of the terminating state of endpoints. This condition should be set to true for a ready endpoint that is terminating. If nil, consumers should defer to the ready condition."', args=[d.arg(name='serving', type=d.T.boolean)]), withServing(serving): { serving: serving }, - '#withTerminating':: d.fn(help='"terminating indicates that this endpoint is terminating. A nil value indicates an unknown state. Consumers should interpret this unknown state to mean that the endpoint is not terminating. This field can be enabled with the EndpointSliceTerminatingCondition feature gate."', args=[d.arg(name='terminating', type=d.T.boolean)]), + '#withTerminating':: d.fn(help='"terminating indicates that this endpoint is terminating. 
A nil value indicates an unknown state. Consumers should interpret this unknown state to mean that the endpoint is not terminating."', args=[d.arg(name='terminating', type=d.T.boolean)]), withTerminating(terminating): { terminating: terminating }, '#mixin': 'ignore', mixin: self, diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/discovery/v1/endpointHints.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/discovery/v1/endpointHints.libsonnet similarity index 100% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/discovery/v1/endpointHints.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/discovery/v1/endpointHints.libsonnet diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/discovery/v1/endpointPort.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/discovery/v1/endpointPort.libsonnet new file mode 100644 index 00000000000..a3d80f6fee4 --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/discovery/v1/endpointPort.libsonnet @@ -0,0 +1,14 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='endpointPort', url='', help='"EndpointPort represents a Port used by an EndpointSlice"'), + '#withAppProtocol':: d.fn(help="\"The application protocol for this port. This is used as a hint for implementations to offer richer behavior for protocols that they understand. This field follows standard Kubernetes label syntax. Valid values are either:\\n\\n* Un-prefixed protocol names - reserved for IANA standard service names (as per RFC-6335 and https://www.iana.org/assignments/service-names).\\n\\n* Kubernetes-defined prefixed names:\\n * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior-\\n * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455\\n * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455\\n\\n* Other protocols should use implementation-defined prefixed names such as mycompany.com/my-custom-protocol.\"", args=[d.arg(name='appProtocol', type=d.T.string)]), + withAppProtocol(appProtocol): { appProtocol: appProtocol }, + '#withName':: d.fn(help="\"name represents the name of this port. All ports in an EndpointSlice must have a unique name. If the EndpointSlice is derived from a Kubernetes service, this corresponds to the Service.ports[].name. Name must either be an empty string or pass DNS_LABEL validation: * must be no more than 63 characters long. * must consist of lower case alphanumeric characters or '-'. * must start and end with an alphanumeric character. Default is empty string.\"", args=[d.arg(name='name', type=d.T.string)]), + withName(name): { name: name }, + '#withPort':: d.fn(help='"port represents the port number of the endpoint. If this is not specified, ports are not restricted and must be interpreted in the context of the specific consumer."', args=[d.arg(name='port', type=d.T.integer)]), + withPort(port): { port: port }, + '#withProtocol':: d.fn(help='"protocol represents the IP protocol for this port. Must be UDP, TCP, or SCTP. 
Default is TCP."', args=[d.arg(name='protocol', type=d.T.string)]), + withProtocol(protocol): { protocol: protocol }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/discovery/v1/endpointSlice.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/discovery/v1/endpointSlice.libsonnet similarity index 85% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/discovery/v1/endpointSlice.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/discovery/v1/endpointSlice.libsonnet index 95f0b2c3018..2b74dbfcf47 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/discovery/v1/endpointSlice.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/discovery/v1/endpointSlice.libsonnet @@ -3,12 +3,10 @@ '#':: d.pkg(name='endpointSlice', url='', help='"EndpointSlice represents a subset of the endpoints that implement a service. For a given service there may be multiple EndpointSlice objects, selected by labels, which must be joined to produce the full set of endpoints."'), '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. 
This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -19,21 +17,21 @@ withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. 
This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { metadata+: { generateName: generateName } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { metadata+: { labels+: labels } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". 
The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { metadata+: { namespace: namespace } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, @@ -41,9 +39,9 @@ withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. 
They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { metadata+: { uid: uid } }, }, '#new':: d.fn(help='new returns an instance of EndpointSlice', args=[d.arg(name='name', type=d.T.string)]), diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/discovery/v1beta1/forZone.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/discovery/v1/forZone.libsonnet similarity index 100% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/discovery/v1beta1/forZone.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/discovery/v1/forZone.libsonnet diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/discovery/v1/main.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/discovery/v1/main.libsonnet similarity index 100% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/discovery/v1/main.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/discovery/v1/main.libsonnet diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/events/main.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/events/main.libsonnet similarity index 74% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/events/main.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/events/main.libsonnet index c60a9f4d15d..656164c451e 100644 --- 
a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/events/main.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/events/main.libsonnet @@ -2,5 +2,4 @@ local d = (import 'doc-util/main.libsonnet'), '#':: d.pkg(name='events', url='', help=''), v1: (import 'v1/main.libsonnet'), - v1beta1: (import 'v1beta1/main.libsonnet'), } diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/events/v1/event.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/events/v1/event.libsonnet similarity index 90% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/events/v1/event.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/events/v1/event.libsonnet index bcab9dec0b9..44db17976a6 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/events/v1/event.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/events/v1/event.libsonnet @@ -10,12 +10,10 @@ }, '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. 
This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -26,21 +24,21 @@ withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. 
This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { metadata+: { generateName: generateName } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { metadata+: { labels+: labels } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". 
The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { metadata+: { namespace: namespace } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, @@ -48,9 +46,9 @@ withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. 
They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { metadata+: { uid: uid } }, }, '#new':: d.fn(help='new returns an instance of Event', args=[d.arg(name='name', type=d.T.string)]), diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/events/v1/eventSeries.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/events/v1/eventSeries.libsonnet similarity index 100% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/events/v1/eventSeries.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/events/v1/eventSeries.libsonnet diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/events/v1/main.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/events/v1/main.libsonnet similarity index 100% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/events/v1/main.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/events/v1/main.libsonnet diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/flowcontrol/main.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/main.libsonnet similarity index 55% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/flowcontrol/main.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/main.libsonnet index f20b724e04c..c3d969d1ef2 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/flowcontrol/main.libsonnet +++ 
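For reference, the `new` constructor quoted above composes `apiVersion`/`kind` with `metadata.withName`, and the remaining ObjectMeta helpers are layered on with `+`. A hedged usage sketch against the updated 1.29 tree; the import path and the `event` field name are assumptions based on the vendored layout, not shown verbatim in this hunk (note also that `withSelfLink` still exists but, per the updated help text, the field is a legacy no-op on the server side):

local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';  // assumed entry point
local event = k.events.v1.event;  // assumed field name under the events v1 index

event.new('demo-event')
+ event.metadata.withNamespace('default')
+ event.metadata.withLabels({ app: 'demo' })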
b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/main.libsonnet @@ -1,5 +1,6 @@ { local d = (import 'doc-util/main.libsonnet'), '#':: d.pkg(name='flowcontrol', url='', help=''), - v1beta1: (import 'v1beta1/main.libsonnet'), + v1: (import 'v1/main.libsonnet'), + v1beta3: (import 'v1beta3/main.libsonnet'), } diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1/exemptPriorityLevelConfiguration.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1/exemptPriorityLevelConfiguration.libsonnet new file mode 100644 index 00000000000..7ee3000ff9a --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1/exemptPriorityLevelConfiguration.libsonnet @@ -0,0 +1,10 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='exemptPriorityLevelConfiguration', url='', help='"ExemptPriorityLevelConfiguration describes the configurable aspects of the handling of exempt requests. In the mandatory exempt configuration object the values in the fields here can be modified by authorized users, unlike the rest of the `spec`."'), + '#withLendablePercent':: d.fn(help="\"`lendablePercent` prescribes the fraction of the level's NominalCL that can be borrowed by other priority levels. This value of this field must be between 0 and 100, inclusive, and it defaults to 0. The number of seats that other levels can borrow from this level, known as this level's LendableConcurrencyLimit (LendableCL), is defined as follows.\\n\\nLendableCL(i) = round( NominalCL(i) * lendablePercent(i)/100.0 )\"", args=[d.arg(name='lendablePercent', type=d.T.integer)]), + withLendablePercent(lendablePercent): { lendablePercent: lendablePercent }, + '#withNominalConcurrencyShares':: d.fn(help="\"`nominalConcurrencyShares` (NCS) contributes to the computation of the NominalConcurrencyLimit (NominalCL) of this level. This is the number of execution seats nominally reserved for this priority level. This DOES NOT limit the dispatching from this priority level but affects the other priority levels through the borrowing mechanism. The server's concurrency limit (ServerCL) is divided among all the priority levels in proportion to their NCS values:\\n\\nNominalCL(i) = ceil( ServerCL * NCS(i) / sum_ncs ) sum_ncs = sum[priority level k] NCS(k)\\n\\nBigger numbers mean a larger nominal concurrency limit, at the expense of every other priority level. 
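To make the two formulas quoted in this new file concrete, a small worked example with illustrative numbers (not taken from the patch): suppose ServerCL = 600 and three priority levels with NCS values 30, 30 and 240, so sum_ncs = 300; then NominalCL for the first level is ceil(600 * 30 / 300) = 60, and with lendablePercent = 50 its LendableCL is round(60 * 50 / 100.0) = 30, i.e. half of its nominal seats may be borrowed by other levels. The same arithmetic as a tiny standalone jsonnet sketch:

// illustrative values only, not from the patch
local serverCL = 600;
local ncs = [30, 30, 240];
local sumNcs = std.foldl(function(a, b) a + b, ncs, 0);
local nominalCL(i) = std.ceil(serverCL * ncs[i] / sumNcs);
{
  nominalCL_level0: nominalCL(0),                                // ceil(600 * 30 / 300) = 60
  lendableCL_level0: std.floor(nominalCL(0) * 50 / 100 + 0.5),   // lendablePercent = 50 -> 30
}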
This field has a default value of zero.\"", args=[d.arg(name='nominalConcurrencyShares', type=d.T.integer)]), + withNominalConcurrencyShares(nominalConcurrencyShares): { nominalConcurrencyShares: nominalConcurrencyShares }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1/flowDistinguisherMethod.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1/flowDistinguisherMethod.libsonnet new file mode 100644 index 00000000000..e71b128be9c --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1/flowDistinguisherMethod.libsonnet @@ -0,0 +1,8 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='flowDistinguisherMethod', url='', help='"FlowDistinguisherMethod specifies the method of a flow distinguisher."'), + '#withType':: d.fn(help='"`type` is the type of flow distinguisher method The supported types are \\"ByUser\\" and \\"ByNamespace\\". Required."', args=[d.arg(name='type', type=d.T.string)]), + withType(type): { type: type }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1/flowSchema.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1/flowSchema.libsonnet new file mode 100644 index 00000000000..d27ead1fd4b --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1/flowSchema.libsonnet @@ -0,0 +1,73 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='flowSchema', url='', help='"FlowSchema defines the schema of a group of flows. Note that a flow is made up of a set of inbound API requests with similar attributes and is identified by a pair of strings: the name of the FlowSchema and a \\"flow distinguisher\\"."'), + '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), + metadata: { + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + withAnnotations(annotations): { metadata+: { annotations: annotations } }, + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, + '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. 
Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), + withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, + '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), + withDeletionGracePeriodSeconds(deletionGracePeriodSeconds): { metadata+: { deletionGracePeriodSeconds: deletionGracePeriodSeconds } }, + '#withDeletionTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='deletionTimestamp', type=d.T.string)]), + withDeletionTimestamp(deletionTimestamp): { metadata+: { deletionTimestamp: deletionTimestamp } }, + '#withFinalizers':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."', args=[d.arg(name='finalizers', type=d.T.array)]), + withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, + '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), + withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. 
If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + withGenerateName(generateName): { metadata+: { generateName: generateName } }, + '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), + withGeneration(generation): { metadata+: { generation: generation } }, + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), + withLabels(labels): { metadata+: { labels: labels } }, + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + withLabelsMixin(labels): { metadata+: { labels+: labels } }, + '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), + withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, + '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), + withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { metadata+: { name: name } }, + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + withNamespace(namespace): { metadata+: { namespace: namespace } }, + '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), + withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, + '#withOwnerReferencesMixin':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ownerReferences', type=d.T.array)]), + withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, + '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), + withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), + withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), + withUid(uid): { metadata+: { uid: uid } }, + }, + '#new':: d.fn(help='new returns an instance of FlowSchema', args=[d.arg(name='name', type=d.T.string)]), + new(name): { + apiVersion: 'flowcontrol.apiserver.k8s.io/v1', + kind: 'FlowSchema', + } + self.metadata.withName(name=name), + '#spec':: d.obj(help="\"FlowSchemaSpec describes how the FlowSchema's specification looks like.\""), + spec: { + '#distinguisherMethod':: d.obj(help='"FlowDistinguisherMethod specifies the method of a flow distinguisher."'), + distinguisherMethod: { + '#withType':: d.fn(help='"`type` is the type of flow distinguisher method The supported types are \\"ByUser\\" and \\"ByNamespace\\". Required."', args=[d.arg(name='type', type=d.T.string)]), + withType(type): { spec+: { distinguisherMethod+: { type: type } } }, + }, + '#priorityLevelConfiguration':: d.obj(help='"PriorityLevelConfigurationReference contains information that points to the \\"request-priority\\" being used."'), + priorityLevelConfiguration: { + '#withName':: d.fn(help='"`name` is the name of the priority level configuration being referenced Required."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { spec+: { priorityLevelConfiguration+: { name: name } } }, + }, + '#withMatchingPrecedence':: d.fn(help='"`matchingPrecedence` is used to choose among the FlowSchemas that match a given request. The chosen FlowSchema is among those with the numerically lowest (which we take to be logically highest) MatchingPrecedence. Each MatchingPrecedence value must be ranged in [1,10000]. Note that if the precedence is not specified, it will be set to 1000 as default."', args=[d.arg(name='matchingPrecedence', type=d.T.integer)]), + withMatchingPrecedence(matchingPrecedence): { spec+: { matchingPrecedence: matchingPrecedence } }, + '#withRules':: d.fn(help='"`rules` describes which requests will match this flow schema. This FlowSchema matches a request if and only if at least one member of rules matches the request. if it is an empty slice, there will be no requests matching the FlowSchema."', args=[d.arg(name='rules', type=d.T.array)]), + withRules(rules): { spec+: { rules: if std.isArray(v=rules) then rules else [rules] } }, + '#withRulesMixin':: d.fn(help='"`rules` describes which requests will match this flow schema. This FlowSchema matches a request if and only if at least one member of rules matches the request. 
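Putting the FlowSchema constructor and spec helpers above together, a hedged usage sketch; the import path is an assumption based on the vendored layout, and the rule entry is written as a raw literal mirroring the k8s API shape because the corresponding helpers appear later in the patch:

local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';  // assumed entry point
local fs = k.flowcontrol.v1.flowSchema;

fs.new('tenant-a')
+ fs.spec.priorityLevelConfiguration.withName('tenant-a-level')
+ fs.spec.distinguisherMethod.withType('ByUser')
+ fs.spec.withMatchingPrecedence(500)
+ fs.spec.withRules([
  {
    // raw PolicyRulesWithSubjects literal; see policyRulesWithSubjects.libsonnet further down
    subjects: [{ kind: 'Group', group: { name: 'system:authenticated' } }],
    resourceRules: [{ verbs: ['*'], apiGroups: ['*'], resources: ['*'], namespaces: ['*'] }],
  },
])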
if it is an empty slice, there will be no requests matching the FlowSchema."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='rules', type=d.T.array)]), + withRulesMixin(rules): { spec+: { rules+: if std.isArray(v=rules) then rules else [rules] } }, + }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta1/horizontalPodAutoscalerCondition.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1/flowSchemaCondition.libsonnet similarity index 52% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta1/horizontalPodAutoscalerCondition.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1/flowSchemaCondition.libsonnet index dcdeef65b15..f0baf7e1af4 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta1/horizontalPodAutoscalerCondition.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1/flowSchemaCondition.libsonnet @@ -1,13 +1,13 @@ { local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='horizontalPodAutoscalerCondition', url='', help='"HorizontalPodAutoscalerCondition describes the state of a HorizontalPodAutoscaler at a certain point."'), + '#':: d.pkg(name='flowSchemaCondition', url='', help='"FlowSchemaCondition describes conditions for a FlowSchema."'), '#withLastTransitionTime':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='lastTransitionTime', type=d.T.string)]), withLastTransitionTime(lastTransitionTime): { lastTransitionTime: lastTransitionTime }, - '#withMessage':: d.fn(help='"message is a human-readable explanation containing details about the transition"', args=[d.arg(name='message', type=d.T.string)]), + '#withMessage':: d.fn(help='"`message` is a human-readable message indicating details about last transition."', args=[d.arg(name='message', type=d.T.string)]), withMessage(message): { message: message }, - '#withReason':: d.fn(help="\"reason is the reason for the condition's last transition.\"", args=[d.arg(name='reason', type=d.T.string)]), + '#withReason':: d.fn(help="\"`reason` is a unique, one-word, CamelCase reason for the condition's last transition.\"", args=[d.arg(name='reason', type=d.T.string)]), withReason(reason): { reason: reason }, - '#withType':: d.fn(help='"type describes the current condition"', args=[d.arg(name='type', type=d.T.string)]), + '#withType':: d.fn(help='"`type` is the type of the condition. 
Required."', args=[d.arg(name='type', type=d.T.string)]), withType(type): { type: type }, '#mixin': 'ignore', mixin: self, diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1/flowSchemaSpec.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1/flowSchemaSpec.libsonnet new file mode 100644 index 00000000000..df30310c10c --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1/flowSchemaSpec.libsonnet @@ -0,0 +1,22 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='flowSchemaSpec', url='', help="\"FlowSchemaSpec describes how the FlowSchema's specification looks like.\""), + '#distinguisherMethod':: d.obj(help='"FlowDistinguisherMethod specifies the method of a flow distinguisher."'), + distinguisherMethod: { + '#withType':: d.fn(help='"`type` is the type of flow distinguisher method The supported types are \\"ByUser\\" and \\"ByNamespace\\". Required."', args=[d.arg(name='type', type=d.T.string)]), + withType(type): { distinguisherMethod+: { type: type } }, + }, + '#priorityLevelConfiguration':: d.obj(help='"PriorityLevelConfigurationReference contains information that points to the \\"request-priority\\" being used."'), + priorityLevelConfiguration: { + '#withName':: d.fn(help='"`name` is the name of the priority level configuration being referenced Required."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { priorityLevelConfiguration+: { name: name } }, + }, + '#withMatchingPrecedence':: d.fn(help='"`matchingPrecedence` is used to choose among the FlowSchemas that match a given request. The chosen FlowSchema is among those with the numerically lowest (which we take to be logically highest) MatchingPrecedence. Each MatchingPrecedence value must be ranged in [1,10000]. Note that if the precedence is not specified, it will be set to 1000 as default."', args=[d.arg(name='matchingPrecedence', type=d.T.integer)]), + withMatchingPrecedence(matchingPrecedence): { matchingPrecedence: matchingPrecedence }, + '#withRules':: d.fn(help='"`rules` describes which requests will match this flow schema. This FlowSchema matches a request if and only if at least one member of rules matches the request. if it is an empty slice, there will be no requests matching the FlowSchema."', args=[d.arg(name='rules', type=d.T.array)]), + withRules(rules): { rules: if std.isArray(v=rules) then rules else [rules] }, + '#withRulesMixin':: d.fn(help='"`rules` describes which requests will match this flow schema. This FlowSchema matches a request if and only if at least one member of rules matches the request. 
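Note the difference from the flowSchema object earlier in this hunk: the flowSchemaSpec helpers emit a bare spec fragment with no `spec+:` wrapper, which is handy when the spec is attached to an object built elsewhere. A minimal sketch, assuming the v1 index exposes flowSchemaSpec like the other files in this patch:

local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';  // assumed entry point
local spec = k.flowcontrol.v1.flowSchemaSpec;

// yields { priorityLevelConfiguration: { name: 'catch-all' }, matchingPrecedence: 1000 }
spec.priorityLevelConfiguration.withName('catch-all')
+ spec.withMatchingPrecedence(1000)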
if it is an empty slice, there will be no requests matching the FlowSchema."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='rules', type=d.T.array)]), + withRulesMixin(rules): { rules+: if std.isArray(v=rules) then rules else [rules] }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1/flowSchemaStatus.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1/flowSchemaStatus.libsonnet new file mode 100644 index 00000000000..3f960b32954 --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1/flowSchemaStatus.libsonnet @@ -0,0 +1,10 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='flowSchemaStatus', url='', help='"FlowSchemaStatus represents the current state of a FlowSchema."'), + '#withConditions':: d.fn(help='"`conditions` is a list of the current states of FlowSchema."', args=[d.arg(name='conditions', type=d.T.array)]), + withConditions(conditions): { conditions: if std.isArray(v=conditions) then conditions else [conditions] }, + '#withConditionsMixin':: d.fn(help='"`conditions` is a list of the current states of FlowSchema."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='conditions', type=d.T.array)]), + withConditionsMixin(conditions): { conditions+: if std.isArray(v=conditions) then conditions else [conditions] }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1/groupSubject.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1/groupSubject.libsonnet new file mode 100644 index 00000000000..35b4d42216b --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1/groupSubject.libsonnet @@ -0,0 +1,8 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='groupSubject', url='', help='"GroupSubject holds detailed information for group-kind subject."'), + '#withName':: d.fn(help='"name is the user group that matches, or \\"*\\" to match all user groups. See https://github.com/kubernetes/apiserver/blob/master/pkg/authentication/user/user.go for some well-known group names. Required."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { name: name }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1/limitResponse.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1/limitResponse.libsonnet new file mode 100644 index 00000000000..b261eef3661 --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1/limitResponse.libsonnet @@ -0,0 +1,17 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='limitResponse', url='', help='"LimitResponse defines how to handle requests that can not be executed right now."'), + '#queuing':: d.obj(help='"QueuingConfiguration holds the configuration parameters for queuing"'), + queuing: { + '#withHandSize':: d.fn(help="\"`handSize` is a small positive number that configures the shuffle sharding of requests into queues. 
When enqueuing a request at this priority level the request's flow identifier (a string pair) is hashed and the hash value is used to shuffle the list of queues and deal a hand of the size specified here. The request is put into one of the shortest queues in that hand. `handSize` must be no larger than `queues`, and should be significantly smaller (so that a few heavy flows do not saturate most of the queues). See the user-facing documentation for more extensive guidance on setting this field. This field has a default value of 8.\"", args=[d.arg(name='handSize', type=d.T.integer)]), + withHandSize(handSize): { queuing+: { handSize: handSize } }, + '#withQueueLengthLimit':: d.fn(help='"`queueLengthLimit` is the maximum number of requests allowed to be waiting in a given queue of this priority level at a time; excess requests are rejected. This value must be positive. If not specified, it will be defaulted to 50."', args=[d.arg(name='queueLengthLimit', type=d.T.integer)]), + withQueueLengthLimit(queueLengthLimit): { queuing+: { queueLengthLimit: queueLengthLimit } }, + '#withQueues':: d.fn(help='"`queues` is the number of queues for this priority level. The queues exist independently at each apiserver. The value must be positive. Setting it to 1 effectively precludes shufflesharding and thus makes the distinguisher method of associated flow schemas irrelevant. This field has a default value of 64."', args=[d.arg(name='queues', type=d.T.integer)]), + withQueues(queues): { queuing+: { queues: queues } }, + }, + '#withType':: d.fn(help='"`type` is \\"Queue\\" or \\"Reject\\". \\"Queue\\" means that requests that can not be executed upon arrival are held in a queue until they can be executed or a queuing limit is reached. \\"Reject\\" means that requests that can not be executed upon arrival are rejected. Required."', args=[d.arg(name='type', type=d.T.string)]), + withType(type): { type: type }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1/limitedPriorityLevelConfiguration.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1/limitedPriorityLevelConfiguration.libsonnet new file mode 100644 index 00000000000..20776090efc --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1/limitedPriorityLevelConfiguration.libsonnet @@ -0,0 +1,26 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='limitedPriorityLevelConfiguration', url='', help='"LimitedPriorityLevelConfiguration specifies how to handle requests that are subject to limits. It addresses two issues:\\n - How are requests for this priority level limited?\\n - What should be done with requests that exceed the limit?"'), + '#limitResponse':: d.obj(help='"LimitResponse defines how to handle requests that can not be executed right now."'), + limitResponse: { + '#queuing':: d.obj(help='"QueuingConfiguration holds the configuration parameters for queuing"'), + queuing: { + '#withHandSize':: d.fn(help="\"`handSize` is a small positive number that configures the shuffle sharding of requests into queues. When enqueuing a request at this priority level the request's flow identifier (a string pair) is hashed and the hash value is used to shuffle the list of queues and deal a hand of the size specified here. The request is put into one of the shortest queues in that hand. 
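A hedged sketch of the queuing knobs described above, using the limitResponse helpers (import path and index exposure assumed as before): with 64 queues and a hand size of 6, each flow is dealt six candidate queues and its requests land in the shortest of them, which is what keeps a single heavy flow from saturating the whole level.

local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';  // assumed entry point
local lr = k.flowcontrol.v1.limitResponse;

lr.withType('Queue')
+ lr.queuing.withQueues(64)            // queues per apiserver (default 64)
+ lr.queuing.withHandSize(6)           // each flow is dealt 6 candidate queues
+ lr.queuing.withQueueLengthLimit(50)  // per-queue backlog before requests are rejected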
`handSize` must be no larger than `queues`, and should be significantly smaller (so that a few heavy flows do not saturate most of the queues). See the user-facing documentation for more extensive guidance on setting this field. This field has a default value of 8.\"", args=[d.arg(name='handSize', type=d.T.integer)]), + withHandSize(handSize): { limitResponse+: { queuing+: { handSize: handSize } } }, + '#withQueueLengthLimit':: d.fn(help='"`queueLengthLimit` is the maximum number of requests allowed to be waiting in a given queue of this priority level at a time; excess requests are rejected. This value must be positive. If not specified, it will be defaulted to 50."', args=[d.arg(name='queueLengthLimit', type=d.T.integer)]), + withQueueLengthLimit(queueLengthLimit): { limitResponse+: { queuing+: { queueLengthLimit: queueLengthLimit } } }, + '#withQueues':: d.fn(help='"`queues` is the number of queues for this priority level. The queues exist independently at each apiserver. The value must be positive. Setting it to 1 effectively precludes shufflesharding and thus makes the distinguisher method of associated flow schemas irrelevant. This field has a default value of 64."', args=[d.arg(name='queues', type=d.T.integer)]), + withQueues(queues): { limitResponse+: { queuing+: { queues: queues } } }, + }, + '#withType':: d.fn(help='"`type` is \\"Queue\\" or \\"Reject\\". \\"Queue\\" means that requests that can not be executed upon arrival are held in a queue until they can be executed or a queuing limit is reached. \\"Reject\\" means that requests that can not be executed upon arrival are rejected. Required."', args=[d.arg(name='type', type=d.T.string)]), + withType(type): { limitResponse+: { type: type } }, + }, + '#withBorrowingLimitPercent':: d.fn(help="\"`borrowingLimitPercent`, if present, configures a limit on how many seats this priority level can borrow from other priority levels. The limit is known as this level's BorrowingConcurrencyLimit (BorrowingCL) and is a limit on the total number of seats that this level may borrow at any one time. This field holds the ratio of that limit to the level's nominal concurrency limit. When this field is non-nil, it must hold a non-negative integer and the limit is calculated as follows.\\n\\nBorrowingCL(i) = round( NominalCL(i) * borrowingLimitPercent(i)/100.0 )\\n\\nThe value of this field can be more than 100, implying that this priority level can borrow a number of seats that is greater than its own nominal concurrency limit (NominalCL). When this field is left `nil`, the limit is effectively infinite.\"", args=[d.arg(name='borrowingLimitPercent', type=d.T.integer)]), + withBorrowingLimitPercent(borrowingLimitPercent): { borrowingLimitPercent: borrowingLimitPercent }, + '#withLendablePercent':: d.fn(help="\"`lendablePercent` prescribes the fraction of the level's NominalCL that can be borrowed by other priority levels. The value of this field must be between 0 and 100, inclusive, and it defaults to 0. The number of seats that other levels can borrow from this level, known as this level's LendableConcurrencyLimit (LendableCL), is defined as follows.\\n\\nLendableCL(i) = round( NominalCL(i) * lendablePercent(i)/100.0 )\"", args=[d.arg(name='lendablePercent', type=d.T.integer)]), + withLendablePercent(lendablePercent): { lendablePercent: lendablePercent }, + '#withNominalConcurrencyShares':: d.fn(help="\"`nominalConcurrencyShares` (NCS) contributes to the computation of the NominalConcurrencyLimit (NominalCL) of this level. 
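A quick worked reading of `borrowingLimitPercent` above, with assumed numbers: if a level's NominalCL is 60 and borrowingLimitPercent is 200, then BorrowingCL = round(60 * 200 / 100.0) = 120, so the level may hold up to 120 borrowed seats at once, while leaving the field nil removes the cap entirely. A hedged usage sketch of the corresponding helpers (import path and index exposure assumed):

local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';  // assumed entry point
local lplc = k.flowcontrol.v1.limitedPriorityLevelConfiguration;

lplc.withNominalConcurrencyShares(30)
+ lplc.withLendablePercent(50)
+ lplc.withBorrowingLimitPercent(200)   // with NominalCL = 60: BorrowingCL = round(60 * 200 / 100) = 120
+ lplc.limitResponse.withType('Queue')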
This is the number of execution seats available at this priority level. This is used both for requests dispatched from this priority level as well as requests dispatched from other priority levels borrowing seats from this level. The server's concurrency limit (ServerCL) is divided among the Limited priority levels in proportion to their NCS values:\\n\\nNominalCL(i) = ceil( ServerCL * NCS(i) / sum_ncs ) sum_ncs = sum[priority level k] NCS(k)\\n\\nBigger numbers mean a larger nominal concurrency limit, at the expense of every other priority level.\\n\\nIf not specified, this field defaults to a value of 30.\\n\\nSetting this field to zero supports the construction of a \\\"jail\\\" for this priority level that is used to hold some request(s)\"", args=[d.arg(name='nominalConcurrencyShares', type=d.T.integer)]), + withNominalConcurrencyShares(nominalConcurrencyShares): { nominalConcurrencyShares: nominalConcurrencyShares }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/flowcontrol/v1beta1/main.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1/main.libsonnet similarity index 91% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/flowcontrol/v1beta1/main.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1/main.libsonnet index c386b72b841..1a114f03e60 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/flowcontrol/v1beta1/main.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1/main.libsonnet @@ -1,6 +1,7 @@ { local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='v1beta1', url='', help=''), + '#':: d.pkg(name='v1', url='', help=''), + exemptPriorityLevelConfiguration: (import 'exemptPriorityLevelConfiguration.libsonnet'), flowDistinguisherMethod: (import 'flowDistinguisherMethod.libsonnet'), flowSchema: (import 'flowSchema.libsonnet'), flowSchemaCondition: (import 'flowSchemaCondition.libsonnet'), diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1/nonResourcePolicyRule.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1/nonResourcePolicyRule.libsonnet new file mode 100644 index 00000000000..62082550083 --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1/nonResourcePolicyRule.libsonnet @@ -0,0 +1,14 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='nonResourcePolicyRule', url='', help='"NonResourcePolicyRule is a predicate that matches non-resource requests according to their verb and the target non-resource URL. A NonResourcePolicyRule matches a request if and only if both (a) at least one member of verbs matches the request and (b) at least one member of nonResourceURLs matches the request."'), + '#withNonResourceURLs':: d.fn(help='"`nonResourceURLs` is a set of url prefixes that a user should have access to and may not be empty. For example:\\n - \\"/healthz\\" is legal\\n - \\"/hea*\\" is illegal\\n - \\"/hea\\" is legal but matches nothing\\n - \\"/hea/*\\" also matches nothing\\n - \\"/healthz/*\\" matches all per-component health checks.\\n\\"*\\" matches all non-resource urls. if it is present, it must be the only entry. 
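Since this rename moves the flowcontrol index from v1beta1 to v1 (and adds the exemptPriorityLevelConfiguration import), callers of the vendored tree need the matching one-line change. A hedged before/after sketch, assuming the conventional vendored import path:

// before, against the 1.21 tree:
//   local fs = k.flowcontrol.v1beta1.flowSchema;
// after, against the 1.29 tree vendored by this patch:
local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';  // assumed entry point
local fs = k.flowcontrol.v1.flowSchema;
fs.new('catch-all')   // now emits apiVersion 'flowcontrol.apiserver.k8s.io/v1'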
Required."', args=[d.arg(name='nonResourceURLs', type=d.T.array)]), + withNonResourceURLs(nonResourceURLs): { nonResourceURLs: if std.isArray(v=nonResourceURLs) then nonResourceURLs else [nonResourceURLs] }, + '#withNonResourceURLsMixin':: d.fn(help='"`nonResourceURLs` is a set of url prefixes that a user should have access to and may not be empty. For example:\\n - \\"/healthz\\" is legal\\n - \\"/hea*\\" is illegal\\n - \\"/hea\\" is legal but matches nothing\\n - \\"/hea/*\\" also matches nothing\\n - \\"/healthz/*\\" matches all per-component health checks.\\n\\"*\\" matches all non-resource urls. if it is present, it must be the only entry. Required."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='nonResourceURLs', type=d.T.array)]), + withNonResourceURLsMixin(nonResourceURLs): { nonResourceURLs+: if std.isArray(v=nonResourceURLs) then nonResourceURLs else [nonResourceURLs] }, + '#withVerbs':: d.fn(help='"`verbs` is a list of matching verbs and may not be empty. \\"*\\" matches all verbs. If it is present, it must be the only entry. Required."', args=[d.arg(name='verbs', type=d.T.array)]), + withVerbs(verbs): { verbs: if std.isArray(v=verbs) then verbs else [verbs] }, + '#withVerbsMixin':: d.fn(help='"`verbs` is a list of matching verbs and may not be empty. \\"*\\" matches all verbs. If it is present, it must be the only entry. Required."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='verbs', type=d.T.array)]), + withVerbsMixin(verbs): { verbs+: if std.isArray(v=verbs) then verbs else [verbs] }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1/policyRulesWithSubjects.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1/policyRulesWithSubjects.libsonnet new file mode 100644 index 00000000000..b743ffba688 --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1/policyRulesWithSubjects.libsonnet @@ -0,0 +1,18 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='policyRulesWithSubjects', url='', help='"PolicyRulesWithSubjects prescribes a test that applies to a request to an apiserver. The test considers the subject making the request, the verb being requested, and the resource to be acted upon. 
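A short sketch of a non-resource rule built from the helpers above (import path assumed), matching the health-check URLs that the help text itself uses as examples:

local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';  // assumed entry point
local nr = k.flowcontrol.v1.nonResourcePolicyRule;

nr.withVerbs(['get'])
+ nr.withNonResourceURLs(['/healthz', '/healthz/*'])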
This PolicyRulesWithSubjects matches a request if and only if both (a) at least one member of subjects matches the request and (b) at least one member of resourceRules or nonResourceRules matches the request."'), + '#withNonResourceRules':: d.fn(help='"`nonResourceRules` is a list of NonResourcePolicyRules that identify matching requests according to their verb and the target non-resource URL."', args=[d.arg(name='nonResourceRules', type=d.T.array)]), + withNonResourceRules(nonResourceRules): { nonResourceRules: if std.isArray(v=nonResourceRules) then nonResourceRules else [nonResourceRules] }, + '#withNonResourceRulesMixin':: d.fn(help='"`nonResourceRules` is a list of NonResourcePolicyRules that identify matching requests according to their verb and the target non-resource URL."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='nonResourceRules', type=d.T.array)]), + withNonResourceRulesMixin(nonResourceRules): { nonResourceRules+: if std.isArray(v=nonResourceRules) then nonResourceRules else [nonResourceRules] }, + '#withResourceRules':: d.fn(help='"`resourceRules` is a slice of ResourcePolicyRules that identify matching requests according to their verb and the target resource. At least one of `resourceRules` and `nonResourceRules` has to be non-empty."', args=[d.arg(name='resourceRules', type=d.T.array)]), + withResourceRules(resourceRules): { resourceRules: if std.isArray(v=resourceRules) then resourceRules else [resourceRules] }, + '#withResourceRulesMixin':: d.fn(help='"`resourceRules` is a slice of ResourcePolicyRules that identify matching requests according to their verb and the target resource. At least one of `resourceRules` and `nonResourceRules` has to be non-empty."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='resourceRules', type=d.T.array)]), + withResourceRulesMixin(resourceRules): { resourceRules+: if std.isArray(v=resourceRules) then resourceRules else [resourceRules] }, + '#withSubjects':: d.fn(help='"subjects is the list of normal user, serviceaccount, or group that this rule cares about. There must be at least one member in this slice. A slice that includes both the system:authenticated and system:unauthenticated user groups matches every request. Required."', args=[d.arg(name='subjects', type=d.T.array)]), + withSubjects(subjects): { subjects: if std.isArray(v=subjects) then subjects else [subjects] }, + '#withSubjectsMixin':: d.fn(help='"subjects is the list of normal user, serviceaccount, or group that this rule cares about. There must be at least one member in this slice. A slice that includes both the system:authenticated and system:unauthenticated user groups matches every request. 
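Combining the helpers above with the nonResourcePolicyRule and groupSubject objects from earlier in this patch, a hedged sketch of one complete rule entry (the Subject envelope is written as a raw literal because only its group half has a helper here; import path and index exposure assumed):

local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';  // assumed entry point
local v1 = k.flowcontrol.v1;

v1.policyRulesWithSubjects.withSubjects([
  // raw Subject literal; the nested group object comes from the groupSubject helper above
  { kind: 'Group', group: v1.groupSubject.withName('system:authenticated') },
])
+ v1.policyRulesWithSubjects.withNonResourceRules([
  v1.nonResourcePolicyRule.withVerbs(['get'])
  + v1.nonResourcePolicyRule.withNonResourceURLs(['/healthz']),
])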
Required."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='subjects', type=d.T.array)]), + withSubjectsMixin(subjects): { subjects+: if std.isArray(v=subjects) then subjects else [subjects] }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1/priorityLevelConfiguration.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1/priorityLevelConfiguration.libsonnet new file mode 100644 index 00000000000..5d93abb5070 --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1/priorityLevelConfiguration.libsonnet @@ -0,0 +1,89 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='priorityLevelConfiguration', url='', help='"PriorityLevelConfiguration represents the configuration of a priority level."'), + '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), + metadata: { + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + withAnnotations(annotations): { metadata+: { annotations: annotations } }, + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, + '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), + withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, + '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), + withDeletionGracePeriodSeconds(deletionGracePeriodSeconds): { metadata+: { deletionGracePeriodSeconds: deletionGracePeriodSeconds } }, + '#withDeletionTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='deletionTimestamp', type=d.T.string)]), + withDeletionTimestamp(deletionTimestamp): { metadata+: { deletionTimestamp: deletionTimestamp } }, + '#withFinalizers':: d.fn(help='"Must be empty before the object is deleted from the registry. 
Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."', args=[d.arg(name='finalizers', type=d.T.array)]), + withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, + '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), + withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + withGenerateName(generateName): { metadata+: { generateName: generateName } }, + '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), + withGeneration(generation): { metadata+: { generation: generation } }, + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), + withLabels(labels): { metadata+: { labels: labels } }, + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + withLabelsMixin(labels): { metadata+: { labels+: labels } }, + '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), + withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, + '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), + withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { metadata+: { name: name } }, + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + withNamespace(namespace): { metadata+: { namespace: namespace } }, + '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. 
There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), + withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, + '#withOwnerReferencesMixin':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ownerReferences', type=d.T.array)]), + withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, + '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), + withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), + withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), + withUid(uid): { metadata+: { uid: uid } }, + }, + '#new':: d.fn(help='new returns an instance of PriorityLevelConfiguration', args=[d.arg(name='name', type=d.T.string)]), + new(name): { + apiVersion: 'flowcontrol.apiserver.k8s.io/v1', + kind: 'PriorityLevelConfiguration', + } + self.metadata.withName(name=name), + '#spec':: d.obj(help='"PriorityLevelConfigurationSpec specifies the configuration of a priority level."'), + spec: { + '#exempt':: d.obj(help='"ExemptPriorityLevelConfiguration describes the configurable aspects of the handling of exempt requests. In the mandatory exempt configuration object the values in the fields here can be modified by authorized users, unlike the rest of the `spec`."'), + exempt: { + '#withLendablePercent':: d.fn(help="\"`lendablePercent` prescribes the fraction of the level's NominalCL that can be borrowed by other priority levels. This value of this field must be between 0 and 100, inclusive, and it defaults to 0. 
The number of seats that other levels can borrow from this level, known as this level's LendableConcurrencyLimit (LendableCL), is defined as follows.\\n\\nLendableCL(i) = round( NominalCL(i) * lendablePercent(i)/100.0 )\"", args=[d.arg(name='lendablePercent', type=d.T.integer)]), + withLendablePercent(lendablePercent): { spec+: { exempt+: { lendablePercent: lendablePercent } } }, + '#withNominalConcurrencyShares':: d.fn(help="\"`nominalConcurrencyShares` (NCS) contributes to the computation of the NominalConcurrencyLimit (NominalCL) of this level. This is the number of execution seats nominally reserved for this priority level. This DOES NOT limit the dispatching from this priority level but affects the other priority levels through the borrowing mechanism. The server's concurrency limit (ServerCL) is divided among all the priority levels in proportion to their NCS values:\\n\\nNominalCL(i) = ceil( ServerCL * NCS(i) / sum_ncs ) sum_ncs = sum[priority level k] NCS(k)\\n\\nBigger numbers mean a larger nominal concurrency limit, at the expense of every other priority level. This field has a default value of zero.\"", args=[d.arg(name='nominalConcurrencyShares', type=d.T.integer)]), + withNominalConcurrencyShares(nominalConcurrencyShares): { spec+: { exempt+: { nominalConcurrencyShares: nominalConcurrencyShares } } }, + }, + '#limited':: d.obj(help='"LimitedPriorityLevelConfiguration specifies how to handle requests that are subject to limits. It addresses two issues:\\n - How are requests for this priority level limited?\\n - What should be done with requests that exceed the limit?"'), + limited: { + '#limitResponse':: d.obj(help='"LimitResponse defines how to handle requests that can not be executed right now."'), + limitResponse: { + '#queuing':: d.obj(help='"QueuingConfiguration holds the configuration parameters for queuing"'), + queuing: { + '#withHandSize':: d.fn(help="\"`handSize` is a small positive number that configures the shuffle sharding of requests into queues. When enqueuing a request at this priority level the request's flow identifier (a string pair) is hashed and the hash value is used to shuffle the list of queues and deal a hand of the size specified here. The request is put into one of the shortest queues in that hand. `handSize` must be no larger than `queues`, and should be significantly smaller (so that a few heavy flows do not saturate most of the queues). See the user-facing documentation for more extensive guidance on setting this field. This field has a default value of 8.\"", args=[d.arg(name='handSize', type=d.T.integer)]), + withHandSize(handSize): { spec+: { limited+: { limitResponse+: { queuing+: { handSize: handSize } } } } }, + '#withQueueLengthLimit':: d.fn(help='"`queueLengthLimit` is the maximum number of requests allowed to be waiting in a given queue of this priority level at a time; excess requests are rejected. This value must be positive. If not specified, it will be defaulted to 50."', args=[d.arg(name='queueLengthLimit', type=d.T.integer)]), + withQueueLengthLimit(queueLengthLimit): { spec+: { limited+: { limitResponse+: { queuing+: { queueLengthLimit: queueLengthLimit } } } } }, + '#withQueues':: d.fn(help='"`queues` is the number of queues for this priority level. The queues exist independently at each apiserver. The value must be positive. Setting it to 1 effectively precludes shufflesharding and thus makes the distinguisher method of associated flow schemas irrelevant. 
This field has a default value of 64."', args=[d.arg(name='queues', type=d.T.integer)]), + withQueues(queues): { spec+: { limited+: { limitResponse+: { queuing+: { queues: queues } } } } }, + }, + '#withType':: d.fn(help='"`type` is \\"Queue\\" or \\"Reject\\". \\"Queue\\" means that requests that can not be executed upon arrival are held in a queue until they can be executed or a queuing limit is reached. \\"Reject\\" means that requests that can not be executed upon arrival are rejected. Required."', args=[d.arg(name='type', type=d.T.string)]), + withType(type): { spec+: { limited+: { limitResponse+: { type: type } } } }, + }, + '#withBorrowingLimitPercent':: d.fn(help="\"`borrowingLimitPercent`, if present, configures a limit on how many seats this priority level can borrow from other priority levels. The limit is known as this level's BorrowingConcurrencyLimit (BorrowingCL) and is a limit on the total number of seats that this level may borrow at any one time. This field holds the ratio of that limit to the level's nominal concurrency limit. When this field is non-nil, it must hold a non-negative integer and the limit is calculated as follows.\\n\\nBorrowingCL(i) = round( NominalCL(i) * borrowingLimitPercent(i)/100.0 )\\n\\nThe value of this field can be more than 100, implying that this priority level can borrow a number of seats that is greater than its own nominal concurrency limit (NominalCL). When this field is left `nil`, the limit is effectively infinite.\"", args=[d.arg(name='borrowingLimitPercent', type=d.T.integer)]), + withBorrowingLimitPercent(borrowingLimitPercent): { spec+: { limited+: { borrowingLimitPercent: borrowingLimitPercent } } }, + '#withLendablePercent':: d.fn(help="\"`lendablePercent` prescribes the fraction of the level's NominalCL that can be borrowed by other priority levels. The value of this field must be between 0 and 100, inclusive, and it defaults to 0. The number of seats that other levels can borrow from this level, known as this level's LendableConcurrencyLimit (LendableCL), is defined as follows.\\n\\nLendableCL(i) = round( NominalCL(i) * lendablePercent(i)/100.0 )\"", args=[d.arg(name='lendablePercent', type=d.T.integer)]), + withLendablePercent(lendablePercent): { spec+: { limited+: { lendablePercent: lendablePercent } } }, + '#withNominalConcurrencyShares':: d.fn(help="\"`nominalConcurrencyShares` (NCS) contributes to the computation of the NominalConcurrencyLimit (NominalCL) of this level. This is the number of execution seats available at this priority level. This is used both for requests dispatched from this priority level as well as requests dispatched from other priority levels borrowing seats from this level. 
The server's concurrency limit (ServerCL) is divided among the Limited priority levels in proportion to their NCS values:\\n\\nNominalCL(i) = ceil( ServerCL * NCS(i) / sum_ncs ) sum_ncs = sum[priority level k] NCS(k)\\n\\nBigger numbers mean a larger nominal concurrency limit, at the expense of every other priority level.\\n\\nIf not specified, this field defaults to a value of 30.\\n\\nSetting this field to zero supports the construction of a \\\"jail\\\" for this priority level that is used to hold some request(s)\"", args=[d.arg(name='nominalConcurrencyShares', type=d.T.integer)]), + withNominalConcurrencyShares(nominalConcurrencyShares): { spec+: { limited+: { nominalConcurrencyShares: nominalConcurrencyShares } } }, + }, + '#withType':: d.fn(help="\"`type` indicates whether this priority level is subject to limitation on request execution. A value of `\\\"Exempt\\\"` means that requests of this priority level are not subject to a limit (and thus are never queued) and do not detract from the capacity made available to other priority levels. A value of `\\\"Limited\\\"` means that (a) requests of this priority level _are_ subject to limits and (b) some of the server's limited capacity is made available exclusively to this priority level. Required.\"", args=[d.arg(name='type', type=d.T.string)]), + withType(type): { spec+: { type: type } }, + }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1/priorityLevelConfigurationCondition.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1/priorityLevelConfigurationCondition.libsonnet new file mode 100644 index 00000000000..e7ff252eaec --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1/priorityLevelConfigurationCondition.libsonnet @@ -0,0 +1,14 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='priorityLevelConfigurationCondition', url='', help='"PriorityLevelConfigurationCondition defines the condition of priority level."'), + '#withLastTransitionTime':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='lastTransitionTime', type=d.T.string)]), + withLastTransitionTime(lastTransitionTime): { lastTransitionTime: lastTransitionTime }, + '#withMessage':: d.fn(help='"`message` is a human-readable message indicating details about last transition."', args=[d.arg(name='message', type=d.T.string)]), + withMessage(message): { message: message }, + '#withReason':: d.fn(help="\"`reason` is a unique, one-word, CamelCase reason for the condition's last transition.\"", args=[d.arg(name='reason', type=d.T.string)]), + withReason(reason): { reason: reason }, + '#withType':: d.fn(help='"`type` is the type of the condition. 
Required."', args=[d.arg(name='type', type=d.T.string)]), + withType(type): { type: type }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1/priorityLevelConfigurationReference.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1/priorityLevelConfigurationReference.libsonnet new file mode 100644 index 00000000000..8532fa3d901 --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1/priorityLevelConfigurationReference.libsonnet @@ -0,0 +1,8 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='priorityLevelConfigurationReference', url='', help='"PriorityLevelConfigurationReference contains information that points to the \\"request-priority\\" being used."'), + '#withName':: d.fn(help='"`name` is the name of the priority level configuration being referenced Required."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { name: name }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1/priorityLevelConfigurationSpec.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1/priorityLevelConfigurationSpec.libsonnet new file mode 100644 index 00000000000..cab1ca91362 --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1/priorityLevelConfigurationSpec.libsonnet @@ -0,0 +1,38 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='priorityLevelConfigurationSpec', url='', help='"PriorityLevelConfigurationSpec specifies the configuration of a priority level."'), + '#exempt':: d.obj(help='"ExemptPriorityLevelConfiguration describes the configurable aspects of the handling of exempt requests. In the mandatory exempt configuration object the values in the fields here can be modified by authorized users, unlike the rest of the `spec`."'), + exempt: { + '#withLendablePercent':: d.fn(help="\"`lendablePercent` prescribes the fraction of the level's NominalCL that can be borrowed by other priority levels. This value of this field must be between 0 and 100, inclusive, and it defaults to 0. The number of seats that other levels can borrow from this level, known as this level's LendableConcurrencyLimit (LendableCL), is defined as follows.\\n\\nLendableCL(i) = round( NominalCL(i) * lendablePercent(i)/100.0 )\"", args=[d.arg(name='lendablePercent', type=d.T.integer)]), + withLendablePercent(lendablePercent): { exempt+: { lendablePercent: lendablePercent } }, + '#withNominalConcurrencyShares':: d.fn(help="\"`nominalConcurrencyShares` (NCS) contributes to the computation of the NominalConcurrencyLimit (NominalCL) of this level. This is the number of execution seats nominally reserved for this priority level. This DOES NOT limit the dispatching from this priority level but affects the other priority levels through the borrowing mechanism. The server's concurrency limit (ServerCL) is divided among all the priority levels in proportion to their NCS values:\\n\\nNominalCL(i) = ceil( ServerCL * NCS(i) / sum_ncs ) sum_ncs = sum[priority level k] NCS(k)\\n\\nBigger numbers mean a larger nominal concurrency limit, at the expense of every other priority level. 
This field has a default value of zero.\"", args=[d.arg(name='nominalConcurrencyShares', type=d.T.integer)]), + withNominalConcurrencyShares(nominalConcurrencyShares): { exempt+: { nominalConcurrencyShares: nominalConcurrencyShares } }, + }, + '#limited':: d.obj(help='"LimitedPriorityLevelConfiguration specifies how to handle requests that are subject to limits. It addresses two issues:\\n - How are requests for this priority level limited?\\n - What should be done with requests that exceed the limit?"'), + limited: { + '#limitResponse':: d.obj(help='"LimitResponse defines how to handle requests that can not be executed right now."'), + limitResponse: { + '#queuing':: d.obj(help='"QueuingConfiguration holds the configuration parameters for queuing"'), + queuing: { + '#withHandSize':: d.fn(help="\"`handSize` is a small positive number that configures the shuffle sharding of requests into queues. When enqueuing a request at this priority level the request's flow identifier (a string pair) is hashed and the hash value is used to shuffle the list of queues and deal a hand of the size specified here. The request is put into one of the shortest queues in that hand. `handSize` must be no larger than `queues`, and should be significantly smaller (so that a few heavy flows do not saturate most of the queues). See the user-facing documentation for more extensive guidance on setting this field. This field has a default value of 8.\"", args=[d.arg(name='handSize', type=d.T.integer)]), + withHandSize(handSize): { limited+: { limitResponse+: { queuing+: { handSize: handSize } } } }, + '#withQueueLengthLimit':: d.fn(help='"`queueLengthLimit` is the maximum number of requests allowed to be waiting in a given queue of this priority level at a time; excess requests are rejected. This value must be positive. If not specified, it will be defaulted to 50."', args=[d.arg(name='queueLengthLimit', type=d.T.integer)]), + withQueueLengthLimit(queueLengthLimit): { limited+: { limitResponse+: { queuing+: { queueLengthLimit: queueLengthLimit } } } }, + '#withQueues':: d.fn(help='"`queues` is the number of queues for this priority level. The queues exist independently at each apiserver. The value must be positive. Setting it to 1 effectively precludes shufflesharding and thus makes the distinguisher method of associated flow schemas irrelevant. This field has a default value of 64."', args=[d.arg(name='queues', type=d.T.integer)]), + withQueues(queues): { limited+: { limitResponse+: { queuing+: { queues: queues } } } }, + }, + '#withType':: d.fn(help='"`type` is \\"Queue\\" or \\"Reject\\". \\"Queue\\" means that requests that can not be executed upon arrival are held in a queue until they can be executed or a queuing limit is reached. \\"Reject\\" means that requests that can not be executed upon arrival are rejected. Required."', args=[d.arg(name='type', type=d.T.string)]), + withType(type): { limited+: { limitResponse+: { type: type } } }, + }, + '#withBorrowingLimitPercent':: d.fn(help="\"`borrowingLimitPercent`, if present, configures a limit on how many seats this priority level can borrow from other priority levels. The limit is known as this level's BorrowingConcurrencyLimit (BorrowingCL) and is a limit on the total number of seats that this level may borrow at any one time. This field holds the ratio of that limit to the level's nominal concurrency limit. 
When this field is non-nil, it must hold a non-negative integer and the limit is calculated as follows.\\n\\nBorrowingCL(i) = round( NominalCL(i) * borrowingLimitPercent(i)/100.0 )\\n\\nThe value of this field can be more than 100, implying that this priority level can borrow a number of seats that is greater than its own nominal concurrency limit (NominalCL). When this field is left `nil`, the limit is effectively infinite.\"", args=[d.arg(name='borrowingLimitPercent', type=d.T.integer)]), + withBorrowingLimitPercent(borrowingLimitPercent): { limited+: { borrowingLimitPercent: borrowingLimitPercent } }, + '#withLendablePercent':: d.fn(help="\"`lendablePercent` prescribes the fraction of the level's NominalCL that can be borrowed by other priority levels. The value of this field must be between 0 and 100, inclusive, and it defaults to 0. The number of seats that other levels can borrow from this level, known as this level's LendableConcurrencyLimit (LendableCL), is defined as follows.\\n\\nLendableCL(i) = round( NominalCL(i) * lendablePercent(i)/100.0 )\"", args=[d.arg(name='lendablePercent', type=d.T.integer)]), + withLendablePercent(lendablePercent): { limited+: { lendablePercent: lendablePercent } }, + '#withNominalConcurrencyShares':: d.fn(help="\"`nominalConcurrencyShares` (NCS) contributes to the computation of the NominalConcurrencyLimit (NominalCL) of this level. This is the number of execution seats available at this priority level. This is used both for requests dispatched from this priority level as well as requests dispatched from other priority levels borrowing seats from this level. The server's concurrency limit (ServerCL) is divided among the Limited priority levels in proportion to their NCS values:\\n\\nNominalCL(i) = ceil( ServerCL * NCS(i) / sum_ncs ) sum_ncs = sum[priority level k] NCS(k)\\n\\nBigger numbers mean a larger nominal concurrency limit, at the expense of every other priority level.\\n\\nIf not specified, this field defaults to a value of 30.\\n\\nSetting this field to zero supports the construction of a \\\"jail\\\" for this priority level that is used to hold some request(s)\"", args=[d.arg(name='nominalConcurrencyShares', type=d.T.integer)]), + withNominalConcurrencyShares(nominalConcurrencyShares): { limited+: { nominalConcurrencyShares: nominalConcurrencyShares } }, + }, + '#withType':: d.fn(help="\"`type` indicates whether this priority level is subject to limitation on request execution. A value of `\\\"Exempt\\\"` means that requests of this priority level are not subject to a limit (and thus are never queued) and do not detract from the capacity made available to other priority levels. A value of `\\\"Limited\\\"` means that (a) requests of this priority level _are_ subject to limits and (b) some of the server's limited capacity is made available exclusively to this priority level. 
Required.\"", args=[d.arg(name='type', type=d.T.string)]), + withType(type): { type: type }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1/priorityLevelConfigurationStatus.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1/priorityLevelConfigurationStatus.libsonnet new file mode 100644 index 00000000000..9432a99381a --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1/priorityLevelConfigurationStatus.libsonnet @@ -0,0 +1,10 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='priorityLevelConfigurationStatus', url='', help='"PriorityLevelConfigurationStatus represents the current state of a \\"request-priority\\"."'), + '#withConditions':: d.fn(help='"`conditions` is the current state of \\"request-priority\\"."', args=[d.arg(name='conditions', type=d.T.array)]), + withConditions(conditions): { conditions: if std.isArray(v=conditions) then conditions else [conditions] }, + '#withConditionsMixin':: d.fn(help='"`conditions` is the current state of \\"request-priority\\"."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='conditions', type=d.T.array)]), + withConditionsMixin(conditions): { conditions+: if std.isArray(v=conditions) then conditions else [conditions] }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1/queuingConfiguration.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1/queuingConfiguration.libsonnet new file mode 100644 index 00000000000..e74a2f38ed7 --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1/queuingConfiguration.libsonnet @@ -0,0 +1,12 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='queuingConfiguration', url='', help='"QueuingConfiguration holds the configuration parameters for queuing"'), + '#withHandSize':: d.fn(help="\"`handSize` is a small positive number that configures the shuffle sharding of requests into queues. When enqueuing a request at this priority level the request's flow identifier (a string pair) is hashed and the hash value is used to shuffle the list of queues and deal a hand of the size specified here. The request is put into one of the shortest queues in that hand. `handSize` must be no larger than `queues`, and should be significantly smaller (so that a few heavy flows do not saturate most of the queues). See the user-facing documentation for more extensive guidance on setting this field. This field has a default value of 8.\"", args=[d.arg(name='handSize', type=d.T.integer)]), + withHandSize(handSize): { handSize: handSize }, + '#withQueueLengthLimit':: d.fn(help='"`queueLengthLimit` is the maximum number of requests allowed to be waiting in a given queue of this priority level at a time; excess requests are rejected. This value must be positive. If not specified, it will be defaulted to 50."', args=[d.arg(name='queueLengthLimit', type=d.T.integer)]), + withQueueLengthLimit(queueLengthLimit): { queueLengthLimit: queueLengthLimit }, + '#withQueues':: d.fn(help='"`queues` is the number of queues for this priority level. The queues exist independently at each apiserver. 
The value must be positive. Setting it to 1 effectively precludes shufflesharding and thus makes the distinguisher method of associated flow schemas irrelevant. This field has a default value of 64."', args=[d.arg(name='queues', type=d.T.integer)]), + withQueues(queues): { queues: queues }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1/resourcePolicyRule.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1/resourcePolicyRule.libsonnet new file mode 100644 index 00000000000..4b74e679f01 --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1/resourcePolicyRule.libsonnet @@ -0,0 +1,24 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='resourcePolicyRule', url='', help="\"ResourcePolicyRule is a predicate that matches some resource requests, testing the request's verb and the target resource. A ResourcePolicyRule matches a resource request if and only if: (a) at least one member of verbs matches the request, (b) at least one member of apiGroups matches the request, (c) at least one member of resources matches the request, and (d) either (d1) the request does not specify a namespace (i.e., `Namespace==\\\"\\\"`) and clusterScope is true or (d2) the request specifies a namespace and least one member of namespaces matches the request's namespace.\""), + '#withApiGroups':: d.fn(help='"`apiGroups` is a list of matching API groups and may not be empty. \\"*\\" matches all API groups and, if present, must be the only entry. Required."', args=[d.arg(name='apiGroups', type=d.T.array)]), + withApiGroups(apiGroups): { apiGroups: if std.isArray(v=apiGroups) then apiGroups else [apiGroups] }, + '#withApiGroupsMixin':: d.fn(help='"`apiGroups` is a list of matching API groups and may not be empty. \\"*\\" matches all API groups and, if present, must be the only entry. Required."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='apiGroups', type=d.T.array)]), + withApiGroupsMixin(apiGroups): { apiGroups+: if std.isArray(v=apiGroups) then apiGroups else [apiGroups] }, + '#withClusterScope':: d.fn(help='"`clusterScope` indicates whether to match requests that do not specify a namespace (which happens either because the resource is not namespaced or the request targets all namespaces). If this field is omitted or false then the `namespaces` field must contain a non-empty list."', args=[d.arg(name='clusterScope', type=d.T.boolean)]), + withClusterScope(clusterScope): { clusterScope: clusterScope }, + '#withNamespaces':: d.fn(help='"`namespaces` is a list of target namespaces that restricts matches. A request that specifies a target namespace matches only if either (a) this list contains that target namespace or (b) this list contains \\"*\\". Note that \\"*\\" matches any specified namespace but does not match a request that _does not specify_ a namespace (see the `clusterScope` field for that). This list may be empty, but only if `clusterScope` is true."', args=[d.arg(name='namespaces', type=d.T.array)]), + withNamespaces(namespaces): { namespaces: if std.isArray(v=namespaces) then namespaces else [namespaces] }, + '#withNamespacesMixin':: d.fn(help='"`namespaces` is a list of target namespaces that restricts matches. 
A request that specifies a target namespace matches only if either (a) this list contains that target namespace or (b) this list contains \\"*\\". Note that \\"*\\" matches any specified namespace but does not match a request that _does not specify_ a namespace (see the `clusterScope` field for that). This list may be empty, but only if `clusterScope` is true."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='namespaces', type=d.T.array)]), + withNamespacesMixin(namespaces): { namespaces+: if std.isArray(v=namespaces) then namespaces else [namespaces] }, + '#withResources':: d.fn(help='"`resources` is a list of matching resources (i.e., lowercase and plural) with, if desired, subresource. For example, [ \\"services\\", \\"nodes/status\\" ]. This list may not be empty. \\"*\\" matches all resources and, if present, must be the only entry. Required."', args=[d.arg(name='resources', type=d.T.array)]), + withResources(resources): { resources: if std.isArray(v=resources) then resources else [resources] }, + '#withResourcesMixin':: d.fn(help='"`resources` is a list of matching resources (i.e., lowercase and plural) with, if desired, subresource. For example, [ \\"services\\", \\"nodes/status\\" ]. This list may not be empty. \\"*\\" matches all resources and, if present, must be the only entry. Required."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='resources', type=d.T.array)]), + withResourcesMixin(resources): { resources+: if std.isArray(v=resources) then resources else [resources] }, + '#withVerbs':: d.fn(help='"`verbs` is a list of matching verbs and may not be empty. \\"*\\" matches all verbs and, if present, must be the only entry. Required."', args=[d.arg(name='verbs', type=d.T.array)]), + withVerbs(verbs): { verbs: if std.isArray(v=verbs) then verbs else [verbs] }, + '#withVerbsMixin':: d.fn(help='"`verbs` is a list of matching verbs and may not be empty. \\"*\\" matches all verbs and, if present, must be the only entry. Required."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='verbs', type=d.T.array)]), + withVerbsMixin(verbs): { verbs+: if std.isArray(v=verbs) then verbs else [verbs] }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1/serviceAccountSubject.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1/serviceAccountSubject.libsonnet new file mode 100644 index 00000000000..1887fc018c0 --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1/serviceAccountSubject.libsonnet @@ -0,0 +1,10 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='serviceAccountSubject', url='', help='"ServiceAccountSubject holds detailed information for service-account-kind subject."'), + '#withName':: d.fn(help='"`name` is the name of matching ServiceAccount objects, or \\"*\\" to match regardless of name. Required."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { name: name }, + '#withNamespace':: d.fn(help='"`namespace` is the namespace of matching ServiceAccount objects. 
Required."', args=[d.arg(name='namespace', type=d.T.string)]), + withNamespace(namespace): { namespace: namespace }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1/subject.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1/subject.libsonnet new file mode 100644 index 00000000000..16f120b33b5 --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1/subject.libsonnet @@ -0,0 +1,25 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='subject', url='', help='"Subject matches the originator of a request, as identified by the request authentication system. There are three ways of matching an originator; by user, group, or service account."'), + '#group':: d.obj(help='"GroupSubject holds detailed information for group-kind subject."'), + group: { + '#withName':: d.fn(help='"name is the user group that matches, or \\"*\\" to match all user groups. See https://github.com/kubernetes/apiserver/blob/master/pkg/authentication/user/user.go for some well-known group names. Required."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { group+: { name: name } }, + }, + '#serviceAccount':: d.obj(help='"ServiceAccountSubject holds detailed information for service-account-kind subject."'), + serviceAccount: { + '#withName':: d.fn(help='"`name` is the name of matching ServiceAccount objects, or \\"*\\" to match regardless of name. Required."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { serviceAccount+: { name: name } }, + '#withNamespace':: d.fn(help='"`namespace` is the namespace of matching ServiceAccount objects. Required."', args=[d.arg(name='namespace', type=d.T.string)]), + withNamespace(namespace): { serviceAccount+: { namespace: namespace } }, + }, + '#user':: d.obj(help='"UserSubject holds detailed information for user-kind subject."'), + user: { + '#withName':: d.fn(help='"`name` is the username that matches, or \\"*\\" to match all usernames. Required."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { user+: { name: name } }, + }, + '#withKind':: d.fn(help='"`kind` indicates which one of the other fields is non-empty. Required"', args=[d.arg(name='kind', type=d.T.string)]), + withKind(kind): { kind: kind }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1/userSubject.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1/userSubject.libsonnet new file mode 100644 index 00000000000..00a560d3fbe --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1/userSubject.libsonnet @@ -0,0 +1,8 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='userSubject', url='', help='"UserSubject holds detailed information for user-kind subject."'), + '#withName':: d.fn(help='"`name` is the username that matches, or \\"*\\" to match all usernames. 
Required."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { name: name }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1beta3/exemptPriorityLevelConfiguration.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1beta3/exemptPriorityLevelConfiguration.libsonnet new file mode 100644 index 00000000000..7ee3000ff9a --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1beta3/exemptPriorityLevelConfiguration.libsonnet @@ -0,0 +1,10 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='exemptPriorityLevelConfiguration', url='', help='"ExemptPriorityLevelConfiguration describes the configurable aspects of the handling of exempt requests. In the mandatory exempt configuration object the values in the fields here can be modified by authorized users, unlike the rest of the `spec`."'), + '#withLendablePercent':: d.fn(help="\"`lendablePercent` prescribes the fraction of the level's NominalCL that can be borrowed by other priority levels. This value of this field must be between 0 and 100, inclusive, and it defaults to 0. The number of seats that other levels can borrow from this level, known as this level's LendableConcurrencyLimit (LendableCL), is defined as follows.\\n\\nLendableCL(i) = round( NominalCL(i) * lendablePercent(i)/100.0 )\"", args=[d.arg(name='lendablePercent', type=d.T.integer)]), + withLendablePercent(lendablePercent): { lendablePercent: lendablePercent }, + '#withNominalConcurrencyShares':: d.fn(help="\"`nominalConcurrencyShares` (NCS) contributes to the computation of the NominalConcurrencyLimit (NominalCL) of this level. This is the number of execution seats nominally reserved for this priority level. This DOES NOT limit the dispatching from this priority level but affects the other priority levels through the borrowing mechanism. The server's concurrency limit (ServerCL) is divided among all the priority levels in proportion to their NCS values:\\n\\nNominalCL(i) = ceil( ServerCL * NCS(i) / sum_ncs ) sum_ncs = sum[priority level k] NCS(k)\\n\\nBigger numbers mean a larger nominal concurrency limit, at the expense of every other priority level. This field has a default value of zero.\"", args=[d.arg(name='nominalConcurrencyShares', type=d.T.integer)]), + withNominalConcurrencyShares(nominalConcurrencyShares): { nominalConcurrencyShares: nominalConcurrencyShares }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1beta3/flowDistinguisherMethod.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1beta3/flowDistinguisherMethod.libsonnet new file mode 100644 index 00000000000..e71b128be9c --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1beta3/flowDistinguisherMethod.libsonnet @@ -0,0 +1,8 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='flowDistinguisherMethod', url='', help='"FlowDistinguisherMethod specifies the method of a flow distinguisher."'), + '#withType':: d.fn(help='"`type` is the type of flow distinguisher method The supported types are \\"ByUser\\" and \\"ByNamespace\\". 
Required."', args=[d.arg(name='type', type=d.T.string)]), + withType(type): { type: type }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1beta3/flowSchema.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1beta3/flowSchema.libsonnet new file mode 100644 index 00000000000..70fdc485952 --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1beta3/flowSchema.libsonnet @@ -0,0 +1,73 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='flowSchema', url='', help='"FlowSchema defines the schema of a group of flows. Note that a flow is made up of a set of inbound API requests with similar attributes and is identified by a pair of strings: the name of the FlowSchema and a \\"flow distinguisher\\"."'), + '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), + metadata: { + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + withAnnotations(annotations): { metadata+: { annotations: annotations } }, + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, + '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), + withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, + '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), + withDeletionGracePeriodSeconds(deletionGracePeriodSeconds): { metadata+: { deletionGracePeriodSeconds: deletionGracePeriodSeconds } }, + '#withDeletionTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='deletionTimestamp', type=d.T.string)]), + withDeletionTimestamp(deletionTimestamp): { metadata+: { deletionTimestamp: deletionTimestamp } }, + '#withFinalizers':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. 
If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."', args=[d.arg(name='finalizers', type=d.T.array)]), + withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, + '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), + withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + withGenerateName(generateName): { metadata+: { generateName: generateName } }, + '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), + withGeneration(generation): { metadata+: { generation: generation } }, + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), + withLabels(labels): { metadata+: { labels: labels } }, + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + withLabelsMixin(labels): { metadata+: { labels+: labels } }, + '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), + withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, + '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), + withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { metadata+: { name: name } }, + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + withNamespace(namespace): { metadata+: { namespace: namespace } }, + '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. 
There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), + withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, + '#withOwnerReferencesMixin':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ownerReferences', type=d.T.array)]), + withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, + '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), + withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), + withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), + withUid(uid): { metadata+: { uid: uid } }, + }, + '#new':: d.fn(help='new returns an instance of FlowSchema', args=[d.arg(name='name', type=d.T.string)]), + new(name): { + apiVersion: 'flowcontrol.apiserver.k8s.io/v1beta3', + kind: 'FlowSchema', + } + self.metadata.withName(name=name), + '#spec':: d.obj(help="\"FlowSchemaSpec describes how the FlowSchema's specification looks like.\""), + spec: { + '#distinguisherMethod':: d.obj(help='"FlowDistinguisherMethod specifies the method of a flow distinguisher."'), + distinguisherMethod: { + '#withType':: d.fn(help='"`type` is the type of flow distinguisher method The supported types are \\"ByUser\\" and \\"ByNamespace\\". 
Required."', args=[d.arg(name='type', type=d.T.string)]), + withType(type): { spec+: { distinguisherMethod+: { type: type } } }, + }, + '#priorityLevelConfiguration':: d.obj(help='"PriorityLevelConfigurationReference contains information that points to the \\"request-priority\\" being used."'), + priorityLevelConfiguration: { + '#withName':: d.fn(help='"`name` is the name of the priority level configuration being referenced Required."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { spec+: { priorityLevelConfiguration+: { name: name } } }, + }, + '#withMatchingPrecedence':: d.fn(help='"`matchingPrecedence` is used to choose among the FlowSchemas that match a given request. The chosen FlowSchema is among those with the numerically lowest (which we take to be logically highest) MatchingPrecedence. Each MatchingPrecedence value must be ranged in [1,10000]. Note that if the precedence is not specified, it will be set to 1000 as default."', args=[d.arg(name='matchingPrecedence', type=d.T.integer)]), + withMatchingPrecedence(matchingPrecedence): { spec+: { matchingPrecedence: matchingPrecedence } }, + '#withRules':: d.fn(help='"`rules` describes which requests will match this flow schema. This FlowSchema matches a request if and only if at least one member of rules matches the request. if it is an empty slice, there will be no requests matching the FlowSchema."', args=[d.arg(name='rules', type=d.T.array)]), + withRules(rules): { spec+: { rules: if std.isArray(v=rules) then rules else [rules] } }, + '#withRulesMixin':: d.fn(help='"`rules` describes which requests will match this flow schema. This FlowSchema matches a request if and only if at least one member of rules matches the request. if it is an empty slice, there will be no requests matching the FlowSchema."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='rules', type=d.T.array)]), + withRulesMixin(rules): { spec+: { rules+: if std.isArray(v=rules) then rules else [rules] } }, + }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/horizontalPodAutoscalerCondition.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1beta3/flowSchemaCondition.libsonnet similarity index 52% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/horizontalPodAutoscalerCondition.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1beta3/flowSchemaCondition.libsonnet index dcdeef65b15..f0baf7e1af4 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/autoscaling/v2beta2/horizontalPodAutoscalerCondition.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1beta3/flowSchemaCondition.libsonnet @@ -1,13 +1,13 @@ { local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='horizontalPodAutoscalerCondition', url='', help='"HorizontalPodAutoscalerCondition describes the state of a HorizontalPodAutoscaler at a certain point."'), + '#':: d.pkg(name='flowSchemaCondition', url='', help='"FlowSchemaCondition describes conditions for a FlowSchema."'), '#withLastTransitionTime':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. 
Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='lastTransitionTime', type=d.T.string)]), withLastTransitionTime(lastTransitionTime): { lastTransitionTime: lastTransitionTime }, - '#withMessage':: d.fn(help='"message is a human-readable explanation containing details about the transition"', args=[d.arg(name='message', type=d.T.string)]), + '#withMessage':: d.fn(help='"`message` is a human-readable message indicating details about last transition."', args=[d.arg(name='message', type=d.T.string)]), withMessage(message): { message: message }, - '#withReason':: d.fn(help="\"reason is the reason for the condition's last transition.\"", args=[d.arg(name='reason', type=d.T.string)]), + '#withReason':: d.fn(help="\"`reason` is a unique, one-word, CamelCase reason for the condition's last transition.\"", args=[d.arg(name='reason', type=d.T.string)]), withReason(reason): { reason: reason }, - '#withType':: d.fn(help='"type describes the current condition"', args=[d.arg(name='type', type=d.T.string)]), + '#withType':: d.fn(help='"`type` is the type of the condition. Required."', args=[d.arg(name='type', type=d.T.string)]), withType(type): { type: type }, '#mixin': 'ignore', mixin: self, diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1beta3/flowSchemaSpec.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1beta3/flowSchemaSpec.libsonnet new file mode 100644 index 00000000000..df30310c10c --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1beta3/flowSchemaSpec.libsonnet @@ -0,0 +1,22 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='flowSchemaSpec', url='', help="\"FlowSchemaSpec describes how the FlowSchema's specification looks like.\""), + '#distinguisherMethod':: d.obj(help='"FlowDistinguisherMethod specifies the method of a flow distinguisher."'), + distinguisherMethod: { + '#withType':: d.fn(help='"`type` is the type of flow distinguisher method The supported types are \\"ByUser\\" and \\"ByNamespace\\". Required."', args=[d.arg(name='type', type=d.T.string)]), + withType(type): { distinguisherMethod+: { type: type } }, + }, + '#priorityLevelConfiguration':: d.obj(help='"PriorityLevelConfigurationReference contains information that points to the \\"request-priority\\" being used."'), + priorityLevelConfiguration: { + '#withName':: d.fn(help='"`name` is the name of the priority level configuration being referenced Required."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { priorityLevelConfiguration+: { name: name } }, + }, + '#withMatchingPrecedence':: d.fn(help='"`matchingPrecedence` is used to choose among the FlowSchemas that match a given request. The chosen FlowSchema is among those with the numerically lowest (which we take to be logically highest) MatchingPrecedence. Each MatchingPrecedence value must be ranged in [1,10000]. Note that if the precedence is not specified, it will be set to 1000 as default."', args=[d.arg(name='matchingPrecedence', type=d.T.integer)]), + withMatchingPrecedence(matchingPrecedence): { matchingPrecedence: matchingPrecedence }, + '#withRules':: d.fn(help='"`rules` describes which requests will match this flow schema. This FlowSchema matches a request if and only if at least one member of rules matches the request. 
if it is an empty slice, there will be no requests matching the FlowSchema."', args=[d.arg(name='rules', type=d.T.array)]), + withRules(rules): { rules: if std.isArray(v=rules) then rules else [rules] }, + '#withRulesMixin':: d.fn(help='"`rules` describes which requests will match this flow schema. This FlowSchema matches a request if and only if at least one member of rules matches the request. if it is an empty slice, there will be no requests matching the FlowSchema."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='rules', type=d.T.array)]), + withRulesMixin(rules): { rules+: if std.isArray(v=rules) then rules else [rules] }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1beta3/flowSchemaStatus.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1beta3/flowSchemaStatus.libsonnet new file mode 100644 index 00000000000..3f960b32954 --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1beta3/flowSchemaStatus.libsonnet @@ -0,0 +1,10 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='flowSchemaStatus', url='', help='"FlowSchemaStatus represents the current state of a FlowSchema."'), + '#withConditions':: d.fn(help='"`conditions` is a list of the current states of FlowSchema."', args=[d.arg(name='conditions', type=d.T.array)]), + withConditions(conditions): { conditions: if std.isArray(v=conditions) then conditions else [conditions] }, + '#withConditionsMixin':: d.fn(help='"`conditions` is a list of the current states of FlowSchema."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='conditions', type=d.T.array)]), + withConditionsMixin(conditions): { conditions+: if std.isArray(v=conditions) then conditions else [conditions] }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1beta3/groupSubject.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1beta3/groupSubject.libsonnet new file mode 100644 index 00000000000..35b4d42216b --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1beta3/groupSubject.libsonnet @@ -0,0 +1,8 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='groupSubject', url='', help='"GroupSubject holds detailed information for group-kind subject."'), + '#withName':: d.fn(help='"name is the user group that matches, or \\"*\\" to match all user groups. See https://github.com/kubernetes/apiserver/blob/master/pkg/authentication/user/user.go for some well-known group names. 
Required."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { name: name }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1beta3/limitResponse.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1beta3/limitResponse.libsonnet new file mode 100644 index 00000000000..b261eef3661 --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1beta3/limitResponse.libsonnet @@ -0,0 +1,17 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='limitResponse', url='', help='"LimitResponse defines how to handle requests that can not be executed right now."'), + '#queuing':: d.obj(help='"QueuingConfiguration holds the configuration parameters for queuing"'), + queuing: { + '#withHandSize':: d.fn(help="\"`handSize` is a small positive number that configures the shuffle sharding of requests into queues. When enqueuing a request at this priority level the request's flow identifier (a string pair) is hashed and the hash value is used to shuffle the list of queues and deal a hand of the size specified here. The request is put into one of the shortest queues in that hand. `handSize` must be no larger than `queues`, and should be significantly smaller (so that a few heavy flows do not saturate most of the queues). See the user-facing documentation for more extensive guidance on setting this field. This field has a default value of 8.\"", args=[d.arg(name='handSize', type=d.T.integer)]), + withHandSize(handSize): { queuing+: { handSize: handSize } }, + '#withQueueLengthLimit':: d.fn(help='"`queueLengthLimit` is the maximum number of requests allowed to be waiting in a given queue of this priority level at a time; excess requests are rejected. This value must be positive. If not specified, it will be defaulted to 50."', args=[d.arg(name='queueLengthLimit', type=d.T.integer)]), + withQueueLengthLimit(queueLengthLimit): { queuing+: { queueLengthLimit: queueLengthLimit } }, + '#withQueues':: d.fn(help='"`queues` is the number of queues for this priority level. The queues exist independently at each apiserver. The value must be positive. Setting it to 1 effectively precludes shufflesharding and thus makes the distinguisher method of associated flow schemas irrelevant. This field has a default value of 64."', args=[d.arg(name='queues', type=d.T.integer)]), + withQueues(queues): { queuing+: { queues: queues } }, + }, + '#withType':: d.fn(help='"`type` is \\"Queue\\" or \\"Reject\\". \\"Queue\\" means that requests that can not be executed upon arrival are held in a queue until they can be executed or a queuing limit is reached. \\"Reject\\" means that requests that can not be executed upon arrival are rejected. 
Required."', args=[d.arg(name='type', type=d.T.string)]), + withType(type): { type: type }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/flowcontrol/v1beta1/limitedPriorityLevelConfiguration.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1beta3/limitedPriorityLevelConfiguration.libsonnet similarity index 50% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/flowcontrol/v1beta1/limitedPriorityLevelConfiguration.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1beta3/limitedPriorityLevelConfiguration.libsonnet index a9e81cd73e6..de52c78de37 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/flowcontrol/v1beta1/limitedPriorityLevelConfiguration.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1beta3/limitedPriorityLevelConfiguration.libsonnet @@ -1,6 +1,6 @@ { local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='limitedPriorityLevelConfiguration', url='', help='"LimitedPriorityLevelConfiguration specifies how to handle requests that are subject to limits. It addresses two issues:\\n * How are requests for this priority level limited?\\n * What should be done with requests that exceed the limit?"'), + '#':: d.pkg(name='limitedPriorityLevelConfiguration', url='', help='"LimitedPriorityLevelConfiguration specifies how to handle requests that are subject to limits. It addresses two issues:\\n - How are requests for this priority level limited?\\n - What should be done with requests that exceed the limit?"'), '#limitResponse':: d.obj(help='"LimitResponse defines how to handle requests that can not be executed right now."'), limitResponse: { '#queuing':: d.obj(help='"QueuingConfiguration holds the configuration parameters for queuing"'), @@ -15,8 +15,12 @@ '#withType':: d.fn(help='"`type` is \\"Queue\\" or \\"Reject\\". \\"Queue\\" means that requests that can not be executed upon arrival are held in a queue until they can be executed or a queuing limit is reached. \\"Reject\\" means that requests that can not be executed upon arrival are rejected. Required."', args=[d.arg(name='type', type=d.T.string)]), withType(type): { limitResponse+: { type: type } }, }, - '#withAssuredConcurrencyShares':: d.fn(help="\"`assuredConcurrencyShares` (ACS) configures the execution limit, which is a limit on the number of requests of this priority level that may be exeucting at a given time. ACS must be a positive number. The server's concurrency limit (SCL) is divided among the concurrency-controlled priority levels in proportion to their assured concurrency shares. This produces the assured concurrency value (ACV) --- the number of requests that may be executing at a time --- for each such priority level:\\n\\n ACV(l) = ceil( SCL * ACS(l) / ( sum[priority levels k] ACS(k) ) )\\n\\nbigger numbers of ACS mean more reserved concurrent requests (at the expense of every other PL). 
This field has a default value of 30.\"", args=[d.arg(name='assuredConcurrencyShares', type=d.T.integer)]), - withAssuredConcurrencyShares(assuredConcurrencyShares): { assuredConcurrencyShares: assuredConcurrencyShares }, + '#withBorrowingLimitPercent':: d.fn(help="\"`borrowingLimitPercent`, if present, configures a limit on how many seats this priority level can borrow from other priority levels. The limit is known as this level's BorrowingConcurrencyLimit (BorrowingCL) and is a limit on the total number of seats that this level may borrow at any one time. This field holds the ratio of that limit to the level's nominal concurrency limit. When this field is non-nil, it must hold a non-negative integer and the limit is calculated as follows.\\n\\nBorrowingCL(i) = round( NominalCL(i) * borrowingLimitPercent(i)/100.0 )\\n\\nThe value of this field can be more than 100, implying that this priority level can borrow a number of seats that is greater than its own nominal concurrency limit (NominalCL). When this field is left `nil`, the limit is effectively infinite.\"", args=[d.arg(name='borrowingLimitPercent', type=d.T.integer)]), + withBorrowingLimitPercent(borrowingLimitPercent): { borrowingLimitPercent: borrowingLimitPercent }, + '#withLendablePercent':: d.fn(help="\"`lendablePercent` prescribes the fraction of the level's NominalCL that can be borrowed by other priority levels. The value of this field must be between 0 and 100, inclusive, and it defaults to 0. The number of seats that other levels can borrow from this level, known as this level's LendableConcurrencyLimit (LendableCL), is defined as follows.\\n\\nLendableCL(i) = round( NominalCL(i) * lendablePercent(i)/100.0 )\"", args=[d.arg(name='lendablePercent', type=d.T.integer)]), + withLendablePercent(lendablePercent): { lendablePercent: lendablePercent }, + '#withNominalConcurrencyShares':: d.fn(help="\"`nominalConcurrencyShares` (NCS) contributes to the computation of the NominalConcurrencyLimit (NominalCL) of this level. This is the number of execution seats available at this priority level. This is used both for requests dispatched from this priority level as well as requests dispatched from other priority levels borrowing seats from this level. The server's concurrency limit (ServerCL) is divided among the Limited priority levels in proportion to their NCS values:\\n\\nNominalCL(i) = ceil( ServerCL * NCS(i) / sum_ncs ) sum_ncs = sum[priority level k] NCS(k)\\n\\nBigger numbers mean a larger nominal concurrency limit, at the expense of every other priority level. 
This field has a default value of 30.\"", args=[d.arg(name='nominalConcurrencyShares', type=d.T.integer)]), + withNominalConcurrencyShares(nominalConcurrencyShares): { nominalConcurrencyShares: nominalConcurrencyShares }, '#mixin': 'ignore', mixin: self, } diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1beta3/main.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1beta3/main.libsonnet new file mode 100644 index 00000000000..7085f0abbf6 --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1beta3/main.libsonnet @@ -0,0 +1,25 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='v1beta3', url='', help=''), + exemptPriorityLevelConfiguration: (import 'exemptPriorityLevelConfiguration.libsonnet'), + flowDistinguisherMethod: (import 'flowDistinguisherMethod.libsonnet'), + flowSchema: (import 'flowSchema.libsonnet'), + flowSchemaCondition: (import 'flowSchemaCondition.libsonnet'), + flowSchemaSpec: (import 'flowSchemaSpec.libsonnet'), + flowSchemaStatus: (import 'flowSchemaStatus.libsonnet'), + groupSubject: (import 'groupSubject.libsonnet'), + limitResponse: (import 'limitResponse.libsonnet'), + limitedPriorityLevelConfiguration: (import 'limitedPriorityLevelConfiguration.libsonnet'), + nonResourcePolicyRule: (import 'nonResourcePolicyRule.libsonnet'), + policyRulesWithSubjects: (import 'policyRulesWithSubjects.libsonnet'), + priorityLevelConfiguration: (import 'priorityLevelConfiguration.libsonnet'), + priorityLevelConfigurationCondition: (import 'priorityLevelConfigurationCondition.libsonnet'), + priorityLevelConfigurationReference: (import 'priorityLevelConfigurationReference.libsonnet'), + priorityLevelConfigurationSpec: (import 'priorityLevelConfigurationSpec.libsonnet'), + priorityLevelConfigurationStatus: (import 'priorityLevelConfigurationStatus.libsonnet'), + queuingConfiguration: (import 'queuingConfiguration.libsonnet'), + resourcePolicyRule: (import 'resourcePolicyRule.libsonnet'), + serviceAccountSubject: (import 'serviceAccountSubject.libsonnet'), + subject: (import 'subject.libsonnet'), + userSubject: (import 'userSubject.libsonnet'), +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1beta3/nonResourcePolicyRule.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1beta3/nonResourcePolicyRule.libsonnet new file mode 100644 index 00000000000..62082550083 --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1beta3/nonResourcePolicyRule.libsonnet @@ -0,0 +1,14 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='nonResourcePolicyRule', url='', help='"NonResourcePolicyRule is a predicate that matches non-resource requests according to their verb and the target non-resource URL. A NonResourcePolicyRule matches a request if and only if both (a) at least one member of verbs matches the request and (b) at least one member of nonResourceURLs matches the request."'), + '#withNonResourceURLs':: d.fn(help='"`nonResourceURLs` is a set of url prefixes that a user should have access to and may not be empty. 
For example:\\n - \\"/healthz\\" is legal\\n - \\"/hea*\\" is illegal\\n - \\"/hea\\" is legal but matches nothing\\n - \\"/hea/*\\" also matches nothing\\n - \\"/healthz/*\\" matches all per-component health checks.\\n\\"*\\" matches all non-resource urls. if it is present, it must be the only entry. Required."', args=[d.arg(name='nonResourceURLs', type=d.T.array)]), + withNonResourceURLs(nonResourceURLs): { nonResourceURLs: if std.isArray(v=nonResourceURLs) then nonResourceURLs else [nonResourceURLs] }, + '#withNonResourceURLsMixin':: d.fn(help='"`nonResourceURLs` is a set of url prefixes that a user should have access to and may not be empty. For example:\\n - \\"/healthz\\" is legal\\n - \\"/hea*\\" is illegal\\n - \\"/hea\\" is legal but matches nothing\\n - \\"/hea/*\\" also matches nothing\\n - \\"/healthz/*\\" matches all per-component health checks.\\n\\"*\\" matches all non-resource urls. if it is present, it must be the only entry. Required."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='nonResourceURLs', type=d.T.array)]), + withNonResourceURLsMixin(nonResourceURLs): { nonResourceURLs+: if std.isArray(v=nonResourceURLs) then nonResourceURLs else [nonResourceURLs] }, + '#withVerbs':: d.fn(help='"`verbs` is a list of matching verbs and may not be empty. \\"*\\" matches all verbs. If it is present, it must be the only entry. Required."', args=[d.arg(name='verbs', type=d.T.array)]), + withVerbs(verbs): { verbs: if std.isArray(v=verbs) then verbs else [verbs] }, + '#withVerbsMixin':: d.fn(help='"`verbs` is a list of matching verbs and may not be empty. \\"*\\" matches all verbs. If it is present, it must be the only entry. Required."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='verbs', type=d.T.array)]), + withVerbsMixin(verbs): { verbs+: if std.isArray(v=verbs) then verbs else [verbs] }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1beta3/policyRulesWithSubjects.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1beta3/policyRulesWithSubjects.libsonnet new file mode 100644 index 00000000000..b743ffba688 --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1beta3/policyRulesWithSubjects.libsonnet @@ -0,0 +1,18 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='policyRulesWithSubjects', url='', help='"PolicyRulesWithSubjects prescribes a test that applies to a request to an apiserver. The test considers the subject making the request, the verb being requested, and the resource to be acted upon. 
This PolicyRulesWithSubjects matches a request if and only if both (a) at least one member of subjects matches the request and (b) at least one member of resourceRules or nonResourceRules matches the request."'), + '#withNonResourceRules':: d.fn(help='"`nonResourceRules` is a list of NonResourcePolicyRules that identify matching requests according to their verb and the target non-resource URL."', args=[d.arg(name='nonResourceRules', type=d.T.array)]), + withNonResourceRules(nonResourceRules): { nonResourceRules: if std.isArray(v=nonResourceRules) then nonResourceRules else [nonResourceRules] }, + '#withNonResourceRulesMixin':: d.fn(help='"`nonResourceRules` is a list of NonResourcePolicyRules that identify matching requests according to their verb and the target non-resource URL."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='nonResourceRules', type=d.T.array)]), + withNonResourceRulesMixin(nonResourceRules): { nonResourceRules+: if std.isArray(v=nonResourceRules) then nonResourceRules else [nonResourceRules] }, + '#withResourceRules':: d.fn(help='"`resourceRules` is a slice of ResourcePolicyRules that identify matching requests according to their verb and the target resource. At least one of `resourceRules` and `nonResourceRules` has to be non-empty."', args=[d.arg(name='resourceRules', type=d.T.array)]), + withResourceRules(resourceRules): { resourceRules: if std.isArray(v=resourceRules) then resourceRules else [resourceRules] }, + '#withResourceRulesMixin':: d.fn(help='"`resourceRules` is a slice of ResourcePolicyRules that identify matching requests according to their verb and the target resource. At least one of `resourceRules` and `nonResourceRules` has to be non-empty."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='resourceRules', type=d.T.array)]), + withResourceRulesMixin(resourceRules): { resourceRules+: if std.isArray(v=resourceRules) then resourceRules else [resourceRules] }, + '#withSubjects':: d.fn(help='"subjects is the list of normal user, serviceaccount, or group that this rule cares about. There must be at least one member in this slice. A slice that includes both the system:authenticated and system:unauthenticated user groups matches every request. Required."', args=[d.arg(name='subjects', type=d.T.array)]), + withSubjects(subjects): { subjects: if std.isArray(v=subjects) then subjects else [subjects] }, + '#withSubjectsMixin':: d.fn(help='"subjects is the list of normal user, serviceaccount, or group that this rule cares about. There must be at least one member in this slice. A slice that includes both the system:authenticated and system:unauthenticated user groups matches every request. 
Required."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='subjects', type=d.T.array)]), + withSubjectsMixin(subjects): { subjects+: if std.isArray(v=subjects) then subjects else [subjects] }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/flowcontrol/v1beta1/priorityLevelConfiguration.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1beta3/priorityLevelConfiguration.libsonnet similarity index 69% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/flowcontrol/v1beta1/priorityLevelConfiguration.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1beta3/priorityLevelConfiguration.libsonnet index 76f42703756..b13be03a4ea 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/flowcontrol/v1beta1/priorityLevelConfiguration.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1beta3/priorityLevelConfiguration.libsonnet @@ -3,12 +3,10 @@ '#':: d.pkg(name='priorityLevelConfiguration', url='', help='"PriorityLevelConfiguration represents the configuration of a priority level."'), '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. 
This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -19,21 +17,21 @@ withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. 
If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { metadata+: { generateName: generateName } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { metadata+: { labels+: labels } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". 
The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { metadata+: { namespace: namespace } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, @@ -41,19 +39,26 @@ withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. 
They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { metadata+: { uid: uid } }, }, '#new':: d.fn(help='new returns an instance of PriorityLevelConfiguration', args=[d.arg(name='name', type=d.T.string)]), new(name): { - apiVersion: 'flowcontrol.apiserver.k8s.io/v1beta1', + apiVersion: 'flowcontrol.apiserver.k8s.io/v1beta3', kind: 'PriorityLevelConfiguration', } + self.metadata.withName(name=name), '#spec':: d.obj(help='"PriorityLevelConfigurationSpec specifies the configuration of a priority level."'), spec: { - '#limited':: d.obj(help='"LimitedPriorityLevelConfiguration specifies how to handle requests that are subject to limits. It addresses two issues:\\n * How are requests for this priority level limited?\\n * What should be done with requests that exceed the limit?"'), + '#exempt':: d.obj(help='"ExemptPriorityLevelConfiguration describes the configurable aspects of the handling of exempt requests. In the mandatory exempt configuration object the values in the fields here can be modified by authorized users, unlike the rest of the `spec`."'), + exempt: { + '#withLendablePercent':: d.fn(help="\"`lendablePercent` prescribes the fraction of the level's NominalCL that can be borrowed by other priority levels. This value of this field must be between 0 and 100, inclusive, and it defaults to 0. 
The number of seats that other levels can borrow from this level, known as this level's LendableConcurrencyLimit (LendableCL), is defined as follows.\\n\\nLendableCL(i) = round( NominalCL(i) * lendablePercent(i)/100.0 )\"", args=[d.arg(name='lendablePercent', type=d.T.integer)]), + withLendablePercent(lendablePercent): { spec+: { exempt+: { lendablePercent: lendablePercent } } }, + '#withNominalConcurrencyShares':: d.fn(help="\"`nominalConcurrencyShares` (NCS) contributes to the computation of the NominalConcurrencyLimit (NominalCL) of this level. This is the number of execution seats nominally reserved for this priority level. This DOES NOT limit the dispatching from this priority level but affects the other priority levels through the borrowing mechanism. The server's concurrency limit (ServerCL) is divided among all the priority levels in proportion to their NCS values:\\n\\nNominalCL(i) = ceil( ServerCL * NCS(i) / sum_ncs ) sum_ncs = sum[priority level k] NCS(k)\\n\\nBigger numbers mean a larger nominal concurrency limit, at the expense of every other priority level. This field has a default value of zero.\"", args=[d.arg(name='nominalConcurrencyShares', type=d.T.integer)]), + withNominalConcurrencyShares(nominalConcurrencyShares): { spec+: { exempt+: { nominalConcurrencyShares: nominalConcurrencyShares } } }, + }, + '#limited':: d.obj(help='"LimitedPriorityLevelConfiguration specifies how to handle requests that are subject to limits. It addresses two issues:\\n - How are requests for this priority level limited?\\n - What should be done with requests that exceed the limit?"'), limited: { '#limitResponse':: d.obj(help='"LimitResponse defines how to handle requests that can not be executed right now."'), limitResponse: { @@ -69,8 +74,12 @@ '#withType':: d.fn(help='"`type` is \\"Queue\\" or \\"Reject\\". \\"Queue\\" means that requests that can not be executed upon arrival are held in a queue until they can be executed or a queuing limit is reached. \\"Reject\\" means that requests that can not be executed upon arrival are rejected. Required."', args=[d.arg(name='type', type=d.T.string)]), withType(type): { spec+: { limited+: { limitResponse+: { type: type } } } }, }, - '#withAssuredConcurrencyShares':: d.fn(help="\"`assuredConcurrencyShares` (ACS) configures the execution limit, which is a limit on the number of requests of this priority level that may be exeucting at a given time. ACS must be a positive number. The server's concurrency limit (SCL) is divided among the concurrency-controlled priority levels in proportion to their assured concurrency shares. This produces the assured concurrency value (ACV) --- the number of requests that may be executing at a time --- for each such priority level:\\n\\n ACV(l) = ceil( SCL * ACS(l) / ( sum[priority levels k] ACS(k) ) )\\n\\nbigger numbers of ACS mean more reserved concurrent requests (at the expense of every other PL). This field has a default value of 30.\"", args=[d.arg(name='assuredConcurrencyShares', type=d.T.integer)]), - withAssuredConcurrencyShares(assuredConcurrencyShares): { spec+: { limited+: { assuredConcurrencyShares: assuredConcurrencyShares } } }, + '#withBorrowingLimitPercent':: d.fn(help="\"`borrowingLimitPercent`, if present, configures a limit on how many seats this priority level can borrow from other priority levels. The limit is known as this level's BorrowingConcurrencyLimit (BorrowingCL) and is a limit on the total number of seats that this level may borrow at any one time. 
This field holds the ratio of that limit to the level's nominal concurrency limit. When this field is non-nil, it must hold a non-negative integer and the limit is calculated as follows.\\n\\nBorrowingCL(i) = round( NominalCL(i) * borrowingLimitPercent(i)/100.0 )\\n\\nThe value of this field can be more than 100, implying that this priority level can borrow a number of seats that is greater than its own nominal concurrency limit (NominalCL). When this field is left `nil`, the limit is effectively infinite.\"", args=[d.arg(name='borrowingLimitPercent', type=d.T.integer)]), + withBorrowingLimitPercent(borrowingLimitPercent): { spec+: { limited+: { borrowingLimitPercent: borrowingLimitPercent } } }, + '#withLendablePercent':: d.fn(help="\"`lendablePercent` prescribes the fraction of the level's NominalCL that can be borrowed by other priority levels. The value of this field must be between 0 and 100, inclusive, and it defaults to 0. The number of seats that other levels can borrow from this level, known as this level's LendableConcurrencyLimit (LendableCL), is defined as follows.\\n\\nLendableCL(i) = round( NominalCL(i) * lendablePercent(i)/100.0 )\"", args=[d.arg(name='lendablePercent', type=d.T.integer)]), + withLendablePercent(lendablePercent): { spec+: { limited+: { lendablePercent: lendablePercent } } }, + '#withNominalConcurrencyShares':: d.fn(help="\"`nominalConcurrencyShares` (NCS) contributes to the computation of the NominalConcurrencyLimit (NominalCL) of this level. This is the number of execution seats available at this priority level. This is used both for requests dispatched from this priority level as well as requests dispatched from other priority levels borrowing seats from this level. The server's concurrency limit (ServerCL) is divided among the Limited priority levels in proportion to their NCS values:\\n\\nNominalCL(i) = ceil( ServerCL * NCS(i) / sum_ncs ) sum_ncs = sum[priority level k] NCS(k)\\n\\nBigger numbers mean a larger nominal concurrency limit, at the expense of every other priority level. This field has a default value of 30.\"", args=[d.arg(name='nominalConcurrencyShares', type=d.T.integer)]), + withNominalConcurrencyShares(nominalConcurrencyShares): { spec+: { limited+: { nominalConcurrencyShares: nominalConcurrencyShares } } }, }, '#withType':: d.fn(help="\"`type` indicates whether this priority level is subject to limitation on request execution. A value of `\\\"Exempt\\\"` means that requests of this priority level are not subject to a limit (and thus are never queued) and do not detract from the capacity made available to other priority levels. A value of `\\\"Limited\\\"` means that (a) requests of this priority level _are_ subject to limits and (b) some of the server's limited capacity is made available exclusively to this priority level. 
Required.\"", args=[d.arg(name='type', type=d.T.string)]), withType(type): { spec+: { type: type } }, diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1beta3/priorityLevelConfigurationCondition.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1beta3/priorityLevelConfigurationCondition.libsonnet new file mode 100644 index 00000000000..e7ff252eaec --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1beta3/priorityLevelConfigurationCondition.libsonnet @@ -0,0 +1,14 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='priorityLevelConfigurationCondition', url='', help='"PriorityLevelConfigurationCondition defines the condition of priority level."'), + '#withLastTransitionTime':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='lastTransitionTime', type=d.T.string)]), + withLastTransitionTime(lastTransitionTime): { lastTransitionTime: lastTransitionTime }, + '#withMessage':: d.fn(help='"`message` is a human-readable message indicating details about last transition."', args=[d.arg(name='message', type=d.T.string)]), + withMessage(message): { message: message }, + '#withReason':: d.fn(help="\"`reason` is a unique, one-word, CamelCase reason for the condition's last transition.\"", args=[d.arg(name='reason', type=d.T.string)]), + withReason(reason): { reason: reason }, + '#withType':: d.fn(help='"`type` is the type of the condition. Required."', args=[d.arg(name='type', type=d.T.string)]), + withType(type): { type: type }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1beta3/priorityLevelConfigurationReference.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1beta3/priorityLevelConfigurationReference.libsonnet new file mode 100644 index 00000000000..8532fa3d901 --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1beta3/priorityLevelConfigurationReference.libsonnet @@ -0,0 +1,8 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='priorityLevelConfigurationReference', url='', help='"PriorityLevelConfigurationReference contains information that points to the \\"request-priority\\" being used."'), + '#withName':: d.fn(help='"`name` is the name of the priority level configuration being referenced Required."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { name: name }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1beta3/priorityLevelConfigurationSpec.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1beta3/priorityLevelConfigurationSpec.libsonnet new file mode 100644 index 00000000000..e035cdb2d82 --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1beta3/priorityLevelConfigurationSpec.libsonnet @@ -0,0 +1,38 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='priorityLevelConfigurationSpec', url='', 
help='"PriorityLevelConfigurationSpec specifies the configuration of a priority level."'), + '#exempt':: d.obj(help='"ExemptPriorityLevelConfiguration describes the configurable aspects of the handling of exempt requests. In the mandatory exempt configuration object the values in the fields here can be modified by authorized users, unlike the rest of the `spec`."'), + exempt: { + '#withLendablePercent':: d.fn(help="\"`lendablePercent` prescribes the fraction of the level's NominalCL that can be borrowed by other priority levels. This value of this field must be between 0 and 100, inclusive, and it defaults to 0. The number of seats that other levels can borrow from this level, known as this level's LendableConcurrencyLimit (LendableCL), is defined as follows.\\n\\nLendableCL(i) = round( NominalCL(i) * lendablePercent(i)/100.0 )\"", args=[d.arg(name='lendablePercent', type=d.T.integer)]), + withLendablePercent(lendablePercent): { exempt+: { lendablePercent: lendablePercent } }, + '#withNominalConcurrencyShares':: d.fn(help="\"`nominalConcurrencyShares` (NCS) contributes to the computation of the NominalConcurrencyLimit (NominalCL) of this level. This is the number of execution seats nominally reserved for this priority level. This DOES NOT limit the dispatching from this priority level but affects the other priority levels through the borrowing mechanism. The server's concurrency limit (ServerCL) is divided among all the priority levels in proportion to their NCS values:\\n\\nNominalCL(i) = ceil( ServerCL * NCS(i) / sum_ncs ) sum_ncs = sum[priority level k] NCS(k)\\n\\nBigger numbers mean a larger nominal concurrency limit, at the expense of every other priority level. This field has a default value of zero.\"", args=[d.arg(name='nominalConcurrencyShares', type=d.T.integer)]), + withNominalConcurrencyShares(nominalConcurrencyShares): { exempt+: { nominalConcurrencyShares: nominalConcurrencyShares } }, + }, + '#limited':: d.obj(help='"LimitedPriorityLevelConfiguration specifies how to handle requests that are subject to limits. It addresses two issues:\\n - How are requests for this priority level limited?\\n - What should be done with requests that exceed the limit?"'), + limited: { + '#limitResponse':: d.obj(help='"LimitResponse defines how to handle requests that can not be executed right now."'), + limitResponse: { + '#queuing':: d.obj(help='"QueuingConfiguration holds the configuration parameters for queuing"'), + queuing: { + '#withHandSize':: d.fn(help="\"`handSize` is a small positive number that configures the shuffle sharding of requests into queues. When enqueuing a request at this priority level the request's flow identifier (a string pair) is hashed and the hash value is used to shuffle the list of queues and deal a hand of the size specified here. The request is put into one of the shortest queues in that hand. `handSize` must be no larger than `queues`, and should be significantly smaller (so that a few heavy flows do not saturate most of the queues). See the user-facing documentation for more extensive guidance on setting this field. This field has a default value of 8.\"", args=[d.arg(name='handSize', type=d.T.integer)]), + withHandSize(handSize): { limited+: { limitResponse+: { queuing+: { handSize: handSize } } } }, + '#withQueueLengthLimit':: d.fn(help='"`queueLengthLimit` is the maximum number of requests allowed to be waiting in a given queue of this priority level at a time; excess requests are rejected. This value must be positive. 
If not specified, it will be defaulted to 50."', args=[d.arg(name='queueLengthLimit', type=d.T.integer)]), + withQueueLengthLimit(queueLengthLimit): { limited+: { limitResponse+: { queuing+: { queueLengthLimit: queueLengthLimit } } } }, + '#withQueues':: d.fn(help='"`queues` is the number of queues for this priority level. The queues exist independently at each apiserver. The value must be positive. Setting it to 1 effectively precludes shufflesharding and thus makes the distinguisher method of associated flow schemas irrelevant. This field has a default value of 64."', args=[d.arg(name='queues', type=d.T.integer)]), + withQueues(queues): { limited+: { limitResponse+: { queuing+: { queues: queues } } } }, + }, + '#withType':: d.fn(help='"`type` is \\"Queue\\" or \\"Reject\\". \\"Queue\\" means that requests that can not be executed upon arrival are held in a queue until they can be executed or a queuing limit is reached. \\"Reject\\" means that requests that can not be executed upon arrival are rejected. Required."', args=[d.arg(name='type', type=d.T.string)]), + withType(type): { limited+: { limitResponse+: { type: type } } }, + }, + '#withBorrowingLimitPercent':: d.fn(help="\"`borrowingLimitPercent`, if present, configures a limit on how many seats this priority level can borrow from other priority levels. The limit is known as this level's BorrowingConcurrencyLimit (BorrowingCL) and is a limit on the total number of seats that this level may borrow at any one time. This field holds the ratio of that limit to the level's nominal concurrency limit. When this field is non-nil, it must hold a non-negative integer and the limit is calculated as follows.\\n\\nBorrowingCL(i) = round( NominalCL(i) * borrowingLimitPercent(i)/100.0 )\\n\\nThe value of this field can be more than 100, implying that this priority level can borrow a number of seats that is greater than its own nominal concurrency limit (NominalCL). When this field is left `nil`, the limit is effectively infinite.\"", args=[d.arg(name='borrowingLimitPercent', type=d.T.integer)]), + withBorrowingLimitPercent(borrowingLimitPercent): { limited+: { borrowingLimitPercent: borrowingLimitPercent } }, + '#withLendablePercent':: d.fn(help="\"`lendablePercent` prescribes the fraction of the level's NominalCL that can be borrowed by other priority levels. The value of this field must be between 0 and 100, inclusive, and it defaults to 0. The number of seats that other levels can borrow from this level, known as this level's LendableConcurrencyLimit (LendableCL), is defined as follows.\\n\\nLendableCL(i) = round( NominalCL(i) * lendablePercent(i)/100.0 )\"", args=[d.arg(name='lendablePercent', type=d.T.integer)]), + withLendablePercent(lendablePercent): { limited+: { lendablePercent: lendablePercent } }, + '#withNominalConcurrencyShares':: d.fn(help="\"`nominalConcurrencyShares` (NCS) contributes to the computation of the NominalConcurrencyLimit (NominalCL) of this level. This is the number of execution seats available at this priority level. This is used both for requests dispatched from this priority level as well as requests dispatched from other priority levels borrowing seats from this level. The server's concurrency limit (ServerCL) is divided among the Limited priority levels in proportion to their NCS values:\\n\\nNominalCL(i) = ceil( ServerCL * NCS(i) / sum_ncs ) sum_ncs = sum[priority level k] NCS(k)\\n\\nBigger numbers mean a larger nominal concurrency limit, at the expense of every other priority level. 
This field has a default value of 30.\"", args=[d.arg(name='nominalConcurrencyShares', type=d.T.integer)]), + withNominalConcurrencyShares(nominalConcurrencyShares): { limited+: { nominalConcurrencyShares: nominalConcurrencyShares } }, + }, + '#withType':: d.fn(help="\"`type` indicates whether this priority level is subject to limitation on request execution. A value of `\\\"Exempt\\\"` means that requests of this priority level are not subject to a limit (and thus are never queued) and do not detract from the capacity made available to other priority levels. A value of `\\\"Limited\\\"` means that (a) requests of this priority level _are_ subject to limits and (b) some of the server's limited capacity is made available exclusively to this priority level. Required.\"", args=[d.arg(name='type', type=d.T.string)]), + withType(type): { type: type }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1beta3/priorityLevelConfigurationStatus.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1beta3/priorityLevelConfigurationStatus.libsonnet new file mode 100644 index 00000000000..9432a99381a --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1beta3/priorityLevelConfigurationStatus.libsonnet @@ -0,0 +1,10 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='priorityLevelConfigurationStatus', url='', help='"PriorityLevelConfigurationStatus represents the current state of a \\"request-priority\\"."'), + '#withConditions':: d.fn(help='"`conditions` is the current state of \\"request-priority\\"."', args=[d.arg(name='conditions', type=d.T.array)]), + withConditions(conditions): { conditions: if std.isArray(v=conditions) then conditions else [conditions] }, + '#withConditionsMixin':: d.fn(help='"`conditions` is the current state of \\"request-priority\\"."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='conditions', type=d.T.array)]), + withConditionsMixin(conditions): { conditions+: if std.isArray(v=conditions) then conditions else [conditions] }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1beta3/queuingConfiguration.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1beta3/queuingConfiguration.libsonnet new file mode 100644 index 00000000000..e74a2f38ed7 --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1beta3/queuingConfiguration.libsonnet @@ -0,0 +1,12 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='queuingConfiguration', url='', help='"QueuingConfiguration holds the configuration parameters for queuing"'), + '#withHandSize':: d.fn(help="\"`handSize` is a small positive number that configures the shuffle sharding of requests into queues. When enqueuing a request at this priority level the request's flow identifier (a string pair) is hashed and the hash value is used to shuffle the list of queues and deal a hand of the size specified here. The request is put into one of the shortest queues in that hand. `handSize` must be no larger than `queues`, and should be significantly smaller (so that a few heavy flows do not saturate most of the queues). 
See the user-facing documentation for more extensive guidance on setting this field. This field has a default value of 8.\"", args=[d.arg(name='handSize', type=d.T.integer)]), + withHandSize(handSize): { handSize: handSize }, + '#withQueueLengthLimit':: d.fn(help='"`queueLengthLimit` is the maximum number of requests allowed to be waiting in a given queue of this priority level at a time; excess requests are rejected. This value must be positive. If not specified, it will be defaulted to 50."', args=[d.arg(name='queueLengthLimit', type=d.T.integer)]), + withQueueLengthLimit(queueLengthLimit): { queueLengthLimit: queueLengthLimit }, + '#withQueues':: d.fn(help='"`queues` is the number of queues for this priority level. The queues exist independently at each apiserver. The value must be positive. Setting it to 1 effectively precludes shufflesharding and thus makes the distinguisher method of associated flow schemas irrelevant. This field has a default value of 64."', args=[d.arg(name='queues', type=d.T.integer)]), + withQueues(queues): { queues: queues }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1beta3/resourcePolicyRule.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1beta3/resourcePolicyRule.libsonnet new file mode 100644 index 00000000000..4b74e679f01 --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1beta3/resourcePolicyRule.libsonnet @@ -0,0 +1,24 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='resourcePolicyRule', url='', help="\"ResourcePolicyRule is a predicate that matches some resource requests, testing the request's verb and the target resource. A ResourcePolicyRule matches a resource request if and only if: (a) at least one member of verbs matches the request, (b) at least one member of apiGroups matches the request, (c) at least one member of resources matches the request, and (d) either (d1) the request does not specify a namespace (i.e., `Namespace==\\\"\\\"`) and clusterScope is true or (d2) the request specifies a namespace and least one member of namespaces matches the request's namespace.\""), + '#withApiGroups':: d.fn(help='"`apiGroups` is a list of matching API groups and may not be empty. \\"*\\" matches all API groups and, if present, must be the only entry. Required."', args=[d.arg(name='apiGroups', type=d.T.array)]), + withApiGroups(apiGroups): { apiGroups: if std.isArray(v=apiGroups) then apiGroups else [apiGroups] }, + '#withApiGroupsMixin':: d.fn(help='"`apiGroups` is a list of matching API groups and may not be empty. \\"*\\" matches all API groups and, if present, must be the only entry. Required."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='apiGroups', type=d.T.array)]), + withApiGroupsMixin(apiGroups): { apiGroups+: if std.isArray(v=apiGroups) then apiGroups else [apiGroups] }, + '#withClusterScope':: d.fn(help='"`clusterScope` indicates whether to match requests that do not specify a namespace (which happens either because the resource is not namespaced or the request targets all namespaces). 
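As an illustrative aside (not part of the vendored patch): the generated queuingConfiguration helpers compose like any other k8s-libsonnet object. A minimal sketch, assuming the 1.29 tree is importable through its main.libsonnet entry point as `k`; the values shown are just the documented defaults.

    // Illustrative only; adjust the import path to how the vendored tree is wired in your setup.
    local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';
    local queuing = k.flowcontrol.v1beta3.queuingConfiguration;

    // Shuffle-sharded queuing for a Limited priority level:
    // 64 queues, hand size 8, at most 50 waiting requests per queue.
    queuing.withQueues(64)
    + queuing.withHandSize(8)
    + queuing.withQueueLengthLimit(50)
    // => { queues: 64, handSize: 8, queueLengthLimit: 50 }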
If this field is omitted or false then the `namespaces` field must contain a non-empty list."', args=[d.arg(name='clusterScope', type=d.T.boolean)]), + withClusterScope(clusterScope): { clusterScope: clusterScope }, + '#withNamespaces':: d.fn(help='"`namespaces` is a list of target namespaces that restricts matches. A request that specifies a target namespace matches only if either (a) this list contains that target namespace or (b) this list contains \\"*\\". Note that \\"*\\" matches any specified namespace but does not match a request that _does not specify_ a namespace (see the `clusterScope` field for that). This list may be empty, but only if `clusterScope` is true."', args=[d.arg(name='namespaces', type=d.T.array)]), + withNamespaces(namespaces): { namespaces: if std.isArray(v=namespaces) then namespaces else [namespaces] }, + '#withNamespacesMixin':: d.fn(help='"`namespaces` is a list of target namespaces that restricts matches. A request that specifies a target namespace matches only if either (a) this list contains that target namespace or (b) this list contains \\"*\\". Note that \\"*\\" matches any specified namespace but does not match a request that _does not specify_ a namespace (see the `clusterScope` field for that). This list may be empty, but only if `clusterScope` is true."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='namespaces', type=d.T.array)]), + withNamespacesMixin(namespaces): { namespaces+: if std.isArray(v=namespaces) then namespaces else [namespaces] }, + '#withResources':: d.fn(help='"`resources` is a list of matching resources (i.e., lowercase and plural) with, if desired, subresource. For example, [ \\"services\\", \\"nodes/status\\" ]. This list may not be empty. \\"*\\" matches all resources and, if present, must be the only entry. Required."', args=[d.arg(name='resources', type=d.T.array)]), + withResources(resources): { resources: if std.isArray(v=resources) then resources else [resources] }, + '#withResourcesMixin':: d.fn(help='"`resources` is a list of matching resources (i.e., lowercase and plural) with, if desired, subresource. For example, [ \\"services\\", \\"nodes/status\\" ]. This list may not be empty. \\"*\\" matches all resources and, if present, must be the only entry. Required."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='resources', type=d.T.array)]), + withResourcesMixin(resources): { resources+: if std.isArray(v=resources) then resources else [resources] }, + '#withVerbs':: d.fn(help='"`verbs` is a list of matching verbs and may not be empty. \\"*\\" matches all verbs and, if present, must be the only entry. Required."', args=[d.arg(name='verbs', type=d.T.array)]), + withVerbs(verbs): { verbs: if std.isArray(v=verbs) then verbs else [verbs] }, + '#withVerbsMixin':: d.fn(help='"`verbs` is a list of matching verbs and may not be empty. \\"*\\" matches all verbs and, if present, must be the only entry. 
Required."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='verbs', type=d.T.array)]), + withVerbsMixin(verbs): { verbs+: if std.isArray(v=verbs) then verbs else [verbs] }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1beta3/serviceAccountSubject.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1beta3/serviceAccountSubject.libsonnet new file mode 100644 index 00000000000..1887fc018c0 --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1beta3/serviceAccountSubject.libsonnet @@ -0,0 +1,10 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='serviceAccountSubject', url='', help='"ServiceAccountSubject holds detailed information for service-account-kind subject."'), + '#withName':: d.fn(help='"`name` is the name of matching ServiceAccount objects, or \\"*\\" to match regardless of name. Required."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { name: name }, + '#withNamespace':: d.fn(help='"`namespace` is the namespace of matching ServiceAccount objects. Required."', args=[d.arg(name='namespace', type=d.T.string)]), + withNamespace(namespace): { namespace: namespace }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1beta3/subject.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1beta3/subject.libsonnet new file mode 100644 index 00000000000..16f120b33b5 --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1beta3/subject.libsonnet @@ -0,0 +1,25 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='subject', url='', help='"Subject matches the originator of a request, as identified by the request authentication system. There are three ways of matching an originator; by user, group, or service account."'), + '#group':: d.obj(help='"GroupSubject holds detailed information for group-kind subject."'), + group: { + '#withName':: d.fn(help='"name is the user group that matches, or \\"*\\" to match all user groups. See https://github.com/kubernetes/apiserver/blob/master/pkg/authentication/user/user.go for some well-known group names. Required."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { group+: { name: name } }, + }, + '#serviceAccount':: d.obj(help='"ServiceAccountSubject holds detailed information for service-account-kind subject."'), + serviceAccount: { + '#withName':: d.fn(help='"`name` is the name of matching ServiceAccount objects, or \\"*\\" to match regardless of name. Required."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { serviceAccount+: { name: name } }, + '#withNamespace':: d.fn(help='"`namespace` is the namespace of matching ServiceAccount objects. Required."', args=[d.arg(name='namespace', type=d.T.string)]), + withNamespace(namespace): { serviceAccount+: { namespace: namespace } }, + }, + '#user':: d.obj(help='"UserSubject holds detailed information for user-kind subject."'), + user: { + '#withName':: d.fn(help='"`name` is the username that matches, or \\"*\\" to match all usernames. 
Required."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { user+: { name: name } }, + }, + '#withKind':: d.fn(help='"`kind` indicates which one of the other fields is non-empty. Required"', args=[d.arg(name='kind', type=d.T.string)]), + withKind(kind): { kind: kind }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1beta3/userSubject.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1beta3/userSubject.libsonnet new file mode 100644 index 00000000000..00a560d3fbe --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/flowcontrol/v1beta3/userSubject.libsonnet @@ -0,0 +1,8 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='userSubject', url='', help='"UserSubject holds detailed information for user-kind subject."'), + '#withName':: d.fn(help='"`name` is the username that matches, or \\"*\\" to match all usernames. Required."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { name: name }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/meta/main.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/meta/main.libsonnet new file mode 100644 index 00000000000..6b4c5af3ba8 --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/meta/main.libsonnet @@ -0,0 +1,5 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='meta', url='', help=''), + v1: (import 'v1/main.libsonnet'), +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/meta/v1/apiGroup.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/meta/v1/apiGroup.libsonnet new file mode 100644 index 00000000000..75273eb706f --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/meta/v1/apiGroup.libsonnet @@ -0,0 +1,28 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='apiGroup', url='', help='"APIGroup contains the name, the supported versions, and the preferred version of a group."'), + '#new':: d.fn(help='new returns an instance of APIGroup', args=[d.arg(name='name', type=d.T.string)]), + new(name): { + apiVersion: 'v1', + kind: 'APIGroup', + } + self.metadata.withName(name=name), + '#preferredVersion':: d.obj(help='"GroupVersion contains the \\"group/version\\" and \\"version\\" string of a version. It is made a struct to keep extensibility."'), + preferredVersion: { + '#withGroupVersion':: d.fn(help='"groupVersion specifies the API group and version in the form \\"group/version\\', args=[d.arg(name='groupVersion', type=d.T.string)]), + withGroupVersion(groupVersion): { preferredVersion+: { groupVersion: groupVersion } }, + '#withVersion':: d.fn(help='"version specifies the version in the form of \\"version\\". 
This is to save the clients the trouble of splitting the GroupVersion."', args=[d.arg(name='version', type=d.T.string)]), + withVersion(version): { preferredVersion+: { version: version } }, + }, + '#withName':: d.fn(help='"name is the name of the group."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { name: name }, + '#withServerAddressByClientCIDRs':: d.fn(help='"a map of client CIDR to server address that is serving this group. This is to help clients reach servers in the most network-efficient way possible. Clients can use the appropriate server address as per the CIDR that they match. In case of multiple matches, clients should use the longest matching CIDR. The server returns only those CIDRs that it thinks that the client can match. For example: the master will return an internal IP CIDR only, if the client reaches the server using an internal IP. Server looks at X-Forwarded-For header or X-Real-Ip header or request.RemoteAddr (in that order) to get the client IP."', args=[d.arg(name='serverAddressByClientCIDRs', type=d.T.array)]), + withServerAddressByClientCIDRs(serverAddressByClientCIDRs): { serverAddressByClientCIDRs: if std.isArray(v=serverAddressByClientCIDRs) then serverAddressByClientCIDRs else [serverAddressByClientCIDRs] }, + '#withServerAddressByClientCIDRsMixin':: d.fn(help='"a map of client CIDR to server address that is serving this group. This is to help clients reach servers in the most network-efficient way possible. Clients can use the appropriate server address as per the CIDR that they match. In case of multiple matches, clients should use the longest matching CIDR. The server returns only those CIDRs that it thinks that the client can match. For example: the master will return an internal IP CIDR only, if the client reaches the server using an internal IP. 
Server looks at X-Forwarded-For header or X-Real-Ip header or request.RemoteAddr (in that order) to get the client IP."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='serverAddressByClientCIDRs', type=d.T.array)]), + withServerAddressByClientCIDRsMixin(serverAddressByClientCIDRs): { serverAddressByClientCIDRs+: if std.isArray(v=serverAddressByClientCIDRs) then serverAddressByClientCIDRs else [serverAddressByClientCIDRs] }, + '#withVersions':: d.fn(help='"versions are the versions supported in this group."', args=[d.arg(name='versions', type=d.T.array)]), + withVersions(versions): { versions: if std.isArray(v=versions) then versions else [versions] }, + '#withVersionsMixin':: d.fn(help='"versions are the versions supported in this group."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='versions', type=d.T.array)]), + withVersionsMixin(versions): { versions+: if std.isArray(v=versions) then versions else [versions] }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/meta/v1/apiGroupList.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/meta/v1/apiGroupList.libsonnet new file mode 100644 index 00000000000..eb97801c35e --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/meta/v1/apiGroupList.libsonnet @@ -0,0 +1,15 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='apiGroupList', url='', help='"APIGroupList is a list of APIGroup, to allow clients to discover the API at /apis."'), + '#new':: d.fn(help='new returns an instance of APIGroupList', args=[d.arg(name='name', type=d.T.string)]), + new(name): { + apiVersion: 'v1', + kind: 'APIGroupList', + } + self.metadata.withName(name=name), + '#withGroups':: d.fn(help='"groups is a list of APIGroup."', args=[d.arg(name='groups', type=d.T.array)]), + withGroups(groups): { groups: if std.isArray(v=groups) then groups else [groups] }, + '#withGroupsMixin':: d.fn(help='"groups is a list of APIGroup."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='groups', type=d.T.array)]), + withGroupsMixin(groups): { groups+: if std.isArray(v=groups) then groups else [groups] }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/meta/v1/apiResource.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/meta/v1/apiResource.libsonnet new file mode 100644 index 00000000000..fbb63a6ea80 --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/meta/v1/apiResource.libsonnet @@ -0,0 +1,32 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='apiResource', url='', help='"APIResource specifies the name of a resource and whether it is namespaced."'), + '#withCategories':: d.fn(help="\"categories is a list of the grouped resources this resource belongs to (e.g. 'all')\"", args=[d.arg(name='categories', type=d.T.array)]), + withCategories(categories): { categories: if std.isArray(v=categories) then categories else [categories] }, + '#withCategoriesMixin':: d.fn(help="\"categories is a list of the grouped resources this resource belongs to (e.g. 
'all')\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='categories', type=d.T.array)]), + withCategoriesMixin(categories): { categories+: if std.isArray(v=categories) then categories else [categories] }, + '#withGroup':: d.fn(help='"group is the preferred group of the resource. Empty implies the group of the containing resource list. For subresources, this may have a different value, for example: Scale\\"."', args=[d.arg(name='group', type=d.T.string)]), + withGroup(group): { group: group }, + '#withKind':: d.fn(help="\"kind is the kind for the resource (e.g. 'Foo' is the kind for a resource 'foo')\"", args=[d.arg(name='kind', type=d.T.string)]), + withKind(kind): { kind: kind }, + '#withName':: d.fn(help='"name is the plural name of the resource."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { name: name }, + '#withNamespaced':: d.fn(help='"namespaced indicates if a resource is namespaced or not."', args=[d.arg(name='namespaced', type=d.T.boolean)]), + withNamespaced(namespaced): { namespaced: namespaced }, + '#withShortNames':: d.fn(help='"shortNames is a list of suggested short names of the resource."', args=[d.arg(name='shortNames', type=d.T.array)]), + withShortNames(shortNames): { shortNames: if std.isArray(v=shortNames) then shortNames else [shortNames] }, + '#withShortNamesMixin':: d.fn(help='"shortNames is a list of suggested short names of the resource."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='shortNames', type=d.T.array)]), + withShortNamesMixin(shortNames): { shortNames+: if std.isArray(v=shortNames) then shortNames else [shortNames] }, + '#withSingularName':: d.fn(help='"singularName is the singular name of the resource. This allows clients to handle plural and singular opaquely. The singularName is more correct for reporting status on a single item and both singular and plural are allowed from the kubectl CLI interface."', args=[d.arg(name='singularName', type=d.T.string)]), + withSingularName(singularName): { singularName: singularName }, + '#withStorageVersionHash':: d.fn(help='"The hash value of the storage version, the version this resource is converted to when written to the data store. Value must be treated as opaque by clients. Only equality comparison on the value is valid. This is an alpha feature and may change or be removed in the future. The field is populated by the apiserver only if the StorageVersionHash feature gate is enabled. This field will remain optional even if it graduates."', args=[d.arg(name='storageVersionHash', type=d.T.string)]), + withStorageVersionHash(storageVersionHash): { storageVersionHash: storageVersionHash }, + '#withVerbs':: d.fn(help='"verbs is a list of supported kube verbs (this includes get, list, watch, create, update, patch, delete, deletecollection, and proxy)"', args=[d.arg(name='verbs', type=d.T.array)]), + withVerbs(verbs): { verbs: if std.isArray(v=verbs) then verbs else [verbs] }, + '#withVerbsMixin':: d.fn(help='"verbs is a list of supported kube verbs (this includes get, list, watch, create, update, patch, delete, deletecollection, and proxy)"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='verbs', type=d.T.array)]), + withVerbsMixin(verbs): { verbs+: if std.isArray(v=verbs) then verbs else [verbs] }, + '#withVersion':: d.fn(help="\"version is the preferred version of the resource. 
Empty implies the version of the containing resource list For subresources, this may have a different value, for example: v1 (while inside a v1beta1 version of the core resource's group)\\\".\"", args=[d.arg(name='version', type=d.T.string)]), + withVersion(version): { version: version }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/meta/v1/apiResourceList.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/meta/v1/apiResourceList.libsonnet new file mode 100644 index 00000000000..6f5721ba59c --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/meta/v1/apiResourceList.libsonnet @@ -0,0 +1,17 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='apiResourceList', url='', help='"APIResourceList is a list of APIResource, it is used to expose the name of the resources supported in a specific group and version, and if the resource is namespaced."'), + '#new':: d.fn(help='new returns an instance of APIResourceList', args=[d.arg(name='name', type=d.T.string)]), + new(name): { + apiVersion: 'v1', + kind: 'APIResourceList', + } + self.metadata.withName(name=name), + '#withGroupVersion':: d.fn(help='"groupVersion is the group and version this APIResourceList is for."', args=[d.arg(name='groupVersion', type=d.T.string)]), + withGroupVersion(groupVersion): { groupVersion: groupVersion }, + '#withResources':: d.fn(help='"resources contains the name of the resources and if they are namespaced."', args=[d.arg(name='resources', type=d.T.array)]), + withResources(resources): { resources: if std.isArray(v=resources) then resources else [resources] }, + '#withResourcesMixin':: d.fn(help='"resources contains the name of the resources and if they are namespaced."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='resources', type=d.T.array)]), + withResourcesMixin(resources): { resources+: if std.isArray(v=resources) then resources else [resources] }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/meta/v1/apiVersions.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/meta/v1/apiVersions.libsonnet new file mode 100644 index 00000000000..4bbe76eeb8e --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/meta/v1/apiVersions.libsonnet @@ -0,0 +1,19 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='apiVersions', url='', help='"APIVersions lists the versions that are available, to allow clients to discover the API at /api, which is the root path of the legacy v1 API."'), + '#new':: d.fn(help='new returns an instance of APIVersions', args=[d.arg(name='name', type=d.T.string)]), + new(name): { + apiVersion: 'v1', + kind: 'APIVersions', + } + self.metadata.withName(name=name), + '#withServerAddressByClientCIDRs':: d.fn(help='"a map of client CIDR to server address that is serving this group. This is to help clients reach servers in the most network-efficient way possible. Clients can use the appropriate server address as per the CIDR that they match. In case of multiple matches, clients should use the longest matching CIDR. The server returns only those CIDRs that it thinks that the client can match. 
For example: the master will return an internal IP CIDR only, if the client reaches the server using an internal IP. Server looks at X-Forwarded-For header or X-Real-Ip header or request.RemoteAddr (in that order) to get the client IP."', args=[d.arg(name='serverAddressByClientCIDRs', type=d.T.array)]), + withServerAddressByClientCIDRs(serverAddressByClientCIDRs): { serverAddressByClientCIDRs: if std.isArray(v=serverAddressByClientCIDRs) then serverAddressByClientCIDRs else [serverAddressByClientCIDRs] }, + '#withServerAddressByClientCIDRsMixin':: d.fn(help='"a map of client CIDR to server address that is serving this group. This is to help clients reach servers in the most network-efficient way possible. Clients can use the appropriate server address as per the CIDR that they match. In case of multiple matches, clients should use the longest matching CIDR. The server returns only those CIDRs that it thinks that the client can match. For example: the master will return an internal IP CIDR only, if the client reaches the server using an internal IP. Server looks at X-Forwarded-For header or X-Real-Ip header or request.RemoteAddr (in that order) to get the client IP."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='serverAddressByClientCIDRs', type=d.T.array)]), + withServerAddressByClientCIDRsMixin(serverAddressByClientCIDRs): { serverAddressByClientCIDRs+: if std.isArray(v=serverAddressByClientCIDRs) then serverAddressByClientCIDRs else [serverAddressByClientCIDRs] }, + '#withVersions':: d.fn(help='"versions are the api versions that are available."', args=[d.arg(name='versions', type=d.T.array)]), + withVersions(versions): { versions: if std.isArray(v=versions) then versions else [versions] }, + '#withVersionsMixin':: d.fn(help='"versions are the api versions that are available."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='versions', type=d.T.array)]), + withVersionsMixin(versions): { versions+: if std.isArray(v=versions) then versions else [versions] }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/meta/v1/condition.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/meta/v1/condition.libsonnet new file mode 100644 index 00000000000..5790d945e62 --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/meta/v1/condition.libsonnet @@ -0,0 +1,16 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='condition', url='', help='"Condition contains details for one aspect of the current state of this API Resource."'), + '#withLastTransitionTime':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='lastTransitionTime', type=d.T.string)]), + withLastTransitionTime(lastTransitionTime): { lastTransitionTime: lastTransitionTime }, + '#withMessage':: d.fn(help='"message is a human readable message indicating details about the transition. This may be an empty string."', args=[d.arg(name='message', type=d.T.string)]), + withMessage(message): { message: message }, + '#withObservedGeneration':: d.fn(help='"observedGeneration represents the .metadata.generation that the condition was set based upon. 
For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance."', args=[d.arg(name='observedGeneration', type=d.T.integer)]), + withObservedGeneration(observedGeneration): { observedGeneration: observedGeneration }, + '#withReason':: d.fn(help="\"reason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty.\"", args=[d.arg(name='reason', type=d.T.string)]), + withReason(reason): { reason: reason }, + '#withType':: d.fn(help='"type of condition in CamelCase or in foo.example.com/CamelCase."', args=[d.arg(name='type', type=d.T.string)]), + withType(type): { type: type }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/meta/v1/deleteOptions.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/meta/v1/deleteOptions.libsonnet new file mode 100644 index 00000000000..8ea64668844 --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/meta/v1/deleteOptions.libsonnet @@ -0,0 +1,28 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='deleteOptions', url='', help='"DeleteOptions may be provided when deleting an API object."'), + '#new':: d.fn(help='new returns an instance of DeleteOptions', args=[d.arg(name='name', type=d.T.string)]), + new(name): { + apiVersion: 'storage.k8s.io/v1beta1', + kind: 'DeleteOptions', + } + self.metadata.withName(name=name), + '#preconditions':: d.obj(help='"Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out."'), + preconditions: { + '#withResourceVersion':: d.fn(help='"Specifies the target ResourceVersion"', args=[d.arg(name='resourceVersion', type=d.T.string)]), + withResourceVersion(resourceVersion): { preconditions+: { resourceVersion: resourceVersion } }, + '#withUid':: d.fn(help='"Specifies the target UID."', args=[d.arg(name='uid', type=d.T.string)]), + withUid(uid): { preconditions+: { uid: uid } }, + }, + '#withDryRun':: d.fn(help='"When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"', args=[d.arg(name='dryRun', type=d.T.array)]), + withDryRun(dryRun): { dryRun: if std.isArray(v=dryRun) then dryRun else [dryRun] }, + '#withDryRunMixin':: d.fn(help='"When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='dryRun', type=d.T.array)]), + withDryRunMixin(dryRun): { dryRun+: if std.isArray(v=dryRun) then dryRun else [dryRun] }, + '#withGracePeriodSeconds':: d.fn(help='"The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. 
If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately."', args=[d.arg(name='gracePeriodSeconds', type=d.T.integer)]), + withGracePeriodSeconds(gracePeriodSeconds): { gracePeriodSeconds: gracePeriodSeconds }, + '#withOrphanDependents':: d.fn(help="\"Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \\\"orphan\\\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.\"", args=[d.arg(name='orphanDependents', type=d.T.boolean)]), + withOrphanDependents(orphanDependents): { orphanDependents: orphanDependents }, + '#withPropagationPolicy':: d.fn(help="\"Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.\"", args=[d.arg(name='propagationPolicy', type=d.T.string)]), + withPropagationPolicy(propagationPolicy): { propagationPolicy: propagationPolicy }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/meta/v1/fieldsV1.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/meta/v1/fieldsV1.libsonnet new file mode 100644 index 00000000000..08cb9e7b3af --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/meta/v1/fieldsV1.libsonnet @@ -0,0 +1,6 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='fieldsV1', url='', help="\"FieldsV1 stores a set of fields in a data structure like a Trie, in JSON format.\\n\\nEach key is either a '.' representing the field itself, and will always map to an empty set, or a string representing a sub-field or item. 
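A quick, illustrative sketch of composing the deleteOptions helpers above (not part of the vendored diff); the `k` import path and the resourceVersion value are assumptions.

    // Illustrative only; adjust the import path to your setup.
    local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';
    local del = k.meta.v1.deleteOptions;

    // Foreground cascading delete with a 30s grace period,
    // guarded by a resourceVersion precondition.
    del.withPropagationPolicy('Foreground')
    + del.withGracePeriodSeconds(30)
    + del.preconditions.withResourceVersion('12345')  // hypothetical resourceVersion
    // => { propagationPolicy: 'Foreground', gracePeriodSeconds: 30, preconditions: { resourceVersion: '12345' } }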
The string will follow one of these four formats: 'f:\u003cname\u003e', where \u003cname\u003e is the name of a field in a struct, or key in a map 'v:\u003cvalue\u003e', where \u003cvalue\u003e is the exact json formatted value of a list item 'i:\u003cindex\u003e', where \u003cindex\u003e is position of a item in a list 'k:\u003ckeys\u003e', where \u003ckeys\u003e is a map of a list item's key fields to their unique values If a key maps to an empty Fields value, the field that key represents is part of the set.\\n\\nThe exact format is defined in sigs.k8s.io/structured-merge-diff\""), + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/meta/v1/groupVersionForDiscovery.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/meta/v1/groupVersionForDiscovery.libsonnet new file mode 100644 index 00000000000..2b6f62929dd --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/meta/v1/groupVersionForDiscovery.libsonnet @@ -0,0 +1,10 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='groupVersionForDiscovery', url='', help='"GroupVersion contains the \\"group/version\\" and \\"version\\" string of a version. It is made a struct to keep extensibility."'), + '#withGroupVersion':: d.fn(help='"groupVersion specifies the API group and version in the form \\"group/version\\', args=[d.arg(name='groupVersion', type=d.T.string)]), + withGroupVersion(groupVersion): { groupVersion: groupVersion }, + '#withVersion':: d.fn(help='"version specifies the version in the form of \\"version\\". This is to save the clients the trouble of splitting the GroupVersion."', args=[d.arg(name='version', type=d.T.string)]), + withVersion(version): { version: version }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/meta/v1/labelSelector.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/meta/v1/labelSelector.libsonnet new file mode 100644 index 00000000000..5edc8eab3cf --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/meta/v1/labelSelector.libsonnet @@ -0,0 +1,14 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='labelSelector', url='', help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), + '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressions(matchExpressions): { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] }, + '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressionsMixin(matchExpressions): { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] }, + '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabels(matchLabels): { matchLabels: matchLabels }, + '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabelsMixin(matchLabels): { matchLabels+: matchLabels }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/meta/v1/labelSelectorRequirement.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/meta/v1/labelSelectorRequirement.libsonnet new file mode 100644 index 00000000000..4c4fc3a03c3 --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/meta/v1/labelSelectorRequirement.libsonnet @@ -0,0 +1,14 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='labelSelectorRequirement', url='', help='"A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values."'), + '#withKey':: d.fn(help='"key is the label key that the selector applies to."', args=[d.arg(name='key', type=d.T.string)]), + withKey(key): { key: key }, + '#withOperator':: d.fn(help="\"operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.\"", args=[d.arg(name='operator', type=d.T.string)]), + withOperator(operator): { operator: operator }, + '#withValues':: d.fn(help='"values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch."', args=[d.arg(name='values', type=d.T.array)]), + withValues(values): { values: if std.isArray(v=values) then values else [values] }, + '#withValuesMixin':: d.fn(help='"values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. 
This array is replaced during a strategic merge patch."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='values', type=d.T.array)]), + withValuesMixin(values): { values+: if std.isArray(v=values) then values else [values] }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/meta/v1/listMeta.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/meta/v1/listMeta.libsonnet new file mode 100644 index 00000000000..b5fba9a144d --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/meta/v1/listMeta.libsonnet @@ -0,0 +1,14 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='listMeta', url='', help='"ListMeta describes metadata that synthetic resources must have, including lists and various status objects. A resource may have only one of {ObjectMeta, ListMeta}."'), + '#withContinue':: d.fn(help='"continue may be set if the user set a limit on the number of items returned, and indicates that the server has more data available. The value is opaque and may be used to issue another request to the endpoint that served this list to retrieve the next set of available objects. Continuing a consistent list may not be possible if the server configuration has changed or more than a few minutes have passed. The resourceVersion field returned when using this continue value will be identical to the value in the first response, unless you have received this token from an error message."', args=[d.arg(name='continue', type=d.T.string)]), + withContinue(continue): { continue: continue }, + '#withRemainingItemCount':: d.fn(help='"remainingItemCount is the number of subsequent items in the list which are not included in this list response. If the list request contained label or field selectors, then the number of remaining items is unknown and the field will be left unset and omitted during serialization. If the list is complete (either because it is not chunking or because this is the last chunk), then there are no more remaining items and this field will be left unset and omitted during serialization. Servers older than v1.15 do not set this field. The intended use of the remainingItemCount is *estimating* the size of a collection. Clients should not rely on the remainingItemCount to be set or to be exact."', args=[d.arg(name='remainingItemCount', type=d.T.integer)]), + withRemainingItemCount(remainingItemCount): { remainingItemCount: remainingItemCount }, + '#withResourceVersion':: d.fn(help="\"String that identifies the server's internal version of this object that can be used by clients to determine when objects have changed. Value must be treated as opaque by clients and passed unmodified back to the server. Populated by the system. Read-only. 
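Another illustrative aside (not part of the patch): labelSelector and labelSelectorRequirement are typically combined as sketched below; the `k` import path and the label keys/values are assumptions.

    // Illustrative only; adjust the import path to your setup.
    local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';
    local ls = k.meta.v1.labelSelector;
    local req = k.meta.v1.labelSelectorRequirement;

    // matchLabels and matchExpressions are ANDed together by the API server.
    ls.withMatchLabels({ name: 'memcached' })  // hypothetical label
    + ls.withMatchExpressionsMixin([
        // hypothetical requirement: tier In [cache]
        req.withKey('tier') + req.withOperator('In') + req.withValues(['cache']),
      ])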
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency\"", args=[d.arg(name='resourceVersion', type=d.T.string)]), + withResourceVersion(resourceVersion): { resourceVersion: resourceVersion }, + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), + withSelfLink(selfLink): { selfLink: selfLink }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/meta/v1/main.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/meta/v1/main.libsonnet new file mode 100644 index 00000000000..ec3251d8b80 --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/meta/v1/main.libsonnet @@ -0,0 +1,27 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='v1', url='', help=''), + apiGroup: (import 'apiGroup.libsonnet'), + apiGroupList: (import 'apiGroupList.libsonnet'), + apiResource: (import 'apiResource.libsonnet'), + apiResourceList: (import 'apiResourceList.libsonnet'), + apiVersions: (import 'apiVersions.libsonnet'), + condition: (import 'condition.libsonnet'), + deleteOptions: (import 'deleteOptions.libsonnet'), + fieldsV1: (import 'fieldsV1.libsonnet'), + groupVersionForDiscovery: (import 'groupVersionForDiscovery.libsonnet'), + labelSelector: (import 'labelSelector.libsonnet'), + labelSelectorRequirement: (import 'labelSelectorRequirement.libsonnet'), + listMeta: (import 'listMeta.libsonnet'), + managedFieldsEntry: (import 'managedFieldsEntry.libsonnet'), + microTime: (import 'microTime.libsonnet'), + objectMeta: (import 'objectMeta.libsonnet'), + ownerReference: (import 'ownerReference.libsonnet'), + patch: (import 'patch.libsonnet'), + preconditions: (import 'preconditions.libsonnet'), + serverAddressByClientCIDR: (import 'serverAddressByClientCIDR.libsonnet'), + statusCause: (import 'statusCause.libsonnet'), + statusDetails: (import 'statusDetails.libsonnet'), + time: (import 'time.libsonnet'), + watchEvent: (import 'watchEvent.libsonnet'), +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/meta/v1/managedFieldsEntry.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/meta/v1/managedFieldsEntry.libsonnet new file mode 100644 index 00000000000..4c8e5648ede --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/meta/v1/managedFieldsEntry.libsonnet @@ -0,0 +1,20 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='managedFieldsEntry', url='', help='"ManagedFieldsEntry is a workflow-id, a FieldSet and the group version of the resource that the fieldset applies to."'), + '#withFieldsType':: d.fn(help='"FieldsType is the discriminator for the different fields format and version. There is currently only one possible value: \\"FieldsV1\\', args=[d.arg(name='fieldsType', type=d.T.string)]), + withFieldsType(fieldsType): { fieldsType: fieldsType }, + '#withFieldsV1':: d.fn(help="\"FieldsV1 stores a set of fields in a data structure like a Trie, in JSON format.\\n\\nEach key is either a '.' representing the field itself, and will always map to an empty set, or a string representing a sub-field or item. 
The string will follow one of these four formats: 'f:\u003cname\u003e', where \u003cname\u003e is the name of a field in a struct, or key in a map 'v:\u003cvalue\u003e', where \u003cvalue\u003e is the exact json formatted value of a list item 'i:\u003cindex\u003e', where \u003cindex\u003e is position of a item in a list 'k:\u003ckeys\u003e', where \u003ckeys\u003e is a map of a list item's key fields to their unique values If a key maps to an empty Fields value, the field that key represents is part of the set.\\n\\nThe exact format is defined in sigs.k8s.io/structured-merge-diff\"", args=[d.arg(name='fieldsV1', type=d.T.object)]), + withFieldsV1(fieldsV1): { fieldsV1: fieldsV1 }, + '#withFieldsV1Mixin':: d.fn(help="\"FieldsV1 stores a set of fields in a data structure like a Trie, in JSON format.\\n\\nEach key is either a '.' representing the field itself, and will always map to an empty set, or a string representing a sub-field or item. The string will follow one of these four formats: 'f:\u003cname\u003e', where \u003cname\u003e is the name of a field in a struct, or key in a map 'v:\u003cvalue\u003e', where \u003cvalue\u003e is the exact json formatted value of a list item 'i:\u003cindex\u003e', where \u003cindex\u003e is position of a item in a list 'k:\u003ckeys\u003e', where \u003ckeys\u003e is a map of a list item's key fields to their unique values If a key maps to an empty Fields value, the field that key represents is part of the set.\\n\\nThe exact format is defined in sigs.k8s.io/structured-merge-diff\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='fieldsV1', type=d.T.object)]), + withFieldsV1Mixin(fieldsV1): { fieldsV1+: fieldsV1 }, + '#withManager':: d.fn(help='"Manager is an identifier of the workflow managing these fields."', args=[d.arg(name='manager', type=d.T.string)]), + withManager(manager): { manager: manager }, + '#withOperation':: d.fn(help="\"Operation is the type of operation which lead to this ManagedFieldsEntry being created. The only valid values for this field are 'Apply' and 'Update'.\"", args=[d.arg(name='operation', type=d.T.string)]), + withOperation(operation): { operation: operation }, + '#withSubresource':: d.fn(help='"Subresource is the name of the subresource used to update that object, or empty string if the object was updated through the main resource. The value of this field is used to distinguish between managers, even if they share the same name. For example, a status update will be distinct from a regular update using the same manager name. Note that the APIVersion field is not related to the Subresource field and it always corresponds to the version of the main resource."', args=[d.arg(name='subresource', type=d.T.string)]), + withSubresource(subresource): { subresource: subresource }, + '#withTime':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. 
Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='time', type=d.T.string)]), + withTime(time): { time: time }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/meta/v1/microTime.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/meta/v1/microTime.libsonnet new file mode 100644 index 00000000000..97e949abf40 --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/meta/v1/microTime.libsonnet @@ -0,0 +1,6 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='microTime', url='', help='"MicroTime is version of Time with microsecond level precision."'), + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/meta/v1/objectMeta.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/meta/v1/objectMeta.libsonnet new file mode 100644 index 00000000000..eb0f0cae8ed --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/meta/v1/objectMeta.libsonnet @@ -0,0 +1,46 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='objectMeta', url='', help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + withAnnotations(annotations): { annotations: annotations }, + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + withAnnotationsMixin(annotations): { annotations+: annotations }, + '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), + withCreationTimestamp(creationTimestamp): { creationTimestamp: creationTimestamp }, + '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), + withDeletionGracePeriodSeconds(deletionGracePeriodSeconds): { deletionGracePeriodSeconds: deletionGracePeriodSeconds }, + '#withDeletionTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. 
Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='deletionTimestamp', type=d.T.string)]), + withDeletionTimestamp(deletionTimestamp): { deletionTimestamp: deletionTimestamp }, + '#withFinalizers':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."', args=[d.arg(name='finalizers', type=d.T.array)]), + withFinalizers(finalizers): { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] }, + '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), + withFinalizersMixin(finalizers): { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] }, + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + withGenerateName(generateName): { generateName: generateName }, + '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. 
Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), + withGeneration(generation): { generation: generation }, + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), + withLabels(labels): { labels: labels }, + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + withLabelsMixin(labels): { labels+: labels }, + '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), + withManagedFields(managedFields): { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] }, + '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), + withManagedFieldsMixin(managedFields): { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] }, + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { name: name }, + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + withNamespace(namespace): { namespace: namespace }, + '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. 
If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), + withOwnerReferences(ownerReferences): { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] }, + '#withOwnerReferencesMixin':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ownerReferences', type=d.T.array)]), + withOwnerReferencesMixin(ownerReferences): { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] }, + '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), + withResourceVersion(resourceVersion): { resourceVersion: resourceVersion }, + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), + withSelfLink(selfLink): { selfLink: selfLink }, + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), + withUid(uid): { uid: uid }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/meta/v1/ownerReference.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/meta/v1/ownerReference.libsonnet new file mode 100644 index 00000000000..66963fbd4e8 --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/meta/v1/ownerReference.libsonnet @@ -0,0 +1,16 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='ownerReference', url='', help='"OwnerReference contains enough information to let you identify an owning object. An owning object must be in the same namespace as the dependent, or be cluster-scoped, so there is no namespace field."'), + '#withBlockOwnerDeletion':: d.fn(help='"If true, AND if the owner has the \\"foregroundDeletion\\" finalizer, then the owner cannot be deleted from the key-value store until this reference is removed. 
See https://kubernetes.io/docs/concepts/architecture/garbage-collection/#foreground-deletion for how the garbage collector interacts with this field and enforces the foreground deletion. Defaults to false. To set this field, a user needs \\"delete\\" permission of the owner, otherwise 422 (Unprocessable Entity) will be returned."', args=[d.arg(name='blockOwnerDeletion', type=d.T.boolean)]),
+  withBlockOwnerDeletion(blockOwnerDeletion): { blockOwnerDeletion: blockOwnerDeletion },
+  '#withController':: d.fn(help='"If true, this reference points to the managing controller."', args=[d.arg(name='controller', type=d.T.boolean)]),
+  withController(controller): { controller: controller },
+  '#withKind':: d.fn(help='"Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds"', args=[d.arg(name='kind', type=d.T.string)]),
+  withKind(kind): { kind: kind },
+  '#withName':: d.fn(help='"Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]),
+  withName(name): { name: name },
+  '#withUid':: d.fn(help='"UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]),
+  withUid(uid): { uid: uid },
+  '#mixin': 'ignore',
+  mixin: self,
+}
diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/meta/v1/patch.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/meta/v1/patch.libsonnet
new file mode 100644
index 00000000000..4d661fa24ab
--- /dev/null
+++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/meta/v1/patch.libsonnet
@@ -0,0 +1,6 @@
+{
+  local d = (import 'doc-util/main.libsonnet'),
+  '#':: d.pkg(name='patch', url='', help='"Patch is provided to give a concrete name and type to the Kubernetes PATCH request body."'),
+  '#mixin': 'ignore',
+  mixin: self,
+}
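// ---------------------------------------------------------------------------
// Editor's note: the block below is an illustrative usage sketch, not part of
// the vendored patch. It assumes the vendor/ directory above is on the Jsonnet
// search path (e.g. via jsonnet-bundler), so that the generated file and its
// internal 'doc-util/main.libsonnet' import both resolve; the import path and
// field values here are hypothetical.
local ownerReference = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/meta/v1/ownerReference.libsonnet';

{
  // Each with* helper returns a small partial object; '+' merges them, which
  // is the same mixin pattern used throughout the generated 1.29 library.
  exampleOwnerRef::
    ownerReference.withKind('StatefulSet')
    + ownerReference.withName('tempo')
    + ownerReference.withUid('00000000-0000-0000-0000-000000000000')
    + ownerReference.withController(true)
    + ownerReference.withBlockOwnerDeletion(true),
  // The resulting object can be handed to the withOwnerReferences(...) helper
  // shown earlier in this patch, which wraps a single value into a list.
}
// ---------------------------------------------------------------------------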
diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/meta/v1/preconditions.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/meta/v1/preconditions.libsonnet
new file mode 100644
index 00000000000..ba4bc5b8799
--- /dev/null
+++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/meta/v1/preconditions.libsonnet
@@ -0,0 +1,10 @@
+{
+  local d = (import 'doc-util/main.libsonnet'),
+  '#':: d.pkg(name='preconditions', url='', help='"Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out."'),
+  '#withResourceVersion':: d.fn(help='"Specifies the target ResourceVersion"', args=[d.arg(name='resourceVersion', type=d.T.string)]),
+  withResourceVersion(resourceVersion): { resourceVersion: resourceVersion },
+  '#withUid':: d.fn(help='"Specifies the target UID."', args=[d.arg(name='uid', type=d.T.string)]),
+  withUid(uid): { uid: uid },
+  '#mixin': 'ignore',
+  mixin: self,
+}
diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/meta/v1/serverAddressByClientCIDR.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/meta/v1/serverAddressByClientCIDR.libsonnet
new file mode 100644
index 00000000000..726e7cbbb01
--- /dev/null
+++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/meta/v1/serverAddressByClientCIDR.libsonnet
@@ -0,0 +1,10 @@
+{
+  local d = (import 'doc-util/main.libsonnet'),
+  '#':: d.pkg(name='serverAddressByClientCIDR', url='', help='"ServerAddressByClientCIDR helps the client to determine the server address that they should use, depending on the clientCIDR that they match."'),
+  '#withClientCIDR':: d.fn(help='"The CIDR with which clients can match their IP to figure out the server address that they should use."', args=[d.arg(name='clientCIDR', type=d.T.string)]),
+  withClientCIDR(clientCIDR): { clientCIDR: clientCIDR },
+  '#withServerAddress':: d.fn(help='"Address of this server, suitable for a client that matches the above CIDR. This can be a hostname, hostname:port, IP or IP:port."', args=[d.arg(name='serverAddress', type=d.T.string)]),
+  withServerAddress(serverAddress): { serverAddress: serverAddress },
+  '#mixin': 'ignore',
+  mixin: self,
+}
diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/meta/v1/statusCause.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/meta/v1/statusCause.libsonnet
new file mode 100644
index 00000000000..d65867359a7
--- /dev/null
+++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/meta/v1/statusCause.libsonnet
@@ -0,0 +1,12 @@
+{
+  local d = (import 'doc-util/main.libsonnet'),
+  '#':: d.pkg(name='statusCause', url='', help='"StatusCause provides more information about an api.Status failure, including cases when multiple errors are encountered."'),
+  '#withField':: d.fn(help='"The field of the resource that has caused this error, as named by its JSON serialization. May include dot and postfix notation for nested attributes. Arrays are zero-indexed. Fields may appear more than once in an array of causes due to fields having multiple errors. Optional.\\n\\nExamples:\\n  \\"name\\" - the field \\"name\\" on the current resource\\n  \\"items[0].name\\" - the field \\"name\\" on the first array entry in \\"items\\', args=[d.arg(name='field', type=d.T.string)]),
+  withField(field): { field: field },
+  '#withMessage':: d.fn(help='"A human-readable description of the cause of the error. This field may be presented as-is to a reader."', args=[d.arg(name='message', type=d.T.string)]),
+  withMessage(message): { message: message },
+  '#withReason':: d.fn(help='"A machine-readable description of the cause of the error.
If this value is empty there is no information available."', args=[d.arg(name='reason', type=d.T.string)]), + withReason(reason): { reason: reason }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/meta/v1/statusDetails.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/meta/v1/statusDetails.libsonnet new file mode 100644 index 00000000000..31a77b432a2 --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/meta/v1/statusDetails.libsonnet @@ -0,0 +1,20 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='statusDetails', url='', help='"StatusDetails is a set of additional properties that MAY be set by the server to provide additional information about a response. The Reason field of a Status object defines what attributes will be set. Clients must ignore fields that do not match the defined type of each attribute, and should assume that any attribute may be empty, invalid, or under defined."'), + '#withCauses':: d.fn(help='"The Causes array includes more details associated with the StatusReason failure. Not all StatusReasons may provide detailed causes."', args=[d.arg(name='causes', type=d.T.array)]), + withCauses(causes): { causes: if std.isArray(v=causes) then causes else [causes] }, + '#withCausesMixin':: d.fn(help='"The Causes array includes more details associated with the StatusReason failure. Not all StatusReasons may provide detailed causes."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='causes', type=d.T.array)]), + withCausesMixin(causes): { causes+: if std.isArray(v=causes) then causes else [causes] }, + '#withGroup':: d.fn(help='"The group attribute of the resource associated with the status StatusReason."', args=[d.arg(name='group', type=d.T.string)]), + withGroup(group): { group: group }, + '#withKind':: d.fn(help='"The kind attribute of the resource associated with the status StatusReason. On some operations may differ from the requested resource Kind. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds"', args=[d.arg(name='kind', type=d.T.string)]), + withKind(kind): { kind: kind }, + '#withName':: d.fn(help='"The name attribute of the resource associated with the status StatusReason (when there is a single name which can be described)."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { name: name }, + '#withRetryAfterSeconds':: d.fn(help='"If specified, the time in seconds before the operation should be retried. Some errors may indicate the client must take an alternate action - for those errors this field may indicate how long to wait before taking the alternate action."', args=[d.arg(name='retryAfterSeconds', type=d.T.integer)]), + withRetryAfterSeconds(retryAfterSeconds): { retryAfterSeconds: retryAfterSeconds }, + '#withUid':: d.fn(help='"UID of the resource. (when there is a single resource which can be described). 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), + withUid(uid): { uid: uid }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/meta/v1/time.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/meta/v1/time.libsonnet new file mode 100644 index 00000000000..641b9a47529 --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/meta/v1/time.libsonnet @@ -0,0 +1,6 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='time', url='', help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."'), + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/meta/v1/watchEvent.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/meta/v1/watchEvent.libsonnet new file mode 100644 index 00000000000..0eda21e5050 --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/meta/v1/watchEvent.libsonnet @@ -0,0 +1,17 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='watchEvent', url='', help='"Event represents a single event to a watched resource."'), + '#new':: d.fn(help='new returns an instance of WatchEvent', args=[d.arg(name='name', type=d.T.string)]), + new(name): { + apiVersion: 'storage.k8s.io/v1beta1', + kind: 'WatchEvent', + } + self.metadata.withName(name=name), + '#withObject':: d.fn(help="\"RawExtension is used to hold extensions in external versions.\\n\\nTo use this, make a field which has RawExtension as its type in your external, versioned struct, and Object in your internal struct. You also need to register your various plugin types.\\n\\n// Internal package:\\n\\n\\ttype MyAPIObject struct {\\n\\t\\truntime.TypeMeta `json:\\\",inline\\\"`\\n\\t\\tMyPlugin runtime.Object `json:\\\"myPlugin\\\"`\\n\\t}\\n\\n\\ttype PluginA struct {\\n\\t\\tAOption string `json:\\\"aOption\\\"`\\n\\t}\\n\\n// External package:\\n\\n\\ttype MyAPIObject struct {\\n\\t\\truntime.TypeMeta `json:\\\",inline\\\"`\\n\\t\\tMyPlugin runtime.RawExtension `json:\\\"myPlugin\\\"`\\n\\t}\\n\\n\\ttype PluginA struct {\\n\\t\\tAOption string `json:\\\"aOption\\\"`\\n\\t}\\n\\n// On the wire, the JSON will look something like this:\\n\\n\\t{\\n\\t\\t\\\"kind\\\":\\\"MyAPIObject\\\",\\n\\t\\t\\\"apiVersion\\\":\\\"v1\\\",\\n\\t\\t\\\"myPlugin\\\": {\\n\\t\\t\\t\\\"kind\\\":\\\"PluginA\\\",\\n\\t\\t\\t\\\"aOption\\\":\\\"foo\\\",\\n\\t\\t},\\n\\t}\\n\\nSo what happens? Decode first uses json or yaml to unmarshal the serialized data into your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked. The next step is to copy (using pkg/conversion) into the internal struct. The runtime package's DefaultScheme has conversion functions installed which will unpack the JSON stored in RawExtension, turning it into the correct object type, and storing it in the Object. 
(TODO: In the case where the object is of an unknown type, a runtime.Unknown object will be created and stored.)\"", args=[d.arg(name='object', type=d.T.object)]), + withObject(object): { object: object }, + '#withObjectMixin':: d.fn(help="\"RawExtension is used to hold extensions in external versions.\\n\\nTo use this, make a field which has RawExtension as its type in your external, versioned struct, and Object in your internal struct. You also need to register your various plugin types.\\n\\n// Internal package:\\n\\n\\ttype MyAPIObject struct {\\n\\t\\truntime.TypeMeta `json:\\\",inline\\\"`\\n\\t\\tMyPlugin runtime.Object `json:\\\"myPlugin\\\"`\\n\\t}\\n\\n\\ttype PluginA struct {\\n\\t\\tAOption string `json:\\\"aOption\\\"`\\n\\t}\\n\\n// External package:\\n\\n\\ttype MyAPIObject struct {\\n\\t\\truntime.TypeMeta `json:\\\",inline\\\"`\\n\\t\\tMyPlugin runtime.RawExtension `json:\\\"myPlugin\\\"`\\n\\t}\\n\\n\\ttype PluginA struct {\\n\\t\\tAOption string `json:\\\"aOption\\\"`\\n\\t}\\n\\n// On the wire, the JSON will look something like this:\\n\\n\\t{\\n\\t\\t\\\"kind\\\":\\\"MyAPIObject\\\",\\n\\t\\t\\\"apiVersion\\\":\\\"v1\\\",\\n\\t\\t\\\"myPlugin\\\": {\\n\\t\\t\\t\\\"kind\\\":\\\"PluginA\\\",\\n\\t\\t\\t\\\"aOption\\\":\\\"foo\\\",\\n\\t\\t},\\n\\t}\\n\\nSo what happens? Decode first uses json or yaml to unmarshal the serialized data into your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked. The next step is to copy (using pkg/conversion) into the internal struct. The runtime package's DefaultScheme has conversion functions installed which will unpack the JSON stored in RawExtension, turning it into the correct object type, and storing it in the Object. (TODO: In the case where the object is of an unknown type, a runtime.Unknown object will be created and stored.)\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='object', type=d.T.object)]), + withObjectMixin(object): { object+: object }, + '#withType':: d.fn(help='', args=[d.arg(name='type', type=d.T.string)]), + withType(type): { type: type }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/main.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/main.libsonnet similarity index 74% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/main.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/main.libsonnet index 48137d12473..5eded79427c 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/main.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/main.libsonnet @@ -2,5 +2,5 @@ local d = (import 'doc-util/main.libsonnet'), '#':: d.pkg(name='networking', url='', help=''), v1: (import 'v1/main.libsonnet'), - v1beta1: (import 'v1beta1/main.libsonnet'), + v1alpha1: (import 'v1alpha1/main.libsonnet'), } diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1/httpIngressPath.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/httpIngressPath.libsonnet similarity index 85% rename from 
example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1/httpIngressPath.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/httpIngressPath.libsonnet index a50ee2feb2d..47fbe973509 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1/httpIngressPath.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/httpIngressPath.libsonnet @@ -16,18 +16,18 @@ service: { '#port':: d.obj(help='"ServiceBackendPort is the service port being referenced."'), port: { - '#withName':: d.fn(help='"Name is the name of the port on the Service. This is a mutually exclusive setting with \\"Number\\"."', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"name is the name of the port on the Service. This is a mutually exclusive setting with \\"Number\\"."', args=[d.arg(name='name', type=d.T.string)]), withName(name): { backend+: { service+: { port+: { name: name } } } }, - '#withNumber':: d.fn(help='"Number is the numerical port number (e.g. 80) on the Service. This is a mutually exclusive setting with \\"Name\\"."', args=[d.arg(name='number', type=d.T.integer)]), + '#withNumber':: d.fn(help='"number is the numerical port number (e.g. 80) on the Service. This is a mutually exclusive setting with \\"Name\\"."', args=[d.arg(name='number', type=d.T.integer)]), withNumber(number): { backend+: { service+: { port+: { number: number } } } }, }, - '#withName':: d.fn(help='"Name is the referenced service. The service must exist in the same namespace as the Ingress object."', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"name is the referenced service. The service must exist in the same namespace as the Ingress object."', args=[d.arg(name='name', type=d.T.string)]), withName(name): { backend+: { service+: { name: name } } }, }, }, - '#withPath':: d.fn(help="\"Path is matched against the path of an incoming request. Currently it can contain characters disallowed from the conventional \\\"path\\\" part of a URL as defined by RFC 3986. Paths must begin with a '/'. When unspecified, all paths from incoming requests are matched.\"", args=[d.arg(name='path', type=d.T.string)]), + '#withPath':: d.fn(help="\"path is matched against the path of an incoming request. Currently it can contain characters disallowed from the conventional \\\"path\\\" part of a URL as defined by RFC 3986. Paths must begin with a '/' and must be present when using PathType with value \\\"Exact\\\" or \\\"Prefix\\\".\"", args=[d.arg(name='path', type=d.T.string)]), withPath(path): { path: path }, - '#withPathType':: d.fn(help="\"PathType determines the interpretation of the Path matching. PathType can be one of the following values: * Exact: Matches the URL path exactly. * Prefix: Matches based on a URL path prefix split by '/'. Matching is\\n done on a path element by element basis. A path element refers is the\\n list of labels in the path split by the '/' separator. A request is a\\n match for path p if every p is an element-wise prefix of p of the\\n request path. Note that if the last element of the path is a substring\\n of the last element in request path, it is not a match (e.g. /foo/bar\\n matches /foo/bar/baz, but does not match /foo/barbaz).\\n* ImplementationSpecific: Interpretation of the Path matching is up to\\n the IngressClass. 
Implementations can treat this as a separate PathType\\n or treat it identically to Prefix or Exact path types.\\nImplementations are required to support all path types.\"", args=[d.arg(name='pathType', type=d.T.string)]), + '#withPathType':: d.fn(help="\"pathType determines the interpretation of the path matching. PathType can be one of the following values: * Exact: Matches the URL path exactly. * Prefix: Matches based on a URL path prefix split by '/'. Matching is\\n done on a path element by element basis. A path element refers is the\\n list of labels in the path split by the '/' separator. A request is a\\n match for path p if every p is an element-wise prefix of p of the\\n request path. Note that if the last element of the path is a substring\\n of the last element in request path, it is not a match (e.g. /foo/bar\\n matches /foo/bar/baz, but does not match /foo/barbaz).\\n* ImplementationSpecific: Interpretation of the Path matching is up to\\n the IngressClass. Implementations can treat this as a separate PathType\\n or treat it identically to Prefix or Exact path types.\\nImplementations are required to support all path types.\"", args=[d.arg(name='pathType', type=d.T.string)]), withPathType(pathType): { pathType: pathType }, '#mixin': 'ignore', mixin: self, diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/extensions/v1beta1/httpIngressRuleValue.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/httpIngressRuleValue.libsonnet similarity index 64% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/extensions/v1beta1/httpIngressRuleValue.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/httpIngressRuleValue.libsonnet index a54d695b4fc..7d54756ae2e 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/extensions/v1beta1/httpIngressRuleValue.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/httpIngressRuleValue.libsonnet @@ -1,9 +1,9 @@ { local d = (import 'doc-util/main.libsonnet'), '#':: d.pkg(name='httpIngressRuleValue', url='', help="\"HTTPIngressRuleValue is a list of http selectors pointing to backends. In the example: http://\u003chost\u003e/\u003cpath\u003e?\u003csearchpart\u003e -\u003e backend where where parts of the url correspond to RFC 3986, this resource will be used to match against everything after the last '/' and before the first '?' 
or '#'.\""), - '#withPaths':: d.fn(help='"A collection of paths that map requests to backends."', args=[d.arg(name='paths', type=d.T.array)]), + '#withPaths':: d.fn(help='"paths is a collection of paths that map requests to backends."', args=[d.arg(name='paths', type=d.T.array)]), withPaths(paths): { paths: if std.isArray(v=paths) then paths else [paths] }, - '#withPathsMixin':: d.fn(help='"A collection of paths that map requests to backends."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='paths', type=d.T.array)]), + '#withPathsMixin':: d.fn(help='"paths is a collection of paths that map requests to backends."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='paths', type=d.T.array)]), withPathsMixin(paths): { paths+: if std.isArray(v=paths) then paths else [paths] }, '#mixin': 'ignore', mixin: self, diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1/ingress.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/ingress.libsonnet similarity index 75% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1/ingress.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/ingress.libsonnet index 422b2a630c2..15464bc5e6a 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1/ingress.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/ingress.libsonnet @@ -3,12 +3,10 @@ '#':: d.pkg(name='ingress', url='', help='"Ingress is a collection of rules that allow inbound connections to reach the endpoints defined by a backend. An Ingress can be configured to give services externally-reachable urls, load balance traffic, terminate SSL, offer name based virtual hosting etc."'), '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. 
More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -19,21 +17,21 @@ withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. 
The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { metadata+: { generateName: generateName } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { metadata+: { labels+: labels } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. 
This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { metadata+: { namespace: namespace } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. 
There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, @@ -41,9 +39,9 @@ withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { metadata+: { uid: uid } }, }, '#new':: d.fn(help='new returns an instance of Ingress', args=[d.arg(name='name', type=d.T.string)]), @@ -68,24 +66,24 @@ service: { '#port':: d.obj(help='"ServiceBackendPort is the service port being referenced."'), port: { - '#withName':: d.fn(help='"Name is the name of the port on the Service. This is a mutually exclusive setting with \\"Number\\"."', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"name is the name of the port on the Service. This is a mutually exclusive setting with \\"Number\\"."', args=[d.arg(name='name', type=d.T.string)]), withName(name): { spec+: { defaultBackend+: { service+: { port+: { name: name } } } } }, - '#withNumber':: d.fn(help='"Number is the numerical port number (e.g. 80) on the Service. This is a mutually exclusive setting with \\"Name\\"."', args=[d.arg(name='number', type=d.T.integer)]), + '#withNumber':: d.fn(help='"number is the numerical port number (e.g. 80) on the Service. 
This is a mutually exclusive setting with \\"Name\\"."', args=[d.arg(name='number', type=d.T.integer)]), withNumber(number): { spec+: { defaultBackend+: { service+: { port+: { number: number } } } } }, }, - '#withName':: d.fn(help='"Name is the referenced service. The service must exist in the same namespace as the Ingress object."', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"name is the referenced service. The service must exist in the same namespace as the Ingress object."', args=[d.arg(name='name', type=d.T.string)]), withName(name): { spec+: { defaultBackend+: { service+: { name: name } } } }, }, }, - '#withIngressClassName':: d.fn(help='"IngressClassName is the name of the IngressClass cluster resource. The associated IngressClass defines which controller will implement the resource. This replaces the deprecated `kubernetes.io/ingress.class` annotation. For backwards compatibility, when that annotation is set, it must be given precedence over this field. The controller may emit a warning if the field and annotation have different values. Implementations of this API should ignore Ingresses without a class specified. An IngressClass resource may be marked as default, which can be used to set a default value for this field. For more information, refer to the IngressClass documentation."', args=[d.arg(name='ingressClassName', type=d.T.string)]), + '#withIngressClassName':: d.fn(help='"ingressClassName is the name of an IngressClass cluster resource. Ingress controller implementations use this field to know whether they should be serving this Ingress resource, by a transitive connection (controller -> IngressClass -> Ingress resource). Although the `kubernetes.io/ingress.class` annotation (simple constant name) was never formally defined, it was widely supported by Ingress controllers to create a direct binding between Ingress controller and Ingress resources. Newly created Ingress resources should prefer using the field. However, even though the annotation is officially deprecated, for backwards compatibility reasons, ingress controllers should still honor that annotation if present."', args=[d.arg(name='ingressClassName', type=d.T.string)]), withIngressClassName(ingressClassName): { spec+: { ingressClassName: ingressClassName } }, - '#withRules':: d.fn(help='"A list of host rules used to configure the Ingress. If unspecified, or no rule matches, all traffic is sent to the default backend."', args=[d.arg(name='rules', type=d.T.array)]), + '#withRules':: d.fn(help='"rules is a list of host rules used to configure the Ingress. If unspecified, or no rule matches, all traffic is sent to the default backend."', args=[d.arg(name='rules', type=d.T.array)]), withRules(rules): { spec+: { rules: if std.isArray(v=rules) then rules else [rules] } }, - '#withRulesMixin':: d.fn(help='"A list of host rules used to configure the Ingress. If unspecified, or no rule matches, all traffic is sent to the default backend."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='rules', type=d.T.array)]), + '#withRulesMixin':: d.fn(help='"rules is a list of host rules used to configure the Ingress. If unspecified, or no rule matches, all traffic is sent to the default backend."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='rules', type=d.T.array)]), withRulesMixin(rules): { spec+: { rules+: if std.isArray(v=rules) then rules else [rules] } }, - '#withTls':: d.fn(help='"TLS configuration. 
Currently the Ingress only supports a single TLS port, 443. If multiple members of this list specify different hosts, they will be multiplexed on the same port according to the hostname specified through the SNI TLS extension, if the ingress controller fulfilling the ingress supports SNI."', args=[d.arg(name='tls', type=d.T.array)]), + '#withTls':: d.fn(help='"tls represents the TLS configuration. Currently the Ingress only supports a single TLS port, 443. If multiple members of this list specify different hosts, they will be multiplexed on the same port according to the hostname specified through the SNI TLS extension, if the ingress controller fulfilling the ingress supports SNI."', args=[d.arg(name='tls', type=d.T.array)]), withTls(tls): { spec+: { tls: if std.isArray(v=tls) then tls else [tls] } }, - '#withTlsMixin':: d.fn(help='"TLS configuration. Currently the Ingress only supports a single TLS port, 443. If multiple members of this list specify different hosts, they will be multiplexed on the same port according to the hostname specified through the SNI TLS extension, if the ingress controller fulfilling the ingress supports SNI."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='tls', type=d.T.array)]), + '#withTlsMixin':: d.fn(help='"tls represents the TLS configuration. Currently the Ingress only supports a single TLS port, 443. If multiple members of this list specify different hosts, they will be multiplexed on the same port according to the hostname specified through the SNI TLS extension, if the ingress controller fulfilling the ingress supports SNI."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='tls', type=d.T.array)]), withTlsMixin(tls): { spec+: { tls+: if std.isArray(v=tls) then tls else [tls] } }, }, '#mixin': 'ignore', diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1/ingressBackend.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/ingressBackend.libsonnet similarity index 90% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1/ingressBackend.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/ingressBackend.libsonnet index c2a7000ac5a..803c12f7c33 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1/ingressBackend.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/ingressBackend.libsonnet @@ -14,12 +14,12 @@ service: { '#port':: d.obj(help='"ServiceBackendPort is the service port being referenced."'), port: { - '#withName':: d.fn(help='"Name is the name of the port on the Service. This is a mutually exclusive setting with \\"Number\\"."', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"name is the name of the port on the Service. This is a mutually exclusive setting with \\"Number\\"."', args=[d.arg(name='name', type=d.T.string)]), withName(name): { service+: { port+: { name: name } } }, - '#withNumber':: d.fn(help='"Number is the numerical port number (e.g. 80) on the Service. This is a mutually exclusive setting with \\"Name\\"."', args=[d.arg(name='number', type=d.T.integer)]), + '#withNumber':: d.fn(help='"number is the numerical port number (e.g. 80) on the Service. 
This is a mutually exclusive setting with \\"Name\\"."', args=[d.arg(name='number', type=d.T.integer)]), withNumber(number): { service+: { port+: { number: number } } }, }, - '#withName':: d.fn(help='"Name is the referenced service. The service must exist in the same namespace as the Ingress object."', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"name is the referenced service. The service must exist in the same namespace as the Ingress object."', args=[d.arg(name='name', type=d.T.string)]), withName(name): { service+: { name: name } }, }, '#mixin': 'ignore', diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1/ingressClass.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/ingressClass.libsonnet similarity index 82% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1/ingressClass.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/ingressClass.libsonnet index fbd68ac9f07..5e76306f213 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1/ingressClass.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/ingressClass.libsonnet @@ -3,12 +3,10 @@ '#':: d.pkg(name='ingressClass', url='', help='"IngressClass represents the class of the Ingress, referenced by the Ingress Spec. The `ingressclass.kubernetes.io/is-default-class` annotation can be used to indicate that an IngressClass should be considered default. When a single IngressClass resource has this annotation set to true, new Ingress resources without a class specified will be assigned this default class."'), '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. 
They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -19,21 +17,21 @@ withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. 
The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { metadata+: { generateName: generateName } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { metadata+: { labels+: labels } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. 
This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { metadata+: { namespace: namespace } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. 
There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, @@ -41,9 +39,9 @@ withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { metadata+: { uid: uid } }, }, '#new':: d.fn(help='new returns an instance of IngressClass', args=[d.arg(name='name', type=d.T.string)]), @@ -55,18 +53,18 @@ spec: { '#parameters':: d.obj(help='"IngressClassParametersReference identifies an API object. This can be used to specify a cluster or namespace-scoped resource."'), parameters: { - '#withApiGroup':: d.fn(help='"APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required."', args=[d.arg(name='apiGroup', type=d.T.string)]), + '#withApiGroup':: d.fn(help='"apiGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. 
For any other third-party types, APIGroup is required."', args=[d.arg(name='apiGroup', type=d.T.string)]), withApiGroup(apiGroup): { spec+: { parameters+: { apiGroup: apiGroup } } }, - '#withKind':: d.fn(help='"Kind is the type of resource being referenced."', args=[d.arg(name='kind', type=d.T.string)]), + '#withKind':: d.fn(help='"kind is the type of resource being referenced."', args=[d.arg(name='kind', type=d.T.string)]), withKind(kind): { spec+: { parameters+: { kind: kind } } }, - '#withName':: d.fn(help='"Name is the name of resource being referenced."', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"name is the name of resource being referenced."', args=[d.arg(name='name', type=d.T.string)]), withName(name): { spec+: { parameters+: { name: name } } }, - '#withNamespace':: d.fn(help='"Namespace is the namespace of the resource being referenced. This field is required when scope is set to \\"Namespace\\" and must be unset when scope is set to \\"Cluster\\"."', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"namespace is the namespace of the resource being referenced. This field is required when scope is set to \\"Namespace\\" and must be unset when scope is set to \\"Cluster\\"."', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { spec+: { parameters+: { namespace: namespace } } }, - '#withScope':: d.fn(help='"Scope represents if this refers to a cluster or namespace scoped resource. This may be set to \\"Cluster\\" (default) or \\"Namespace\\". Field can be enabled with IngressClassNamespacedParams feature gate."', args=[d.arg(name='scope', type=d.T.string)]), + '#withScope':: d.fn(help='"scope represents if this refers to a cluster or namespace scoped resource. This may be set to \\"Cluster\\" (default) or \\"Namespace\\"."', args=[d.arg(name='scope', type=d.T.string)]), withScope(scope): { spec+: { parameters+: { scope: scope } } }, }, - '#withController':: d.fn(help='"Controller refers to the name of the controller that should handle this class. This allows for different \\"flavors\\" that are controlled by the same controller. For example, you may have different Parameters for the same implementing controller. This should be specified as a domain-prefixed path no more than 250 characters in length, e.g. \\"acme.io/ingress-controller\\". This field is immutable."', args=[d.arg(name='controller', type=d.T.string)]), + '#withController':: d.fn(help='"controller refers to the name of the controller that should handle this class. This allows for different \\"flavors\\" that are controlled by the same controller. For example, you may have different parameters for the same implementing controller. This should be specified as a domain-prefixed path no more than 250 characters in length, e.g. \\"acme.io/ingress-controller\\". 
This field is immutable."', args=[d.arg(name='controller', type=d.T.string)]), withController(controller): { spec+: { controller: controller } }, }, '#mixin': 'ignore', diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1beta1/ingressClassParametersReference.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/ingressClassParametersReference.libsonnet similarity index 69% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1beta1/ingressClassParametersReference.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/ingressClassParametersReference.libsonnet index b1ed8b58500..1c17567d48b 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1beta1/ingressClassParametersReference.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/ingressClassParametersReference.libsonnet @@ -1,15 +1,15 @@ { local d = (import 'doc-util/main.libsonnet'), '#':: d.pkg(name='ingressClassParametersReference', url='', help='"IngressClassParametersReference identifies an API object. This can be used to specify a cluster or namespace-scoped resource."'), - '#withApiGroup':: d.fn(help='"APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required."', args=[d.arg(name='apiGroup', type=d.T.string)]), + '#withApiGroup':: d.fn(help='"apiGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required."', args=[d.arg(name='apiGroup', type=d.T.string)]), withApiGroup(apiGroup): { apiGroup: apiGroup }, - '#withKind':: d.fn(help='"Kind is the type of resource being referenced."', args=[d.arg(name='kind', type=d.T.string)]), + '#withKind':: d.fn(help='"kind is the type of resource being referenced."', args=[d.arg(name='kind', type=d.T.string)]), withKind(kind): { kind: kind }, - '#withName':: d.fn(help='"Name is the name of resource being referenced."', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"name is the name of resource being referenced."', args=[d.arg(name='name', type=d.T.string)]), withName(name): { name: name }, - '#withNamespace':: d.fn(help='"Namespace is the namespace of the resource being referenced. This field is required when scope is set to \\"Namespace\\" and must be unset when scope is set to \\"Cluster\\"."', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"namespace is the namespace of the resource being referenced. This field is required when scope is set to \\"Namespace\\" and must be unset when scope is set to \\"Cluster\\"."', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { namespace: namespace }, - '#withScope':: d.fn(help='"Scope represents if this refers to a cluster or namespace scoped resource. This may be set to \\"Cluster\\" (default) or \\"Namespace\\". Field can be enabled with IngressClassNamespacedParams feature gate."', args=[d.arg(name='scope', type=d.T.string)]), + '#withScope':: d.fn(help='"scope represents if this refers to a cluster or namespace scoped resource. 
This may be set to \\"Cluster\\" (default) or \\"Namespace\\"."', args=[d.arg(name='scope', type=d.T.string)]), withScope(scope): { scope: scope }, '#mixin': 'ignore', mixin: self, diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1/ingressClassSpec.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/ingressClassSpec.libsonnet similarity index 73% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1/ingressClassSpec.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/ingressClassSpec.libsonnet index ebbc86576cb..16a9f053825 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1/ingressClassSpec.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/ingressClassSpec.libsonnet @@ -3,18 +3,18 @@ '#':: d.pkg(name='ingressClassSpec', url='', help='"IngressClassSpec provides information about the class of an Ingress."'), '#parameters':: d.obj(help='"IngressClassParametersReference identifies an API object. This can be used to specify a cluster or namespace-scoped resource."'), parameters: { - '#withApiGroup':: d.fn(help='"APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required."', args=[d.arg(name='apiGroup', type=d.T.string)]), + '#withApiGroup':: d.fn(help='"apiGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required."', args=[d.arg(name='apiGroup', type=d.T.string)]), withApiGroup(apiGroup): { parameters+: { apiGroup: apiGroup } }, - '#withKind':: d.fn(help='"Kind is the type of resource being referenced."', args=[d.arg(name='kind', type=d.T.string)]), + '#withKind':: d.fn(help='"kind is the type of resource being referenced."', args=[d.arg(name='kind', type=d.T.string)]), withKind(kind): { parameters+: { kind: kind } }, - '#withName':: d.fn(help='"Name is the name of resource being referenced."', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"name is the name of resource being referenced."', args=[d.arg(name='name', type=d.T.string)]), withName(name): { parameters+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace is the namespace of the resource being referenced. This field is required when scope is set to \\"Namespace\\" and must be unset when scope is set to \\"Cluster\\"."', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"namespace is the namespace of the resource being referenced. This field is required when scope is set to \\"Namespace\\" and must be unset when scope is set to \\"Cluster\\"."', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { parameters+: { namespace: namespace } }, - '#withScope':: d.fn(help='"Scope represents if this refers to a cluster or namespace scoped resource. This may be set to \\"Cluster\\" (default) or \\"Namespace\\". Field can be enabled with IngressClassNamespacedParams feature gate."', args=[d.arg(name='scope', type=d.T.string)]), + '#withScope':: d.fn(help='"scope represents if this refers to a cluster or namespace scoped resource. 
This may be set to \\"Cluster\\" (default) or \\"Namespace\\"."', args=[d.arg(name='scope', type=d.T.string)]), withScope(scope): { parameters+: { scope: scope } }, }, - '#withController':: d.fn(help='"Controller refers to the name of the controller that should handle this class. This allows for different \\"flavors\\" that are controlled by the same controller. For example, you may have different Parameters for the same implementing controller. This should be specified as a domain-prefixed path no more than 250 characters in length, e.g. \\"acme.io/ingress-controller\\". This field is immutable."', args=[d.arg(name='controller', type=d.T.string)]), + '#withController':: d.fn(help='"controller refers to the name of the controller that should handle this class. This allows for different \\"flavors\\" that are controlled by the same controller. For example, you may have different parameters for the same implementing controller. This should be specified as a domain-prefixed path no more than 250 characters in length, e.g. \\"acme.io/ingress-controller\\". This field is immutable."', args=[d.arg(name='controller', type=d.T.string)]), withController(controller): { controller: controller }, '#mixin': 'ignore', mixin: self, diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/ingressLoadBalancerIngress.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/ingressLoadBalancerIngress.libsonnet new file mode 100644 index 00000000000..ee9e9abddbc --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/ingressLoadBalancerIngress.libsonnet @@ -0,0 +1,14 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='ingressLoadBalancerIngress', url='', help='"IngressLoadBalancerIngress represents the status of a load-balancer ingress point."'), + '#withHostname':: d.fn(help='"hostname is set for load-balancer ingress points that are DNS based."', args=[d.arg(name='hostname', type=d.T.string)]), + withHostname(hostname): { hostname: hostname }, + '#withIp':: d.fn(help='"ip is set for load-balancer ingress points that are IP based."', args=[d.arg(name='ip', type=d.T.string)]), + withIp(ip): { ip: ip }, + '#withPorts':: d.fn(help='"ports provides information about the ports exposed by this LoadBalancer."', args=[d.arg(name='ports', type=d.T.array)]), + withPorts(ports): { ports: if std.isArray(v=ports) then ports else [ports] }, + '#withPortsMixin':: d.fn(help='"ports provides information about the ports exposed by this LoadBalancer."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ports', type=d.T.array)]), + withPortsMixin(ports): { ports+: if std.isArray(v=ports) then ports else [ports] }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/ingressLoadBalancerStatus.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/ingressLoadBalancerStatus.libsonnet new file mode 100644 index 00000000000..bf8a32d2c4f --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/ingressLoadBalancerStatus.libsonnet @@ -0,0 +1,10 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='ingressLoadBalancerStatus', url='', 
help='"IngressLoadBalancerStatus represents the status of a load-balancer."'), + '#withIngress':: d.fn(help='"ingress is a list containing ingress points for the load-balancer."', args=[d.arg(name='ingress', type=d.T.array)]), + withIngress(ingress): { ingress: if std.isArray(v=ingress) then ingress else [ingress] }, + '#withIngressMixin':: d.fn(help='"ingress is a list containing ingress points for the load-balancer."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ingress', type=d.T.array)]), + withIngressMixin(ingress): { ingress+: if std.isArray(v=ingress) then ingress else [ingress] }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/ingressPortStatus.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/ingressPortStatus.libsonnet new file mode 100644 index 00000000000..035d1b0d6a5 --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/ingressPortStatus.libsonnet @@ -0,0 +1,12 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='ingressPortStatus', url='', help='"IngressPortStatus represents the error condition of a service port"'), + '#withError':: d.fn(help='"error is to record the problem with the service port The format of the error shall comply with the following rules: - built-in error values shall be specified in this file and those shall use\\n CamelCase names\\n- cloud provider specific error values must have names that comply with the\\n format foo.example.com/CamelCase."', args=[d.arg(name='err', type=d.T.string)]), + withError(err): { 'error': err }, + '#withPort':: d.fn(help='"port is the port number of the ingress port."', args=[d.arg(name='port', type=d.T.integer)]), + withPort(port): { port: port }, + '#withProtocol':: d.fn(help='"protocol is the protocol of the ingress port. The supported values are: \\"TCP\\", \\"UDP\\", \\"SCTP\\', args=[d.arg(name='protocol', type=d.T.string)]), + withProtocol(protocol): { protocol: protocol }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1/ingressRule.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/ingressRule.libsonnet similarity index 78% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1/ingressRule.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/ingressRule.libsonnet index ebba87ea6bf..d58de75625e 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1/ingressRule.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/ingressRule.libsonnet @@ -3,12 +3,12 @@ '#':: d.pkg(name='ingressRule', url='', help='"IngressRule represents the rules mapping the paths under a specified host to the related backend services. Incoming requests are first evaluated for a host match, then routed to the backend associated with the matching IngressRuleValue."'), '#http':: d.obj(help="\"HTTPIngressRuleValue is a list of http selectors pointing to backends. 
In the example: http://\u003chost\u003e/\u003cpath\u003e?\u003csearchpart\u003e -\u003e backend where where parts of the url correspond to RFC 3986, this resource will be used to match against everything after the last '/' and before the first '?' or '#'.\""), http: { - '#withPaths':: d.fn(help='"A collection of paths that map requests to backends."', args=[d.arg(name='paths', type=d.T.array)]), + '#withPaths':: d.fn(help='"paths is a collection of paths that map requests to backends."', args=[d.arg(name='paths', type=d.T.array)]), withPaths(paths): { http+: { paths: if std.isArray(v=paths) then paths else [paths] } }, - '#withPathsMixin':: d.fn(help='"A collection of paths that map requests to backends."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='paths', type=d.T.array)]), + '#withPathsMixin':: d.fn(help='"paths is a collection of paths that map requests to backends."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='paths', type=d.T.array)]), withPathsMixin(paths): { http+: { paths+: if std.isArray(v=paths) then paths else [paths] } }, }, - '#withHost':: d.fn(help="\"Host is the fully qualified domain name of a network host, as defined by RFC 3986. Note the following deviations from the \\\"host\\\" part of the URI as defined in RFC 3986: 1. IPs are not allowed. Currently an IngressRuleValue can only apply to\\n the IP in the Spec of the parent Ingress.\\n2. The `:` delimiter is not respected because ports are not allowed.\\n\\t Currently the port of an Ingress is implicitly :80 for http and\\n\\t :443 for https.\\nBoth these may change in the future. Incoming requests are matched against the host before the IngressRuleValue. If the host is unspecified, the Ingress routes all traffic based on the specified IngressRuleValue.\\n\\nHost can be \\\"precise\\\" which is a domain name without the terminating dot of a network host (e.g. \\\"foo.bar.com\\\") or \\\"wildcard\\\", which is a domain name prefixed with a single wildcard label (e.g. \\\"*.foo.com\\\"). The wildcard character '*' must appear by itself as the first DNS label and matches only a single label. You cannot have a wildcard label by itself (e.g. Host == \\\"*\\\"). Requests will be matched against the Host field in the following way: 1. If Host is precise, the request matches this rule if the http host header is equal to Host. 2. If Host is a wildcard, then the request matches this rule if the http host header is to equal to the suffix (removing the first label) of the wildcard rule.\"", args=[d.arg(name='host', type=d.T.string)]), + '#withHost':: d.fn(help="\"host is the fully qualified domain name of a network host, as defined by RFC 3986. Note the following deviations from the \\\"host\\\" part of the URI as defined in RFC 3986: 1. IPs are not allowed. Currently an IngressRuleValue can only apply to\\n the IP in the Spec of the parent Ingress.\\n2. The `:` delimiter is not respected because ports are not allowed.\\n\\t Currently the port of an Ingress is implicitly :80 for http and\\n\\t :443 for https.\\nBoth these may change in the future. Incoming requests are matched against the host before the IngressRuleValue. If the host is unspecified, the Ingress routes all traffic based on the specified IngressRuleValue.\\n\\nhost can be \\\"precise\\\" which is a domain name without the terminating dot of a network host (e.g. \\\"foo.bar.com\\\") or \\\"wildcard\\\", which is a domain name prefixed with a single wildcard label (e.g. 
\\\"*.foo.com\\\"). The wildcard character '*' must appear by itself as the first DNS label and matches only a single label. You cannot have a wildcard label by itself (e.g. Host == \\\"*\\\"). Requests will be matched against the Host field in the following way: 1. If host is precise, the request matches this rule if the http host header is equal to Host. 2. If host is a wildcard, then the request matches this rule if the http host header is to equal to the suffix (removing the first label) of the wildcard rule.\"", args=[d.arg(name='host', type=d.T.string)]), withHost(host): { host: host }, '#mixin': 'ignore', mixin: self, diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1/ingressServiceBackend.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/ingressServiceBackend.libsonnet similarity index 81% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1/ingressServiceBackend.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/ingressServiceBackend.libsonnet index fe0447b3348..7a7c0141fe1 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1/ingressServiceBackend.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/ingressServiceBackend.libsonnet @@ -3,12 +3,12 @@ '#':: d.pkg(name='ingressServiceBackend', url='', help='"IngressServiceBackend references a Kubernetes Service as a Backend."'), '#port':: d.obj(help='"ServiceBackendPort is the service port being referenced."'), port: { - '#withName':: d.fn(help='"Name is the name of the port on the Service. This is a mutually exclusive setting with \\"Number\\"."', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"name is the name of the port on the Service. This is a mutually exclusive setting with \\"Number\\"."', args=[d.arg(name='name', type=d.T.string)]), withName(name): { port+: { name: name } }, - '#withNumber':: d.fn(help='"Number is the numerical port number (e.g. 80) on the Service. This is a mutually exclusive setting with \\"Name\\"."', args=[d.arg(name='number', type=d.T.integer)]), + '#withNumber':: d.fn(help='"number is the numerical port number (e.g. 80) on the Service. This is a mutually exclusive setting with \\"Name\\"."', args=[d.arg(name='number', type=d.T.integer)]), withNumber(number): { port+: { number: number } }, }, - '#withName':: d.fn(help='"Name is the referenced service. The service must exist in the same namespace as the Ingress object."', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"name is the referenced service. 
The service must exist in the same namespace as the Ingress object."', args=[d.arg(name='name', type=d.T.string)]), withName(name): { name: name }, '#mixin': 'ignore', mixin: self, diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1/ingressSpec.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/ingressSpec.libsonnet similarity index 51% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1/ingressSpec.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/ingressSpec.libsonnet index bd79b361709..c99326738b6 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1/ingressSpec.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/ingressSpec.libsonnet @@ -16,24 +16,24 @@ service: { '#port':: d.obj(help='"ServiceBackendPort is the service port being referenced."'), port: { - '#withName':: d.fn(help='"Name is the name of the port on the Service. This is a mutually exclusive setting with \\"Number\\"."', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"name is the name of the port on the Service. This is a mutually exclusive setting with \\"Number\\"."', args=[d.arg(name='name', type=d.T.string)]), withName(name): { defaultBackend+: { service+: { port+: { name: name } } } }, - '#withNumber':: d.fn(help='"Number is the numerical port number (e.g. 80) on the Service. This is a mutually exclusive setting with \\"Name\\"."', args=[d.arg(name='number', type=d.T.integer)]), + '#withNumber':: d.fn(help='"number is the numerical port number (e.g. 80) on the Service. This is a mutually exclusive setting with \\"Name\\"."', args=[d.arg(name='number', type=d.T.integer)]), withNumber(number): { defaultBackend+: { service+: { port+: { number: number } } } }, }, - '#withName':: d.fn(help='"Name is the referenced service. The service must exist in the same namespace as the Ingress object."', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"name is the referenced service. The service must exist in the same namespace as the Ingress object."', args=[d.arg(name='name', type=d.T.string)]), withName(name): { defaultBackend+: { service+: { name: name } } }, }, }, - '#withIngressClassName':: d.fn(help='"IngressClassName is the name of the IngressClass cluster resource. The associated IngressClass defines which controller will implement the resource. This replaces the deprecated `kubernetes.io/ingress.class` annotation. For backwards compatibility, when that annotation is set, it must be given precedence over this field. The controller may emit a warning if the field and annotation have different values. Implementations of this API should ignore Ingresses without a class specified. An IngressClass resource may be marked as default, which can be used to set a default value for this field. For more information, refer to the IngressClass documentation."', args=[d.arg(name='ingressClassName', type=d.T.string)]), + '#withIngressClassName':: d.fn(help='"ingressClassName is the name of an IngressClass cluster resource. Ingress controller implementations use this field to know whether they should be serving this Ingress resource, by a transitive connection (controller -> IngressClass -> Ingress resource). 
Although the `kubernetes.io/ingress.class` annotation (simple constant name) was never formally defined, it was widely supported by Ingress controllers to create a direct binding between Ingress controller and Ingress resources. Newly created Ingress resources should prefer using the field. However, even though the annotation is officially deprecated, for backwards compatibility reasons, ingress controllers should still honor that annotation if present."', args=[d.arg(name='ingressClassName', type=d.T.string)]), withIngressClassName(ingressClassName): { ingressClassName: ingressClassName }, - '#withRules':: d.fn(help='"A list of host rules used to configure the Ingress. If unspecified, or no rule matches, all traffic is sent to the default backend."', args=[d.arg(name='rules', type=d.T.array)]), + '#withRules':: d.fn(help='"rules is a list of host rules used to configure the Ingress. If unspecified, or no rule matches, all traffic is sent to the default backend."', args=[d.arg(name='rules', type=d.T.array)]), withRules(rules): { rules: if std.isArray(v=rules) then rules else [rules] }, - '#withRulesMixin':: d.fn(help='"A list of host rules used to configure the Ingress. If unspecified, or no rule matches, all traffic is sent to the default backend."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='rules', type=d.T.array)]), + '#withRulesMixin':: d.fn(help='"rules is a list of host rules used to configure the Ingress. If unspecified, or no rule matches, all traffic is sent to the default backend."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='rules', type=d.T.array)]), withRulesMixin(rules): { rules+: if std.isArray(v=rules) then rules else [rules] }, - '#withTls':: d.fn(help='"TLS configuration. Currently the Ingress only supports a single TLS port, 443. If multiple members of this list specify different hosts, they will be multiplexed on the same port according to the hostname specified through the SNI TLS extension, if the ingress controller fulfilling the ingress supports SNI."', args=[d.arg(name='tls', type=d.T.array)]), + '#withTls':: d.fn(help='"tls represents the TLS configuration. Currently the Ingress only supports a single TLS port, 443. If multiple members of this list specify different hosts, they will be multiplexed on the same port according to the hostname specified through the SNI TLS extension, if the ingress controller fulfilling the ingress supports SNI."', args=[d.arg(name='tls', type=d.T.array)]), withTls(tls): { tls: if std.isArray(v=tls) then tls else [tls] }, - '#withTlsMixin':: d.fn(help='"TLS configuration. Currently the Ingress only supports a single TLS port, 443. If multiple members of this list specify different hosts, they will be multiplexed on the same port according to the hostname specified through the SNI TLS extension, if the ingress controller fulfilling the ingress supports SNI."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='tls', type=d.T.array)]), + '#withTlsMixin':: d.fn(help='"tls represents the TLS configuration. Currently the Ingress only supports a single TLS port, 443. 
If multiple members of this list specify different hosts, they will be multiplexed on the same port according to the hostname specified through the SNI TLS extension, if the ingress controller fulfilling the ingress supports SNI."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='tls', type=d.T.array)]), withTlsMixin(tls): { tls+: if std.isArray(v=tls) then tls else [tls] }, '#mixin': 'ignore', mixin: self, diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/ingressStatus.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/ingressStatus.libsonnet new file mode 100644 index 00000000000..b2046d7f418 --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/ingressStatus.libsonnet @@ -0,0 +1,13 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='ingressStatus', url='', help='"IngressStatus describe the current state of the Ingress."'), + '#loadBalancer':: d.obj(help='"IngressLoadBalancerStatus represents the status of a load-balancer."'), + loadBalancer: { + '#withIngress':: d.fn(help='"ingress is a list containing ingress points for the load-balancer."', args=[d.arg(name='ingress', type=d.T.array)]), + withIngress(ingress): { loadBalancer+: { ingress: if std.isArray(v=ingress) then ingress else [ingress] } }, + '#withIngressMixin':: d.fn(help='"ingress is a list containing ingress points for the load-balancer."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ingress', type=d.T.array)]), + withIngressMixin(ingress): { loadBalancer+: { ingress+: if std.isArray(v=ingress) then ingress else [ingress] } }, + }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/ingressTLS.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/ingressTLS.libsonnet new file mode 100644 index 00000000000..16789baf635 --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/ingressTLS.libsonnet @@ -0,0 +1,12 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='ingressTLS', url='', help='"IngressTLS describes the transport layer security associated with an ingress."'), + '#withHosts':: d.fn(help='"hosts is a list of hosts included in the TLS certificate. The values in this list must match the name/s used in the tlsSecret. Defaults to the wildcard host setting for the loadbalancer controller fulfilling this Ingress, if left unspecified."', args=[d.arg(name='hosts', type=d.T.array)]), + withHosts(hosts): { hosts: if std.isArray(v=hosts) then hosts else [hosts] }, + '#withHostsMixin':: d.fn(help='"hosts is a list of hosts included in the TLS certificate. The values in this list must match the name/s used in the tlsSecret. Defaults to the wildcard host setting for the loadbalancer controller fulfilling this Ingress, if left unspecified."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='hosts', type=d.T.array)]), + withHostsMixin(hosts): { hosts+: if std.isArray(v=hosts) then hosts else [hosts] }, + '#withSecretName':: d.fn(help='"secretName is the name of the secret used to terminate TLS traffic on port 443. 
Field is left optional to allow TLS routing based on SNI hostname alone. If the SNI host in a listener conflicts with the \\"Host\\" header field used by an IngressRule, the SNI host is used for termination and value of the \\"Host\\" header is used for routing."', args=[d.arg(name='secretName', type=d.T.string)]), + withSecretName(secretName): { secretName: secretName }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/ipBlock.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/ipBlock.libsonnet new file mode 100644 index 00000000000..b54b0d6d997 --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/ipBlock.libsonnet @@ -0,0 +1,12 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='ipBlock', url='', help="\"IPBlock describes a particular CIDR (Ex. \\\"192.168.1.0/24\\\",\\\"2001:db8::/64\\\") that is allowed to the pods matched by a NetworkPolicySpec's podSelector. The except entry describes CIDRs that should not be included within this rule.\""), + '#withCidr':: d.fn(help='"cidr is a string representing the IPBlock Valid examples are \\"192.168.1.0/24\\" or \\"2001:db8::/64\\', args=[d.arg(name='cidr', type=d.T.string)]), + withCidr(cidr): { cidr: cidr }, + '#withExcept':: d.fn(help='"except is a slice of CIDRs that should not be included within an IPBlock Valid examples are \\"192.168.1.0/24\\" or \\"2001:db8::/64\\" Except values will be rejected if they are outside the cidr range"', args=[d.arg(name='except', type=d.T.array)]), + withExcept(except): { except: if std.isArray(v=except) then except else [except] }, + '#withExceptMixin':: d.fn(help='"except is a slice of CIDRs that should not be included within an IPBlock Valid examples are \\"192.168.1.0/24\\" or \\"2001:db8::/64\\" Except values will be rejected if they are outside the cidr range"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='except', type=d.T.array)]), + withExceptMixin(except): { except+: if std.isArray(v=except) then except else [except] }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1/main.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/main.libsonnet similarity index 85% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1/main.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/main.libsonnet index fdfbdc14ac6..fd4e3e05119 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1/main.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/main.libsonnet @@ -8,6 +8,9 @@ ingressClass: (import 'ingressClass.libsonnet'), ingressClassParametersReference: (import 'ingressClassParametersReference.libsonnet'), ingressClassSpec: (import 'ingressClassSpec.libsonnet'), + ingressLoadBalancerIngress: (import 'ingressLoadBalancerIngress.libsonnet'), + ingressLoadBalancerStatus: (import 'ingressLoadBalancerStatus.libsonnet'), + ingressPortStatus: (import 'ingressPortStatus.libsonnet'), ingressRule: (import 
'ingressRule.libsonnet'), ingressServiceBackend: (import 'ingressServiceBackend.libsonnet'), ingressSpec: (import 'ingressSpec.libsonnet'), diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1/networkPolicy.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/networkPolicy.libsonnet similarity index 68% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1/networkPolicy.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/networkPolicy.libsonnet index 6053fefe663..49ca87f7821 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1/networkPolicy.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/networkPolicy.libsonnet @@ -3,12 +3,10 @@ '#':: d.pkg(name='networkPolicy', url='', help='"NetworkPolicy describes what network traffic is allowed for a set of Pods"'), '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. 
This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -19,21 +17,21 @@ withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. 
This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { metadata+: { generateName: generateName } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { metadata+: { labels+: labels } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". 
The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { metadata+: { namespace: namespace } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, @@ -41,9 +39,9 @@ withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. 
They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { metadata+: { uid: uid } }, }, '#new':: d.fn(help='new returns an instance of NetworkPolicy', args=[d.arg(name='name', type=d.T.string)]), @@ -64,17 +62,17 @@ '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), withMatchLabelsMixin(matchLabels): { spec+: { podSelector+: { matchLabels+: matchLabels } } }, }, - '#withEgress':: d.fn(help='"List of egress rules to be applied to the selected pods. Outgoing traffic is allowed if there are no NetworkPolicies selecting the pod (and cluster policy otherwise allows the traffic), OR if the traffic matches at least one egress rule across all of the NetworkPolicy objects whose podSelector matches the pod. If this field is empty then this NetworkPolicy limits all outgoing traffic (and serves solely to ensure that the pods it selects are isolated by default). This field is beta-level in 1.8"', args=[d.arg(name='egress', type=d.T.array)]), + '#withEgress':: d.fn(help='"egress is a list of egress rules to be applied to the selected pods. Outgoing traffic is allowed if there are no NetworkPolicies selecting the pod (and cluster policy otherwise allows the traffic), OR if the traffic matches at least one egress rule across all of the NetworkPolicy objects whose podSelector matches the pod. If this field is empty then this NetworkPolicy limits all outgoing traffic (and serves solely to ensure that the pods it selects are isolated by default). 
This field is beta-level in 1.8"', args=[d.arg(name='egress', type=d.T.array)]), withEgress(egress): { spec+: { egress: if std.isArray(v=egress) then egress else [egress] } }, - '#withEgressMixin':: d.fn(help='"List of egress rules to be applied to the selected pods. Outgoing traffic is allowed if there are no NetworkPolicies selecting the pod (and cluster policy otherwise allows the traffic), OR if the traffic matches at least one egress rule across all of the NetworkPolicy objects whose podSelector matches the pod. If this field is empty then this NetworkPolicy limits all outgoing traffic (and serves solely to ensure that the pods it selects are isolated by default). This field is beta-level in 1.8"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='egress', type=d.T.array)]), + '#withEgressMixin':: d.fn(help='"egress is a list of egress rules to be applied to the selected pods. Outgoing traffic is allowed if there are no NetworkPolicies selecting the pod (and cluster policy otherwise allows the traffic), OR if the traffic matches at least one egress rule across all of the NetworkPolicy objects whose podSelector matches the pod. If this field is empty then this NetworkPolicy limits all outgoing traffic (and serves solely to ensure that the pods it selects are isolated by default). This field is beta-level in 1.8"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='egress', type=d.T.array)]), withEgressMixin(egress): { spec+: { egress+: if std.isArray(v=egress) then egress else [egress] } }, - '#withIngress':: d.fn(help="\"List of ingress rules to be applied to the selected pods. Traffic is allowed to a pod if there are no NetworkPolicies selecting the pod (and cluster policy otherwise allows the traffic), OR if the traffic source is the pod's local node, OR if the traffic matches at least one ingress rule across all of the NetworkPolicy objects whose podSelector matches the pod. If this field is empty then this NetworkPolicy does not allow any traffic (and serves solely to ensure that the pods it selects are isolated by default)\"", args=[d.arg(name='ingress', type=d.T.array)]), + '#withIngress':: d.fn(help="\"ingress is a list of ingress rules to be applied to the selected pods. Traffic is allowed to a pod if there are no NetworkPolicies selecting the pod (and cluster policy otherwise allows the traffic), OR if the traffic source is the pod's local node, OR if the traffic matches at least one ingress rule across all of the NetworkPolicy objects whose podSelector matches the pod. If this field is empty then this NetworkPolicy does not allow any traffic (and serves solely to ensure that the pods it selects are isolated by default)\"", args=[d.arg(name='ingress', type=d.T.array)]), withIngress(ingress): { spec+: { ingress: if std.isArray(v=ingress) then ingress else [ingress] } }, - '#withIngressMixin':: d.fn(help="\"List of ingress rules to be applied to the selected pods. Traffic is allowed to a pod if there are no NetworkPolicies selecting the pod (and cluster policy otherwise allows the traffic), OR if the traffic source is the pod's local node, OR if the traffic matches at least one ingress rule across all of the NetworkPolicy objects whose podSelector matches the pod. 
If this field is empty then this NetworkPolicy does not allow any traffic (and serves solely to ensure that the pods it selects are isolated by default)\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='ingress', type=d.T.array)]), + '#withIngressMixin':: d.fn(help="\"ingress is a list of ingress rules to be applied to the selected pods. Traffic is allowed to a pod if there are no NetworkPolicies selecting the pod (and cluster policy otherwise allows the traffic), OR if the traffic source is the pod's local node, OR if the traffic matches at least one ingress rule across all of the NetworkPolicy objects whose podSelector matches the pod. If this field is empty then this NetworkPolicy does not allow any traffic (and serves solely to ensure that the pods it selects are isolated by default)\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='ingress', type=d.T.array)]), withIngressMixin(ingress): { spec+: { ingress+: if std.isArray(v=ingress) then ingress else [ingress] } }, - '#withPolicyTypes':: d.fn(help='"List of rule types that the NetworkPolicy relates to. Valid options are [\\"Ingress\\"], [\\"Egress\\"], or [\\"Ingress\\", \\"Egress\\"]. If this field is not specified, it will default based on the existence of Ingress or Egress rules; policies that contain an Egress section are assumed to affect Egress, and all policies (whether or not they contain an Ingress section) are assumed to affect Ingress. If you want to write an egress-only policy, you must explicitly specify policyTypes [ \\"Egress\\" ]. Likewise, if you want to write a policy that specifies that no egress is allowed, you must specify a policyTypes value that include \\"Egress\\" (since such a policy would not include an Egress section and would otherwise default to just [ \\"Ingress\\" ]). This field is beta-level in 1.8"', args=[d.arg(name='policyTypes', type=d.T.array)]), + '#withPolicyTypes':: d.fn(help='"policyTypes is a list of rule types that the NetworkPolicy relates to. Valid options are [\\"Ingress\\"], [\\"Egress\\"], or [\\"Ingress\\", \\"Egress\\"]. If this field is not specified, it will default based on the existence of ingress or egress rules; policies that contain an egress section are assumed to affect egress, and all policies (whether or not they contain an ingress section) are assumed to affect ingress. If you want to write an egress-only policy, you must explicitly specify policyTypes [ \\"Egress\\" ]. Likewise, if you want to write a policy that specifies that no egress is allowed, you must specify a policyTypes value that include \\"Egress\\" (since such a policy would not include an egress section and would otherwise default to just [ \\"Ingress\\" ]). This field is beta-level in 1.8"', args=[d.arg(name='policyTypes', type=d.T.array)]), withPolicyTypes(policyTypes): { spec+: { policyTypes: if std.isArray(v=policyTypes) then policyTypes else [policyTypes] } }, - '#withPolicyTypesMixin':: d.fn(help='"List of rule types that the NetworkPolicy relates to. Valid options are [\\"Ingress\\"], [\\"Egress\\"], or [\\"Ingress\\", \\"Egress\\"]. If this field is not specified, it will default based on the existence of Ingress or Egress rules; policies that contain an Egress section are assumed to affect Egress, and all policies (whether or not they contain an Ingress section) are assumed to affect Ingress. If you want to write an egress-only policy, you must explicitly specify policyTypes [ \\"Egress\\" ]. 
Likewise, if you want to write a policy that specifies that no egress is allowed, you must specify a policyTypes value that include \\"Egress\\" (since such a policy would not include an Egress section and would otherwise default to just [ \\"Ingress\\" ]). This field is beta-level in 1.8"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='policyTypes', type=d.T.array)]), + '#withPolicyTypesMixin':: d.fn(help='"policyTypes is a list of rule types that the NetworkPolicy relates to. Valid options are [\\"Ingress\\"], [\\"Egress\\"], or [\\"Ingress\\", \\"Egress\\"]. If this field is not specified, it will default based on the existence of ingress or egress rules; policies that contain an egress section are assumed to affect egress, and all policies (whether or not they contain an ingress section) are assumed to affect ingress. If you want to write an egress-only policy, you must explicitly specify policyTypes [ \\"Egress\\" ]. Likewise, if you want to write a policy that specifies that no egress is allowed, you must specify a policyTypes value that include \\"Egress\\" (since such a policy would not include an egress section and would otherwise default to just [ \\"Ingress\\" ]). This field is beta-level in 1.8"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='policyTypes', type=d.T.array)]), withPolicyTypesMixin(policyTypes): { spec+: { policyTypes+: if std.isArray(v=policyTypes) then policyTypes else [policyTypes] } }, }, '#mixin': 'ignore', diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/networkPolicyEgressRule.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/networkPolicyEgressRule.libsonnet new file mode 100644 index 00000000000..0ccc9fcf150 --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/networkPolicyEgressRule.libsonnet @@ -0,0 +1,14 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='networkPolicyEgressRule', url='', help="\"NetworkPolicyEgressRule describes a particular set of traffic that is allowed out of pods matched by a NetworkPolicySpec's podSelector. The traffic must match both ports and to. This type is beta-level in 1.8\""), + '#withPorts':: d.fn(help='"ports is a list of destination ports for outgoing traffic. Each item in this list is combined using a logical OR. If this field is empty or missing, this rule matches all ports (traffic not restricted by port). If this field is present and contains at least one item, then this rule allows traffic only if the traffic matches at least one port in the list."', args=[d.arg(name='ports', type=d.T.array)]), + withPorts(ports): { ports: if std.isArray(v=ports) then ports else [ports] }, + '#withPortsMixin':: d.fn(help='"ports is a list of destination ports for outgoing traffic. Each item in this list is combined using a logical OR. If this field is empty or missing, this rule matches all ports (traffic not restricted by port). 
If this field is present and contains at least one item, then this rule allows traffic only if the traffic matches at least one port in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ports', type=d.T.array)]), + withPortsMixin(ports): { ports+: if std.isArray(v=ports) then ports else [ports] }, + '#withTo':: d.fn(help='"to is a list of destinations for outgoing traffic of pods selected for this rule. Items in this list are combined using a logical OR operation. If this field is empty or missing, this rule matches all destinations (traffic not restricted by destination). If this field is present and contains at least one item, this rule allows traffic only if the traffic matches at least one item in the to list."', args=[d.arg(name='to', type=d.T.array)]), + withTo(to): { to: if std.isArray(v=to) then to else [to] }, + '#withToMixin':: d.fn(help='"to is a list of destinations for outgoing traffic of pods selected for this rule. Items in this list are combined using a logical OR operation. If this field is empty or missing, this rule matches all destinations (traffic not restricted by destination). If this field is present and contains at least one item, this rule allows traffic only if the traffic matches at least one item in the to list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='to', type=d.T.array)]), + withToMixin(to): { to+: if std.isArray(v=to) then to else [to] }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/networkPolicyIngressRule.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/networkPolicyIngressRule.libsonnet new file mode 100644 index 00000000000..fbe27f9f8d9 --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/networkPolicyIngressRule.libsonnet @@ -0,0 +1,14 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='networkPolicyIngressRule', url='', help="\"NetworkPolicyIngressRule describes a particular set of traffic that is allowed to the pods matched by a NetworkPolicySpec's podSelector. The traffic must match both ports and from.\""), + '#withFrom':: d.fn(help='"from is a list of sources which should be able to access the pods selected for this rule. Items in this list are combined using a logical OR operation. If this field is empty or missing, this rule matches all sources (traffic not restricted by source). If this field is present and contains at least one item, this rule allows traffic only if the traffic matches at least one item in the from list."', args=[d.arg(name='from', type=d.T.array)]), + withFrom(from): { from: if std.isArray(v=from) then from else [from] }, + '#withFromMixin':: d.fn(help='"from is a list of sources which should be able to access the pods selected for this rule. Items in this list are combined using a logical OR operation. If this field is empty or missing, this rule matches all sources (traffic not restricted by source). 
If this field is present and contains at least one item, this rule allows traffic only if the traffic matches at least one item in the from list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='from', type=d.T.array)]), + withFromMixin(from): { from+: if std.isArray(v=from) then from else [from] }, + '#withPorts':: d.fn(help='"ports is a list of ports which should be made accessible on the pods selected for this rule. Each item in this list is combined using a logical OR. If this field is empty or missing, this rule matches all ports (traffic not restricted by port). If this field is present and contains at least one item, then this rule allows traffic only if the traffic matches at least one port in the list."', args=[d.arg(name='ports', type=d.T.array)]), + withPorts(ports): { ports: if std.isArray(v=ports) then ports else [ports] }, + '#withPortsMixin':: d.fn(help='"ports is a list of ports which should be made accessible on the pods selected for this rule. Each item in this list is combined using a logical OR. If this field is empty or missing, this rule matches all ports (traffic not restricted by port). If this field is present and contains at least one item, then this rule allows traffic only if the traffic matches at least one port in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ports', type=d.T.array)]), + withPortsMixin(ports): { ports+: if std.isArray(v=ports) then ports else [ports] }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1/networkPolicyPeer.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/networkPolicyPeer.libsonnet similarity index 84% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1/networkPolicyPeer.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/networkPolicyPeer.libsonnet index 0ed7c3a58d8..beac3d05cc4 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1/networkPolicyPeer.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/networkPolicyPeer.libsonnet @@ -1,13 +1,13 @@ { local d = (import 'doc-util/main.libsonnet'), '#':: d.pkg(name='networkPolicyPeer', url='', help='"NetworkPolicyPeer describes a peer to allow traffic to/from. Only certain combinations of fields are allowed"'), - '#ipBlock':: d.obj(help="\"IPBlock describes a particular CIDR (Ex. \\\"192.168.1.1/24\\\",\\\"2001:db9::/64\\\") that is allowed to the pods matched by a NetworkPolicySpec's podSelector. The except entry describes CIDRs that should not be included within this rule.\""), + '#ipBlock':: d.obj(help="\"IPBlock describes a particular CIDR (Ex. \\\"192.168.1.0/24\\\",\\\"2001:db8::/64\\\") that is allowed to the pods matched by a NetworkPolicySpec's podSelector. 
The except entry describes CIDRs that should not be included within this rule.\""), ipBlock: { - '#withCidr':: d.fn(help='"CIDR is a string representing the IP Block Valid examples are \\"192.168.1.1/24\\" or \\"2001:db9::/64\\', args=[d.arg(name='cidr', type=d.T.string)]), + '#withCidr':: d.fn(help='"cidr is a string representing the IPBlock Valid examples are \\"192.168.1.0/24\\" or \\"2001:db8::/64\\', args=[d.arg(name='cidr', type=d.T.string)]), withCidr(cidr): { ipBlock+: { cidr: cidr } }, - '#withExcept':: d.fn(help='"Except is a slice of CIDRs that should not be included within an IP Block Valid examples are \\"192.168.1.1/24\\" or \\"2001:db9::/64\\" Except values will be rejected if they are outside the CIDR range"', args=[d.arg(name='except', type=d.T.array)]), + '#withExcept':: d.fn(help='"except is a slice of CIDRs that should not be included within an IPBlock Valid examples are \\"192.168.1.0/24\\" or \\"2001:db8::/64\\" Except values will be rejected if they are outside the cidr range"', args=[d.arg(name='except', type=d.T.array)]), withExcept(except): { ipBlock+: { except: if std.isArray(v=except) then except else [except] } }, - '#withExceptMixin':: d.fn(help='"Except is a slice of CIDRs that should not be included within an IP Block Valid examples are \\"192.168.1.1/24\\" or \\"2001:db9::/64\\" Except values will be rejected if they are outside the CIDR range"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='except', type=d.T.array)]), + '#withExceptMixin':: d.fn(help='"except is a slice of CIDRs that should not be included within an IPBlock Valid examples are \\"192.168.1.0/24\\" or \\"2001:db8::/64\\" Except values will be rejected if they are outside the cidr range"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='except', type=d.T.array)]), withExceptMixin(except): { ipBlock+: { except+: if std.isArray(v=except) then except else [except] } }, }, '#namespaceSelector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/networkPolicyPort.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/networkPolicyPort.libsonnet new file mode 100644 index 00000000000..3c98019075c --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/networkPolicyPort.libsonnet @@ -0,0 +1,12 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='networkPolicyPort', url='', help='"NetworkPolicyPort describes a port to allow traffic on"'), + '#withEndPort':: d.fn(help='"endPort indicates that the range of ports from port to endPort if set, inclusive, should be allowed by the policy. This field cannot be defined if the port field is not defined or if the port field is defined as a named (string) port. The endPort must be equal or greater than port."', args=[d.arg(name='endPort', type=d.T.integer)]), + withEndPort(endPort): { endPort: endPort }, + '#withPort':: d.fn(help='"IntOrString is a type that can hold an int32 or a string. When used in JSON or YAML marshalling and unmarshalling, it produces or consumes the inner type. 
This allows you to have, for example, a JSON field that can accept a name or number."', args=[d.arg(name='port', type=d.T.string)]), + withPort(port): { port: port }, + '#withProtocol':: d.fn(help='"protocol represents the protocol (TCP, UDP, or SCTP) which traffic must match. If not specified, this field defaults to TCP."', args=[d.arg(name='protocol', type=d.T.string)]), + withProtocol(protocol): { protocol: protocol }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/networkPolicySpec.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/networkPolicySpec.libsonnet new file mode 100644 index 00000000000..d8efd2f2666 --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/networkPolicySpec.libsonnet @@ -0,0 +1,29 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='networkPolicySpec', url='', help='"NetworkPolicySpec provides the specification of a NetworkPolicy"'), + '#podSelector':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), + podSelector: { + '#withMatchExpressions':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressions(matchExpressions): { podSelector+: { matchExpressions: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } }, + '#withMatchExpressionsMixin':: d.fn(help='"matchExpressions is a list of label selector requirements. The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchExpressions', type=d.T.array)]), + withMatchExpressionsMixin(matchExpressions): { podSelector+: { matchExpressions+: if std.isArray(v=matchExpressions) then matchExpressions else [matchExpressions] } }, + '#withMatchLabels':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabels(matchLabels): { podSelector+: { matchLabels: matchLabels } }, + '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), + withMatchLabelsMixin(matchLabels): { podSelector+: { matchLabels+: matchLabels } }, + }, + '#withEgress':: d.fn(help='"egress is a list of egress rules to be applied to the selected pods. Outgoing traffic is allowed if there are no NetworkPolicies selecting the pod (and cluster policy otherwise allows the traffic), OR if the traffic matches at least one egress rule across all of the NetworkPolicy objects whose podSelector matches the pod. 
If this field is empty then this NetworkPolicy limits all outgoing traffic (and serves solely to ensure that the pods it selects are isolated by default). This field is beta-level in 1.8"', args=[d.arg(name='egress', type=d.T.array)]), + withEgress(egress): { egress: if std.isArray(v=egress) then egress else [egress] }, + '#withEgressMixin':: d.fn(help='"egress is a list of egress rules to be applied to the selected pods. Outgoing traffic is allowed if there are no NetworkPolicies selecting the pod (and cluster policy otherwise allows the traffic), OR if the traffic matches at least one egress rule across all of the NetworkPolicy objects whose podSelector matches the pod. If this field is empty then this NetworkPolicy limits all outgoing traffic (and serves solely to ensure that the pods it selects are isolated by default). This field is beta-level in 1.8"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='egress', type=d.T.array)]), + withEgressMixin(egress): { egress+: if std.isArray(v=egress) then egress else [egress] }, + '#withIngress':: d.fn(help="\"ingress is a list of ingress rules to be applied to the selected pods. Traffic is allowed to a pod if there are no NetworkPolicies selecting the pod (and cluster policy otherwise allows the traffic), OR if the traffic source is the pod's local node, OR if the traffic matches at least one ingress rule across all of the NetworkPolicy objects whose podSelector matches the pod. If this field is empty then this NetworkPolicy does not allow any traffic (and serves solely to ensure that the pods it selects are isolated by default)\"", args=[d.arg(name='ingress', type=d.T.array)]), + withIngress(ingress): { ingress: if std.isArray(v=ingress) then ingress else [ingress] }, + '#withIngressMixin':: d.fn(help="\"ingress is a list of ingress rules to be applied to the selected pods. Traffic is allowed to a pod if there are no NetworkPolicies selecting the pod (and cluster policy otherwise allows the traffic), OR if the traffic source is the pod's local node, OR if the traffic matches at least one ingress rule across all of the NetworkPolicy objects whose podSelector matches the pod. If this field is empty then this NetworkPolicy does not allow any traffic (and serves solely to ensure that the pods it selects are isolated by default)\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='ingress', type=d.T.array)]), + withIngressMixin(ingress): { ingress+: if std.isArray(v=ingress) then ingress else [ingress] }, + '#withPolicyTypes':: d.fn(help='"policyTypes is a list of rule types that the NetworkPolicy relates to. Valid options are [\\"Ingress\\"], [\\"Egress\\"], or [\\"Ingress\\", \\"Egress\\"]. If this field is not specified, it will default based on the existence of ingress or egress rules; policies that contain an egress section are assumed to affect egress, and all policies (whether or not they contain an ingress section) are assumed to affect ingress. If you want to write an egress-only policy, you must explicitly specify policyTypes [ \\"Egress\\" ]. Likewise, if you want to write a policy that specifies that no egress is allowed, you must specify a policyTypes value that include \\"Egress\\" (since such a policy would not include an egress section and would otherwise default to just [ \\"Ingress\\" ]). 
This field is beta-level in 1.8"', args=[d.arg(name='policyTypes', type=d.T.array)]), + withPolicyTypes(policyTypes): { policyTypes: if std.isArray(v=policyTypes) then policyTypes else [policyTypes] }, + '#withPolicyTypesMixin':: d.fn(help='"policyTypes is a list of rule types that the NetworkPolicy relates to. Valid options are [\\"Ingress\\"], [\\"Egress\\"], or [\\"Ingress\\", \\"Egress\\"]. If this field is not specified, it will default based on the existence of ingress or egress rules; policies that contain an egress section are assumed to affect egress, and all policies (whether or not they contain an ingress section) are assumed to affect ingress. If you want to write an egress-only policy, you must explicitly specify policyTypes [ \\"Egress\\" ]. Likewise, if you want to write a policy that specifies that no egress is allowed, you must specify a policyTypes value that include \\"Egress\\" (since such a policy would not include an egress section and would otherwise default to just [ \\"Ingress\\" ]). This field is beta-level in 1.8"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='policyTypes', type=d.T.array)]), + withPolicyTypesMixin(policyTypes): { policyTypes+: if std.isArray(v=policyTypes) then policyTypes else [policyTypes] }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1/serviceBackendPort.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/serviceBackendPort.libsonnet similarity index 80% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1/serviceBackendPort.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/serviceBackendPort.libsonnet index c22ffe1326f..b3f4955e902 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/networking/v1/serviceBackendPort.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1/serviceBackendPort.libsonnet @@ -1,9 +1,9 @@ { local d = (import 'doc-util/main.libsonnet'), '#':: d.pkg(name='serviceBackendPort', url='', help='"ServiceBackendPort is the service port being referenced."'), - '#withName':: d.fn(help='"Name is the name of the port on the Service. This is a mutually exclusive setting with \\"Number\\"."', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"name is the name of the port on the Service. This is a mutually exclusive setting with \\"Number\\"."', args=[d.arg(name='name', type=d.T.string)]), withName(name): { name: name }, - '#withNumber':: d.fn(help='"Number is the numerical port number (e.g. 80) on the Service. This is a mutually exclusive setting with \\"Name\\"."', args=[d.arg(name='number', type=d.T.integer)]), + '#withNumber':: d.fn(help='"number is the numerical port number (e.g. 80) on the Service. 
This is a mutually exclusive setting with \\"Name\\"."', args=[d.arg(name='number', type=d.T.integer)]), withNumber(number): { number: number }, '#mixin': 'ignore', mixin: self, diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1alpha1/ipAddress.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1alpha1/ipAddress.libsonnet new file mode 100644 index 00000000000..0bbfa21c89a --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1alpha1/ipAddress.libsonnet @@ -0,0 +1,68 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='ipAddress', url='', help='"IPAddress represents a single IP of a single IP Family. The object is designed to be used by APIs that operate on IP addresses. The object is used by the Service core API for allocation of IP addresses. An IP address can be represented in different formats, to guarantee the uniqueness of the IP, the name of the object is the IP address in canonical format, four decimal digits separated by dots suppressing leading zeros for IPv4 and the representation defined by RFC 5952 for IPv6. Valid: 192.168.1.5 or 2001:db8::1 or 2001:db8:aaaa:bbbb:cccc:dddd:eeee:1 Invalid: 10.01.2.3 or 2001:db8:0:0:0::1"'), + '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), + metadata: { + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + withAnnotations(annotations): { metadata+: { annotations: annotations } }, + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, + '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), + withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, + '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), + withDeletionGracePeriodSeconds(deletionGracePeriodSeconds): { metadata+: { deletionGracePeriodSeconds: deletionGracePeriodSeconds } }, + '#withDeletionTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. 
Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='deletionTimestamp', type=d.T.string)]), + withDeletionTimestamp(deletionTimestamp): { metadata+: { deletionTimestamp: deletionTimestamp } }, + '#withFinalizers':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."', args=[d.arg(name='finalizers', type=d.T.array)]), + withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, + '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), + withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + withGenerateName(generateName): { metadata+: { generateName: generateName } }, + '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. 
Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), + withGeneration(generation): { metadata+: { generation: generation } }, + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), + withLabels(labels): { metadata+: { labels: labels } }, + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + withLabelsMixin(labels): { metadata+: { labels+: labels } }, + '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), + withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, + '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), + withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { metadata+: { name: name } }, + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + withNamespace(namespace): { metadata+: { namespace: namespace } }, + '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. 
If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), + withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, + '#withOwnerReferencesMixin':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ownerReferences', type=d.T.array)]), + withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, + '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), + withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), + withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), + withUid(uid): { metadata+: { uid: uid } }, + }, + '#new':: d.fn(help='new returns an instance of IPAddress', args=[d.arg(name='name', type=d.T.string)]), + new(name): { + apiVersion: 'networking.k8s.io/v1alpha1', + kind: 'IPAddress', + } + self.metadata.withName(name=name), + '#spec':: d.obj(help='"IPAddressSpec describe the attributes in an IP Address."'), + spec: { + '#parentRef':: d.obj(help='"ParentReference describes a reference to a parent object."'), + parentRef: { + '#withGroup':: d.fn(help='"Group is the group of the object being referenced."', args=[d.arg(name='group', type=d.T.string)]), + withGroup(group): { spec+: { parentRef+: { group: group } } }, + '#withName':: d.fn(help='"Name is the name of the object being referenced."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { spec+: { parentRef+: { name: name } } }, + '#withNamespace':: d.fn(help='"Namespace is the namespace of the object being referenced."', args=[d.arg(name='namespace', type=d.T.string)]), + withNamespace(namespace): { spec+: { parentRef+: { namespace: namespace } } }, + '#withResource':: d.fn(help='"Resource is the resource of the object being referenced."', args=[d.arg(name='resource', type=d.T.string)]), + withResource(resource): { spec+: { parentRef+: { resource: resource } } }, + }, + }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1alpha1/ipAddressSpec.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1alpha1/ipAddressSpec.libsonnet new file mode 100644 index 00000000000..95a01d19724 --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1alpha1/ipAddressSpec.libsonnet @@ -0,0 +1,17 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='ipAddressSpec', url='', help='"IPAddressSpec describe the attributes in an IP Address."'), + '#parentRef':: d.obj(help='"ParentReference describes a reference to a parent object."'), + parentRef: { + '#withGroup':: d.fn(help='"Group is the group of the object being referenced."', args=[d.arg(name='group', type=d.T.string)]), + withGroup(group): { parentRef+: { group: group } }, + '#withName':: d.fn(help='"Name is the name of the object being referenced."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { parentRef+: { name: name } }, + '#withNamespace':: d.fn(help='"Namespace is the namespace of the object being referenced."', args=[d.arg(name='namespace', type=d.T.string)]), + withNamespace(namespace): { parentRef+: { namespace: namespace } }, + '#withResource':: d.fn(help='"Resource is the resource of the object being referenced."', args=[d.arg(name='resource', type=d.T.string)]), + withResource(resource): { parentRef+: { resource: resource } }, + }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1alpha1/main.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1alpha1/main.libsonnet new file mode 100644 index 00000000000..094796ec803 --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1alpha1/main.libsonnet @@ -0,0 +1,10 @@ 
+{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='v1alpha1', url='', help=''), + ipAddress: (import 'ipAddress.libsonnet'), + ipAddressSpec: (import 'ipAddressSpec.libsonnet'), + parentReference: (import 'parentReference.libsonnet'), + serviceCIDR: (import 'serviceCIDR.libsonnet'), + serviceCIDRSpec: (import 'serviceCIDRSpec.libsonnet'), + serviceCIDRStatus: (import 'serviceCIDRStatus.libsonnet'), +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1alpha1/parentReference.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1alpha1/parentReference.libsonnet new file mode 100644 index 00000000000..89346173087 --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1alpha1/parentReference.libsonnet @@ -0,0 +1,14 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='parentReference', url='', help='"ParentReference describes a reference to a parent object."'), + '#withGroup':: d.fn(help='"Group is the group of the object being referenced."', args=[d.arg(name='group', type=d.T.string)]), + withGroup(group): { group: group }, + '#withName':: d.fn(help='"Name is the name of the object being referenced."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { name: name }, + '#withNamespace':: d.fn(help='"Namespace is the namespace of the object being referenced."', args=[d.arg(name='namespace', type=d.T.string)]), + withNamespace(namespace): { namespace: namespace }, + '#withResource':: d.fn(help='"Resource is the resource of the object being referenced."', args=[d.arg(name='resource', type=d.T.string)]), + withResource(resource): { resource: resource }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1alpha1/serviceCIDR.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1alpha1/serviceCIDR.libsonnet new file mode 100644 index 00000000000..7da564faee6 --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1alpha1/serviceCIDR.libsonnet @@ -0,0 +1,61 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='serviceCIDR', url='', help='"ServiceCIDR defines a range of IP addresses using CIDR format (e.g. 192.168.0.0/24 or 2001:db2::/64). This range is used to allocate ClusterIPs to Service objects."'), + '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), + metadata: { + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + withAnnotations(annotations): { metadata+: { annotations: annotations } }, + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, + '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), + withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, + '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), + withDeletionGracePeriodSeconds(deletionGracePeriodSeconds): { metadata+: { deletionGracePeriodSeconds: deletionGracePeriodSeconds } }, + '#withDeletionTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='deletionTimestamp', type=d.T.string)]), + withDeletionTimestamp(deletionTimestamp): { metadata+: { deletionTimestamp: deletionTimestamp } }, + '#withFinalizers':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."', args=[d.arg(name='finalizers', type=d.T.array)]), + withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, + '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. 
Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), + withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + withGenerateName(generateName): { metadata+: { generateName: generateName } }, + '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), + withGeneration(generation): { metadata+: { generation: generation } }, + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), + withLabels(labels): { metadata+: { labels: labels } }, + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + withLabelsMixin(labels): { metadata+: { labels+: labels } }, + '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), + withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, + '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". 
The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), + withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { metadata+: { name: name } }, + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + withNamespace(namespace): { metadata+: { namespace: namespace } }, + '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), + withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, + '#withOwnerReferencesMixin':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ownerReferences', type=d.T.array)]), + withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, + '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), + withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), + withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), + withUid(uid): { metadata+: { uid: uid } }, + }, + '#new':: d.fn(help='new returns an instance of ServiceCIDR', args=[d.arg(name='name', type=d.T.string)]), + new(name): { + apiVersion: 'networking.k8s.io/v1alpha1', + kind: 'ServiceCIDR', + } + self.metadata.withName(name=name), + '#spec':: d.obj(help='"ServiceCIDRSpec define the CIDRs the user wants to use for allocating ClusterIPs for Services."'), + spec: { + '#withCidrs':: d.fn(help='"CIDRs defines the IP blocks in CIDR notation (e.g. \\"192.168.0.0/24\\" or \\"2001:db8::/64\\") from which to assign service cluster IPs. Max of two CIDRs is allowed, one of each IP family. This field is immutable."', args=[d.arg(name='cidrs', type=d.T.array)]), + withCidrs(cidrs): { spec+: { cidrs: if std.isArray(v=cidrs) then cidrs else [cidrs] } }, + '#withCidrsMixin':: d.fn(help='"CIDRs defines the IP blocks in CIDR notation (e.g. \\"192.168.0.0/24\\" or \\"2001:db8::/64\\") from which to assign service cluster IPs. Max of two CIDRs is allowed, one of each IP family. This field is immutable."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='cidrs', type=d.T.array)]), + withCidrsMixin(cidrs): { spec+: { cidrs+: if std.isArray(v=cidrs) then cidrs else [cidrs] } }, + }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1alpha1/serviceCIDRSpec.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1alpha1/serviceCIDRSpec.libsonnet new file mode 100644 index 00000000000..9b68bc70bdc --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1alpha1/serviceCIDRSpec.libsonnet @@ -0,0 +1,10 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='serviceCIDRSpec', url='', help='"ServiceCIDRSpec define the CIDRs the user wants to use for allocating ClusterIPs for Services."'), + '#withCidrs':: d.fn(help='"CIDRs defines the IP blocks in CIDR notation (e.g. \\"192.168.0.0/24\\" or \\"2001:db8::/64\\") from which to assign service cluster IPs. Max of two CIDRs is allowed, one of each IP family. This field is immutable."', args=[d.arg(name='cidrs', type=d.T.array)]), + withCidrs(cidrs): { cidrs: if std.isArray(v=cidrs) then cidrs else [cidrs] }, + '#withCidrsMixin':: d.fn(help='"CIDRs defines the IP blocks in CIDR notation (e.g. \\"192.168.0.0/24\\" or \\"2001:db8::/64\\") from which to assign service cluster IPs. Max of two CIDRs is allowed, one of each IP family. 
This field is immutable."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='cidrs', type=d.T.array)]), + withCidrsMixin(cidrs): { cidrs+: if std.isArray(v=cidrs) then cidrs else [cidrs] }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1alpha1/serviceCIDRStatus.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1alpha1/serviceCIDRStatus.libsonnet new file mode 100644 index 00000000000..bfc5133f6a8 --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/networking/v1alpha1/serviceCIDRStatus.libsonnet @@ -0,0 +1,10 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='serviceCIDRStatus', url='', help='"ServiceCIDRStatus describes the current state of the ServiceCIDR."'), + '#withConditions':: d.fn(help='"conditions holds an array of metav1.Condition that describe the state of the ServiceCIDR. Current service state"', args=[d.arg(name='conditions', type=d.T.array)]), + withConditions(conditions): { conditions: if std.isArray(v=conditions) then conditions else [conditions] }, + '#withConditionsMixin':: d.fn(help='"conditions holds an array of metav1.Condition that describe the state of the ServiceCIDR. Current service state"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='conditions', type=d.T.array)]), + withConditionsMixin(conditions): { conditions+: if std.isArray(v=conditions) then conditions else [conditions] }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/node/main.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/node/main.libsonnet similarity index 58% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/node/main.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/node/main.libsonnet index 5ffbc8e086e..eec7a2a6401 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/node/main.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/node/main.libsonnet @@ -2,6 +2,4 @@ local d = (import 'doc-util/main.libsonnet'), '#':: d.pkg(name='node', url='', help=''), v1: (import 'v1/main.libsonnet'), - v1alpha1: (import 'v1alpha1/main.libsonnet'), - v1beta1: (import 'v1beta1/main.libsonnet'), } diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/node/v1/main.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/node/v1/main.libsonnet similarity index 100% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/node/v1/main.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/node/v1/main.libsonnet diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/node/v1alpha1/overhead.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/node/v1/overhead.libsonnet similarity index 82% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/node/v1alpha1/overhead.libsonnet rename to 
operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/node/v1/overhead.libsonnet index fa33b3e39f3..9960b6f88c8 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/node/v1alpha1/overhead.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/node/v1/overhead.libsonnet @@ -1,9 +1,9 @@ { local d = (import 'doc-util/main.libsonnet'), '#':: d.pkg(name='overhead', url='', help='"Overhead structure represents the resource overhead associated with running a pod."'), - '#withPodFixed':: d.fn(help='"PodFixed represents the fixed resource overhead associated with running a pod."', args=[d.arg(name='podFixed', type=d.T.object)]), + '#withPodFixed':: d.fn(help='"podFixed represents the fixed resource overhead associated with running a pod."', args=[d.arg(name='podFixed', type=d.T.object)]), withPodFixed(podFixed): { podFixed: podFixed }, - '#withPodFixedMixin':: d.fn(help='"PodFixed represents the fixed resource overhead associated with running a pod."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='podFixed', type=d.T.object)]), + '#withPodFixedMixin':: d.fn(help='"podFixed represents the fixed resource overhead associated with running a pod."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='podFixed', type=d.T.object)]), withPodFixedMixin(podFixed): { podFixed+: podFixed }, '#mixin': 'ignore', mixin: self, diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/node/v1/runtimeClass.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/node/v1/runtimeClass.libsonnet similarity index 86% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/node/v1/runtimeClass.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/node/v1/runtimeClass.libsonnet index e4493b10ac0..40dda33e7ac 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/node/v1/runtimeClass.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/node/v1/runtimeClass.libsonnet @@ -3,12 +3,10 @@ '#':: d.pkg(name='runtimeClass', url='', help='"RuntimeClass defines a class of container runtime supported in the cluster. The RuntimeClass is used to determine which container runtime is used to run all containers in a pod. RuntimeClasses are manually defined by a user or cluster provisioner, and referenced in the PodSpec. The Kubelet is responsible for resolving the RuntimeClassName reference before running the pod. For more details, see https://kubernetes.io/docs/concepts/containers/runtime-class/"'), '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -19,21 +17,21 @@ withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. 
Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { metadata+: { generateName: generateName } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. 
More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { metadata+: { labels+: labels } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. 
An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { metadata+: { namespace: namespace } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, @@ -41,9 +39,9 @@ withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { metadata+: { uid: uid } }, }, '#new':: d.fn(help='new returns an instance of RuntimeClass', args=[d.arg(name='name', type=d.T.string)]), @@ -53,9 +51,9 @@ } + self.metadata.withName(name=name), '#overhead':: d.obj(help='"Overhead structure represents the resource overhead associated with running a pod."'), overhead: { - '#withPodFixed':: d.fn(help='"PodFixed represents the fixed resource overhead associated with running a pod."', args=[d.arg(name='podFixed', type=d.T.object)]), + '#withPodFixed':: d.fn(help='"podFixed represents the fixed resource overhead associated with running a pod."', args=[d.arg(name='podFixed', type=d.T.object)]), withPodFixed(podFixed): { overhead+: { podFixed: podFixed } }, - '#withPodFixedMixin':: d.fn(help='"PodFixed represents the fixed resource overhead associated with running a pod."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='podFixed', type=d.T.object)]), + '#withPodFixedMixin':: d.fn(help='"podFixed represents the fixed resource overhead associated with running a pod."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='podFixed', type=d.T.object)]), withPodFixedMixin(podFixed): { overhead+: { podFixed+: podFixed } }, }, '#scheduling':: d.obj(help='"Scheduling specifies the scheduling constraints for nodes supporting a RuntimeClass."'), @@ -69,7 +67,7 @@ '#withTolerationsMixin':: d.fn(help='"tolerations are appended (excluding duplicates) to pods running with this RuntimeClass during admission, effectively unioning the set of nodes tolerated by the pod and the RuntimeClass."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='tolerations', type=d.T.array)]), withTolerationsMixin(tolerations): { scheduling+: { tolerations+: if std.isArray(v=tolerations) then tolerations else [tolerations] } }, }, - '#withHandler':: d.fn(help='"Handler specifies the underlying runtime and configuration that the CRI implementation will use to handle pods of this class. The possible values are specific to the node & CRI configuration. It is assumed that all handlers are available on every node, and handlers of the same name are equivalent on every node. For example, a handler called \\"runc\\" might specify that the runc OCI runtime (using native Linux containers) will be used to run the containers in a pod. The Handler must be lowercase, conform to the DNS Label (RFC 1123) requirements, and is immutable."', args=[d.arg(name='handler', type=d.T.string)]), + '#withHandler':: d.fn(help='"handler specifies the underlying runtime and configuration that the CRI implementation will use to handle pods of this class. The possible values are specific to the node & CRI configuration. It is assumed that all handlers are available on every node, and handlers of the same name are equivalent on every node. For example, a handler called \\"runc\\" might specify that the runc OCI runtime (using native Linux containers) will be used to run the containers in a pod. 
The Handler must be lowercase, conform to the DNS Label (RFC 1123) requirements, and is immutable."', args=[d.arg(name='handler', type=d.T.string)]), withHandler(handler): { handler: handler }, '#mixin': 'ignore', mixin: self, diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/node/v1alpha1/scheduling.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/node/v1/scheduling.libsonnet similarity index 100% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/node/v1alpha1/scheduling.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/node/v1/scheduling.libsonnet diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/main.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/policy/main.libsonnet similarity index 74% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/main.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/policy/main.libsonnet index 4d23cf36e8f..534c0f40701 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/main.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/policy/main.libsonnet @@ -2,5 +2,4 @@ local d = (import 'doc-util/main.libsonnet'), '#':: d.pkg(name='policy', url='', help=''), v1: (import 'v1/main.libsonnet'), - v1beta1: (import 'v1beta1/main.libsonnet'), } diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1beta1/eviction.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/policy/v1/eviction.libsonnet similarity index 87% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1beta1/eviction.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/policy/v1/eviction.libsonnet index 76adeae8fef..10a2ba87af1 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1beta1/eviction.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/policy/v1/eviction.libsonnet @@ -27,12 +27,10 @@ }, '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -43,21 +41,21 @@ withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. 
Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { metadata+: { generateName: generateName } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. 
More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { metadata+: { labels+: labels } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. 
An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { metadata+: { namespace: namespace } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, @@ -65,14 +63,14 @@ withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { metadata+: { uid: uid } }, }, '#new':: d.fn(help='new returns an instance of Eviction', args=[d.arg(name='name', type=d.T.string)]), new(name): { - apiVersion: 'policy/v1beta1', + apiVersion: 'policy/v1', kind: 'Eviction', } + self.metadata.withName(name=name), '#mixin': 'ignore', diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1/main.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/policy/v1/main.libsonnet similarity index 87% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1/main.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/policy/v1/main.libsonnet index 756aa865a8b..8edfbbf3cf7 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1/main.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/policy/v1/main.libsonnet @@ -1,6 +1,7 @@ { local d = (import 'doc-util/main.libsonnet'), '#':: d.pkg(name='v1', url='', help=''), + eviction: (import 'eviction.libsonnet'), podDisruptionBudget: (import 'podDisruptionBudget.libsonnet'), podDisruptionBudgetSpec: (import 'podDisruptionBudgetSpec.libsonnet'), podDisruptionBudgetStatus: (import 'podDisruptionBudgetStatus.libsonnet'), diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1/podDisruptionBudget.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/policy/v1/podDisruptionBudget.libsonnet similarity index 81% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1/podDisruptionBudget.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/policy/v1/podDisruptionBudget.libsonnet index b7fbf166da6..4be9c09cfbf 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1/podDisruptionBudget.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/policy/v1/podDisruptionBudget.libsonnet @@ -3,12 +3,10 @@ '#':: d.pkg(name='podDisruptionBudget', url='', help='"PodDisruptionBudget is an object to define the max disruption that can be caused to a collection of pods"'), '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -19,21 +17,21 @@ withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. 
Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { metadata+: { generateName: generateName } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. 
More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { metadata+: { labels+: labels } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. 
An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { metadata+: { namespace: namespace } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, @@ -41,9 +39,9 @@ withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { metadata+: { uid: uid } }, }, '#new':: d.fn(help='new returns an instance of PodDisruptionBudget', args=[d.arg(name='name', type=d.T.string)]), @@ -68,6 +66,8 @@ withMaxUnavailable(maxUnavailable): { spec+: { maxUnavailable: maxUnavailable } }, '#withMinAvailable':: d.fn(help='"IntOrString is a type that can hold an int32 or a string. When used in JSON or YAML marshalling and unmarshalling, it produces or consumes the inner type. This allows you to have, for example, a JSON field that can accept a name or number."', args=[d.arg(name='minAvailable', type=d.T.string)]), withMinAvailable(minAvailable): { spec+: { minAvailable: minAvailable } }, + '#withUnhealthyPodEvictionPolicy':: d.fn(help='"UnhealthyPodEvictionPolicy defines the criteria for when unhealthy pods should be considered for eviction. Current implementation considers healthy pods, as pods that have status.conditions item with type=\\"Ready\\",status=\\"True\\".\\n\\nValid policies are IfHealthyBudget and AlwaysAllow. If no policy is specified, the default behavior will be used, which corresponds to the IfHealthyBudget policy.\\n\\nIfHealthyBudget policy means that running pods (status.phase=\\"Running\\"), but not yet healthy can be evicted only if the guarded application is not disrupted (status.currentHealthy is at least equal to status.desiredHealthy). Healthy pods will be subject to the PDB for eviction.\\n\\nAlwaysAllow policy means that all running pods (status.phase=\\"Running\\"), but not yet healthy are considered disrupted and can be evicted regardless of whether the criteria in a PDB is met. This means perspective running pods of a disrupted application might not get a chance to become healthy. Healthy pods will be subject to the PDB for eviction.\\n\\nAdditional policies may be added in the future. Clients making eviction decisions should disallow eviction of unhealthy pods if they encounter an unrecognized policy in this field.\\n\\nThis field is beta-level. 
The eviction API uses this field when the feature gate PDBUnhealthyPodEvictionPolicy is enabled (enabled by default)."', args=[d.arg(name='unhealthyPodEvictionPolicy', type=d.T.string)]), + withUnhealthyPodEvictionPolicy(unhealthyPodEvictionPolicy): { spec+: { unhealthyPodEvictionPolicy: unhealthyPodEvictionPolicy } }, }, '#mixin': 'ignore', mixin: self, diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1/podDisruptionBudgetSpec.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/policy/v1/podDisruptionBudgetSpec.libsonnet similarity index 65% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1/podDisruptionBudgetSpec.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/policy/v1/podDisruptionBudgetSpec.libsonnet index 84917662145..93cd035440d 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1/podDisruptionBudgetSpec.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/policy/v1/podDisruptionBudgetSpec.libsonnet @@ -16,6 +16,8 @@ withMaxUnavailable(maxUnavailable): { maxUnavailable: maxUnavailable }, '#withMinAvailable':: d.fn(help='"IntOrString is a type that can hold an int32 or a string. When used in JSON or YAML marshalling and unmarshalling, it produces or consumes the inner type. This allows you to have, for example, a JSON field that can accept a name or number."', args=[d.arg(name='minAvailable', type=d.T.string)]), withMinAvailable(minAvailable): { minAvailable: minAvailable }, + '#withUnhealthyPodEvictionPolicy':: d.fn(help='"UnhealthyPodEvictionPolicy defines the criteria for when unhealthy pods should be considered for eviction. Current implementation considers healthy pods, as pods that have status.conditions item with type=\\"Ready\\",status=\\"True\\".\\n\\nValid policies are IfHealthyBudget and AlwaysAllow. If no policy is specified, the default behavior will be used, which corresponds to the IfHealthyBudget policy.\\n\\nIfHealthyBudget policy means that running pods (status.phase=\\"Running\\"), but not yet healthy can be evicted only if the guarded application is not disrupted (status.currentHealthy is at least equal to status.desiredHealthy). Healthy pods will be subject to the PDB for eviction.\\n\\nAlwaysAllow policy means that all running pods (status.phase=\\"Running\\"), but not yet healthy are considered disrupted and can be evicted regardless of whether the criteria in a PDB is met. This means perspective running pods of a disrupted application might not get a chance to become healthy. Healthy pods will be subject to the PDB for eviction.\\n\\nAdditional policies may be added in the future. Clients making eviction decisions should disallow eviction of unhealthy pods if they encounter an unrecognized policy in this field.\\n\\nThis field is beta-level. 
The eviction API uses this field when the feature gate PDBUnhealthyPodEvictionPolicy is enabled (enabled by default)."', args=[d.arg(name='unhealthyPodEvictionPolicy', type=d.T.string)]), + withUnhealthyPodEvictionPolicy(unhealthyPodEvictionPolicy): { unhealthyPodEvictionPolicy: unhealthyPodEvictionPolicy }, '#mixin': 'ignore', mixin: self, } diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1beta1/podDisruptionBudgetStatus.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/policy/v1/podDisruptionBudgetStatus.libsonnet similarity index 100% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/policy/v1beta1/podDisruptionBudgetStatus.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/policy/v1/podDisruptionBudgetStatus.libsonnet diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/main.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/rbac/main.libsonnet similarity index 58% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/main.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/rbac/main.libsonnet index f7d3fc1bb5b..de8fa2ef0f1 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/main.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/rbac/main.libsonnet @@ -2,6 +2,4 @@ local d = (import 'doc-util/main.libsonnet'), '#':: d.pkg(name='rbac', url='', help=''), v1: (import 'v1/main.libsonnet'), - v1alpha1: (import 'v1alpha1/main.libsonnet'), - v1beta1: (import 'v1beta1/main.libsonnet'), } diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1alpha1/aggregationRule.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/rbac/v1/aggregationRule.libsonnet similarity index 100% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1alpha1/aggregationRule.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/rbac/v1/aggregationRule.libsonnet diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1/clusterRole.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/rbac/v1/clusterRole.libsonnet similarity index 85% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1/clusterRole.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/rbac/v1/clusterRole.libsonnet index 1e710e1d8c4..2b0192bb5bf 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1/clusterRole.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/rbac/v1/clusterRole.libsonnet @@ -10,12 +10,10 @@ }, '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and 
retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -26,21 +24,21 @@ withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. 
If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { metadata+: { generateName: generateName } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { metadata+: { labels+: labels } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. 
Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { metadata+: { namespace: namespace } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, @@ -48,9 +46,9 @@ withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. 
Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { metadata+: { uid: uid } }, }, '#new':: d.fn(help='new returns an instance of ClusterRole', args=[d.arg(name='name', type=d.T.string)]), diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1/clusterRoleBinding.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/rbac/v1/clusterRoleBinding.libsonnet similarity index 85% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1/clusterRoleBinding.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/rbac/v1/clusterRoleBinding.libsonnet index 7f167b44f4f..4a29e743986 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1/clusterRoleBinding.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/rbac/v1/clusterRoleBinding.libsonnet @@ -3,12 +3,10 @@ '#':: d.pkg(name='clusterRoleBinding', url='', help='"ClusterRoleBinding references a ClusterRole, but not contain it. It can reference a ClusterRole in the global namespace, and adds who information via Subject."'), '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. 
This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -19,21 +17,21 @@ withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. 
This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { metadata+: { generateName: generateName } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { metadata+: { labels+: labels } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". 
The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { metadata+: { namespace: namespace } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, @@ -41,9 +39,9 @@ withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. 
They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { metadata+: { uid: uid } }, }, '#new':: d.fn(help='new returns an instance of ClusterRoleBinding', args=[d.arg(name='name', type=d.T.string)]), diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1/main.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/rbac/v1/main.libsonnet similarity index 100% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1/main.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/rbac/v1/main.libsonnet diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1/policyRule.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/rbac/v1/policyRule.libsonnet similarity index 73% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1/policyRule.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/rbac/v1/policyRule.libsonnet index 9b15ccb2c07..e42ac307f8b 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1/policyRule.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/rbac/v1/policyRule.libsonnet @@ -1,9 +1,9 @@ { local d = (import 'doc-util/main.libsonnet'), '#':: d.pkg(name='policyRule', url='', help='"PolicyRule holds information that describes a policy rule, but does not contain information about who the rule applies to or which namespace the rule applies to."'), - '#withApiGroups':: d.fn(help='"APIGroups is the name of the APIGroup that contains the resources. 
If multiple API groups are specified, any action requested against one of the enumerated resources in any API group will be allowed."', args=[d.arg(name='apiGroups', type=d.T.array)]), + '#withApiGroups':: d.fn(help='"APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of the enumerated resources in any API group will be allowed. \\"\\" represents the core API group and \\"*\\" represents all API groups."', args=[d.arg(name='apiGroups', type=d.T.array)]), withApiGroups(apiGroups): { apiGroups: if std.isArray(v=apiGroups) then apiGroups else [apiGroups] }, - '#withApiGroupsMixin':: d.fn(help='"APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of the enumerated resources in any API group will be allowed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='apiGroups', type=d.T.array)]), + '#withApiGroupsMixin':: d.fn(help='"APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of the enumerated resources in any API group will be allowed. \\"\\" represents the core API group and \\"*\\" represents all API groups."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='apiGroups', type=d.T.array)]), withApiGroupsMixin(apiGroups): { apiGroups+: if std.isArray(v=apiGroups) then apiGroups else [apiGroups] }, '#withNonResourceURLs':: d.fn(help='"NonResourceURLs is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path Since non-resource URLs are not namespaced, this field is only applicable for ClusterRoles referenced from a ClusterRoleBinding. Rules can either apply to API resources (such as \\"pods\\" or \\"secrets\\") or non-resource URL paths (such as \\"/api\\"), but not both."', args=[d.arg(name='nonResourceURLs', type=d.T.array)]), withNonResourceURLs(nonResourceURLs): { nonResourceURLs: if std.isArray(v=nonResourceURLs) then nonResourceURLs else [nonResourceURLs] }, @@ -13,13 +13,13 @@ withResourceNames(resourceNames): { resourceNames: if std.isArray(v=resourceNames) then resourceNames else [resourceNames] }, '#withResourceNamesMixin':: d.fn(help='"ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='resourceNames', type=d.T.array)]), withResourceNamesMixin(resourceNames): { resourceNames+: if std.isArray(v=resourceNames) then resourceNames else [resourceNames] }, - '#withResources':: d.fn(help='"Resources is a list of resources this rule applies to. ResourceAll represents all resources."', args=[d.arg(name='resources', type=d.T.array)]), + '#withResources':: d.fn(help="\"Resources is a list of resources this rule applies to. '*' represents all resources.\"", args=[d.arg(name='resources', type=d.T.array)]), withResources(resources): { resources: if std.isArray(v=resources) then resources else [resources] }, - '#withResourcesMixin':: d.fn(help='"Resources is a list of resources this rule applies to. ResourceAll represents all resources."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='resources', type=d.T.array)]), + '#withResourcesMixin':: d.fn(help="\"Resources is a list of resources this rule applies to. 
'*' represents all resources.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='resources', type=d.T.array)]), withResourcesMixin(resources): { resources+: if std.isArray(v=resources) then resources else [resources] }, - '#withVerbs':: d.fn(help='"Verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule. VerbAll represents all kinds."', args=[d.arg(name='verbs', type=d.T.array)]), + '#withVerbs':: d.fn(help="\"Verbs is a list of Verbs that apply to ALL the ResourceKinds contained in this rule. '*' represents all verbs.\"", args=[d.arg(name='verbs', type=d.T.array)]), withVerbs(verbs): { verbs: if std.isArray(v=verbs) then verbs else [verbs] }, - '#withVerbsMixin':: d.fn(help='"Verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule. VerbAll represents all kinds."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='verbs', type=d.T.array)]), + '#withVerbsMixin':: d.fn(help="\"Verbs is a list of Verbs that apply to ALL the ResourceKinds contained in this rule. '*' represents all verbs.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='verbs', type=d.T.array)]), withVerbsMixin(verbs): { verbs+: if std.isArray(v=verbs) then verbs else [verbs] }, '#mixin': 'ignore', mixin: self, diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1/role.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/rbac/v1/role.libsonnet similarity index 84% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1/role.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/rbac/v1/role.libsonnet index 960c71a7d75..295f6254585 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1/role.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/rbac/v1/role.libsonnet @@ -3,12 +3,10 @@ '#':: d.pkg(name='role', url='', help='"Role is a namespaced, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding."'), '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. 
They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -19,21 +17,21 @@ withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. 
The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { metadata+: { generateName: generateName } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { metadata+: { labels+: labels } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. 
This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { metadata+: { namespace: namespace } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. 
There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, @@ -41,9 +39,9 @@ withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { metadata+: { uid: uid } }, }, '#new':: d.fn(help='new returns an instance of Role', args=[d.arg(name='name', type=d.T.string)]), diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1/roleBinding.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/rbac/v1/roleBinding.libsonnet similarity index 85% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1/roleBinding.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/rbac/v1/roleBinding.libsonnet index 2a6d5dea7e8..75747fe5b52 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1/roleBinding.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/rbac/v1/roleBinding.libsonnet @@ -3,12 +3,10 @@ '#':: d.pkg(name='roleBinding', url='', help='"RoleBinding references a role, but does not contain it. It can reference a Role in the same namespace or a ClusterRole in the global namespace. It adds who information via Subjects and namespace information by which namespace it exists in. RoleBindings in a given namespace only have effect in that namespace."'), '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. 
This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -19,21 +17,21 @@ withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. 
This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { metadata+: { generateName: generateName } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { metadata+: { labels+: labels } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". 
The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { metadata+: { namespace: namespace } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, @@ -41,9 +39,9 @@ withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. 
They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { metadata+: { uid: uid } }, }, '#new':: d.fn(help='new returns an instance of RoleBinding', args=[d.arg(name='name', type=d.T.string)]), diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1alpha1/roleRef.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/rbac/v1/roleRef.libsonnet similarity index 100% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1alpha1/roleRef.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/rbac/v1/roleRef.libsonnet diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1beta1/subject.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/rbac/v1/subject.libsonnet similarity index 100% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1beta1/subject.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/rbac/v1/subject.libsonnet diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/resource/main.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/resource/main.libsonnet new file mode 100644 index 00000000000..98b5e9e085a --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/resource/main.libsonnet @@ -0,0 +1,5 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='resource', url='', help=''), + v1alpha2: (import 'v1alpha2/main.libsonnet'), +} diff --git 
a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/resource/v1alpha2/allocationResult.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/resource/v1alpha2/allocationResult.libsonnet new file mode 100644 index 00000000000..569aad3ff55 --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/resource/v1alpha2/allocationResult.libsonnet @@ -0,0 +1,19 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='allocationResult', url='', help='"AllocationResult contains attributes of an allocated resource."'), + '#availableOnNodes':: d.obj(help='"A node selector represents the union of the results of one or more label queries over a set of nodes; that is, it represents the OR of the selectors represented by the node selector terms."'), + availableOnNodes: { + '#withNodeSelectorTerms':: d.fn(help='"Required. A list of node selector terms. The terms are ORed."', args=[d.arg(name='nodeSelectorTerms', type=d.T.array)]), + withNodeSelectorTerms(nodeSelectorTerms): { availableOnNodes+: { nodeSelectorTerms: if std.isArray(v=nodeSelectorTerms) then nodeSelectorTerms else [nodeSelectorTerms] } }, + '#withNodeSelectorTermsMixin':: d.fn(help='"Required. A list of node selector terms. The terms are ORed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='nodeSelectorTerms', type=d.T.array)]), + withNodeSelectorTermsMixin(nodeSelectorTerms): { availableOnNodes+: { nodeSelectorTerms+: if std.isArray(v=nodeSelectorTerms) then nodeSelectorTerms else [nodeSelectorTerms] } }, + }, + '#withResourceHandles':: d.fn(help='"ResourceHandles contain the state associated with an allocation that should be maintained throughout the lifetime of a claim. Each ResourceHandle contains data that should be passed to a specific kubelet plugin once it lands on a node. This data is returned by the driver after a successful allocation and is opaque to Kubernetes. Driver documentation may explain to users how to interpret this data if needed.\\n\\nSetting this field is optional. It has a maximum size of 32 entries. If null (or empty), it is assumed this allocation will be processed by a single kubelet plugin with no ResourceHandle data attached. The name of the kubelet plugin invoked will match the DriverName set in the ResourceClaimStatus this AllocationResult is embedded in."', args=[d.arg(name='resourceHandles', type=d.T.array)]), + withResourceHandles(resourceHandles): { resourceHandles: if std.isArray(v=resourceHandles) then resourceHandles else [resourceHandles] }, + '#withResourceHandlesMixin':: d.fn(help='"ResourceHandles contain the state associated with an allocation that should be maintained throughout the lifetime of a claim. Each ResourceHandle contains data that should be passed to a specific kubelet plugin once it lands on a node. This data is returned by the driver after a successful allocation and is opaque to Kubernetes. Driver documentation may explain to users how to interpret this data if needed.\\n\\nSetting this field is optional. It has a maximum size of 32 entries. If null (or empty), it is assumed this allocation will be processed by a single kubelet plugin with no ResourceHandle data attached. 
The name of the kubelet plugin invoked will match the DriverName set in the ResourceClaimStatus this AllocationResult is embedded in."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='resourceHandles', type=d.T.array)]), + withResourceHandlesMixin(resourceHandles): { resourceHandles+: if std.isArray(v=resourceHandles) then resourceHandles else [resourceHandles] }, + '#withShareable':: d.fn(help='"Shareable determines whether the resource supports more than one consumer at a time."', args=[d.arg(name='shareable', type=d.T.boolean)]), + withShareable(shareable): { shareable: shareable }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/resource/v1alpha2/main.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/resource/v1alpha2/main.libsonnet new file mode 100644 index 00000000000..957ef0c5a8f --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/resource/v1alpha2/main.libsonnet @@ -0,0 +1,19 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='v1alpha2', url='', help=''), + allocationResult: (import 'allocationResult.libsonnet'), + podSchedulingContext: (import 'podSchedulingContext.libsonnet'), + podSchedulingContextSpec: (import 'podSchedulingContextSpec.libsonnet'), + podSchedulingContextStatus: (import 'podSchedulingContextStatus.libsonnet'), + resourceClaim: (import 'resourceClaim.libsonnet'), + resourceClaimConsumerReference: (import 'resourceClaimConsumerReference.libsonnet'), + resourceClaimParametersReference: (import 'resourceClaimParametersReference.libsonnet'), + resourceClaimSchedulingStatus: (import 'resourceClaimSchedulingStatus.libsonnet'), + resourceClaimSpec: (import 'resourceClaimSpec.libsonnet'), + resourceClaimStatus: (import 'resourceClaimStatus.libsonnet'), + resourceClaimTemplate: (import 'resourceClaimTemplate.libsonnet'), + resourceClaimTemplateSpec: (import 'resourceClaimTemplateSpec.libsonnet'), + resourceClass: (import 'resourceClass.libsonnet'), + resourceClassParametersReference: (import 'resourceClassParametersReference.libsonnet'), + resourceHandle: (import 'resourceHandle.libsonnet'), +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/resource/v1alpha2/podSchedulingContext.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/resource/v1alpha2/podSchedulingContext.libsonnet new file mode 100644 index 00000000000..91ffccd38c7 --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/resource/v1alpha2/podSchedulingContext.libsonnet @@ -0,0 +1,63 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='podSchedulingContext', url='', help='"PodSchedulingContext objects hold information that is needed to schedule a Pod with ResourceClaims that use \\"WaitForFirstConsumer\\" allocation mode.\\n\\nThis is an alpha type and requires enabling the DynamicResourceAllocation feature gate."'), + '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), + metadata: { + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. 
They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + withAnnotations(annotations): { metadata+: { annotations: annotations } }, + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, + '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), + withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, + '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), + withDeletionGracePeriodSeconds(deletionGracePeriodSeconds): { metadata+: { deletionGracePeriodSeconds: deletionGracePeriodSeconds } }, + '#withDeletionTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='deletionTimestamp', type=d.T.string)]), + withDeletionTimestamp(deletionTimestamp): { metadata+: { deletionTimestamp: deletionTimestamp } }, + '#withFinalizers':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."', args=[d.arg(name='finalizers', type=d.T.array)]), + withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, + '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. 
Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), + withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + withGenerateName(generateName): { metadata+: { generateName: generateName } }, + '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), + withGeneration(generation): { metadata+: { generation: generation } }, + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), + withLabels(labels): { metadata+: { labels: labels } }, + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + withLabelsMixin(labels): { metadata+: { labels+: labels } }, + '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". 
The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), + withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, + '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), + withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { metadata+: { name: name } }, + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + withNamespace(namespace): { metadata+: { namespace: namespace } }, + '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), + withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, + '#withOwnerReferencesMixin':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ownerReferences', type=d.T.array)]), + withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, + '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. 
May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), + withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), + withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), + withUid(uid): { metadata+: { uid: uid } }, + }, + '#new':: d.fn(help='new returns an instance of PodSchedulingContext', args=[d.arg(name='name', type=d.T.string)]), + new(name): { + apiVersion: 'resource.k8s.io/v1alpha2', + kind: 'PodSchedulingContext', + } + self.metadata.withName(name=name), + '#spec':: d.obj(help='"PodSchedulingContextSpec describes where resources for the Pod are needed."'), + spec: { + '#withPotentialNodes':: d.fn(help='"PotentialNodes lists nodes where the Pod might be able to run.\\n\\nThe size of this field is limited to 128. This is large enough for many clusters. Larger clusters may need more attempts to find a node that suits all pending resources. This may get increased in the future, but not reduced."', args=[d.arg(name='potentialNodes', type=d.T.array)]), + withPotentialNodes(potentialNodes): { spec+: { potentialNodes: if std.isArray(v=potentialNodes) then potentialNodes else [potentialNodes] } }, + '#withPotentialNodesMixin':: d.fn(help='"PotentialNodes lists nodes where the Pod might be able to run.\\n\\nThe size of this field is limited to 128. This is large enough for many clusters. Larger clusters may need more attempts to find a node that suits all pending resources. 
This may get increased in the future, but not reduced."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='potentialNodes', type=d.T.array)]), + withPotentialNodesMixin(potentialNodes): { spec+: { potentialNodes+: if std.isArray(v=potentialNodes) then potentialNodes else [potentialNodes] } }, + '#withSelectedNode':: d.fn(help='"SelectedNode is the node for which allocation of ResourceClaims that are referenced by the Pod and that use \\"WaitForFirstConsumer\\" allocation is to be attempted."', args=[d.arg(name='selectedNode', type=d.T.string)]), + withSelectedNode(selectedNode): { spec+: { selectedNode: selectedNode } }, + }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/resource/v1alpha2/podSchedulingContextSpec.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/resource/v1alpha2/podSchedulingContextSpec.libsonnet new file mode 100644 index 00000000000..ec20974ea4e --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/resource/v1alpha2/podSchedulingContextSpec.libsonnet @@ -0,0 +1,12 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='podSchedulingContextSpec', url='', help='"PodSchedulingContextSpec describes where resources for the Pod are needed."'), + '#withPotentialNodes':: d.fn(help='"PotentialNodes lists nodes where the Pod might be able to run.\\n\\nThe size of this field is limited to 128. This is large enough for many clusters. Larger clusters may need more attempts to find a node that suits all pending resources. This may get increased in the future, but not reduced."', args=[d.arg(name='potentialNodes', type=d.T.array)]), + withPotentialNodes(potentialNodes): { potentialNodes: if std.isArray(v=potentialNodes) then potentialNodes else [potentialNodes] }, + '#withPotentialNodesMixin':: d.fn(help='"PotentialNodes lists nodes where the Pod might be able to run.\\n\\nThe size of this field is limited to 128. This is large enough for many clusters. Larger clusters may need more attempts to find a node that suits all pending resources. 
This may get increased in the future, but not reduced."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='potentialNodes', type=d.T.array)]), + withPotentialNodesMixin(potentialNodes): { potentialNodes+: if std.isArray(v=potentialNodes) then potentialNodes else [potentialNodes] }, + '#withSelectedNode':: d.fn(help='"SelectedNode is the node for which allocation of ResourceClaims that are referenced by the Pod and that use \\"WaitForFirstConsumer\\" allocation is to be attempted."', args=[d.arg(name='selectedNode', type=d.T.string)]), + withSelectedNode(selectedNode): { selectedNode: selectedNode }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/resource/v1alpha2/podSchedulingContextStatus.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/resource/v1alpha2/podSchedulingContextStatus.libsonnet new file mode 100644 index 00000000000..c3e78ff2d9e --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/resource/v1alpha2/podSchedulingContextStatus.libsonnet @@ -0,0 +1,10 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='podSchedulingContextStatus', url='', help='"PodSchedulingContextStatus describes where resources for the Pod can be allocated."'), + '#withResourceClaims':: d.fn(help='"ResourceClaims describes resource availability for each pod.spec.resourceClaim entry where the corresponding ResourceClaim uses \\"WaitForFirstConsumer\\" allocation mode."', args=[d.arg(name='resourceClaims', type=d.T.array)]), + withResourceClaims(resourceClaims): { resourceClaims: if std.isArray(v=resourceClaims) then resourceClaims else [resourceClaims] }, + '#withResourceClaimsMixin':: d.fn(help='"ResourceClaims describes resource availability for each pod.spec.resourceClaim entry where the corresponding ResourceClaim uses \\"WaitForFirstConsumer\\" allocation mode."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='resourceClaims', type=d.T.array)]), + withResourceClaimsMixin(resourceClaims): { resourceClaims+: if std.isArray(v=resourceClaims) then resourceClaims else [resourceClaims] }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/coordination/v1beta1/lease.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/resource/v1alpha2/resourceClaim.libsonnet similarity index 73% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/coordination/v1beta1/lease.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/resource/v1alpha2/resourceClaim.libsonnet index ff3faec5fea..18f7803ae8c 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/coordination/v1beta1/lease.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/resource/v1alpha2/resourceClaim.libsonnet @@ -1,14 +1,12 @@ { local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='lease', url='', help='"Lease defines a lease concept."'), + '#':: d.pkg(name='resourceClaim', url='', help='"ResourceClaim describes which resources are needed by a resource consumer. 
Its status tracks whether the resource has been allocated and what the resulting attributes are.\\n\\nThis is an alpha type and requires enabling the DynamicResourceAllocation feature gate."'), '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -19,21 +17,21 @@ withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. 
Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { metadata+: { generateName: generateName } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. 
More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { metadata+: { labels+: labels } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { metadata+: { namespace: namespace } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, @@ -41,28 +39,31 @@ withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. 
It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { metadata+: { uid: uid } }, }, - '#new':: d.fn(help='new returns an instance of Lease', args=[d.arg(name='name', type=d.T.string)]), + '#new':: d.fn(help='new returns an instance of ResourceClaim', args=[d.arg(name='name', type=d.T.string)]), new(name): { - apiVersion: 'coordination.k8s.io/v1beta1', - kind: 'Lease', + apiVersion: 'resource.k8s.io/v1alpha2', + kind: 'ResourceClaim', } + self.metadata.withName(name=name), - '#spec':: d.obj(help='"LeaseSpec is a specification of a Lease."'), + '#spec':: d.obj(help='"ResourceClaimSpec defines how a resource is to be allocated."'), spec: { - '#withAcquireTime':: d.fn(help='"MicroTime is version of Time with microsecond level precision."', args=[d.arg(name='acquireTime', type=d.T.string)]), - withAcquireTime(acquireTime): { spec+: { acquireTime: acquireTime } }, - '#withHolderIdentity':: d.fn(help='"holderIdentity contains the identity of the holder of a current lease."', args=[d.arg(name='holderIdentity', type=d.T.string)]), - withHolderIdentity(holderIdentity): { spec+: { holderIdentity: holderIdentity } }, - '#withLeaseDurationSeconds':: d.fn(help='"leaseDurationSeconds is a duration that candidates for a lease need to wait to force acquire it. This is measure against time of last observed RenewTime."', args=[d.arg(name='leaseDurationSeconds', type=d.T.integer)]), - withLeaseDurationSeconds(leaseDurationSeconds): { spec+: { leaseDurationSeconds: leaseDurationSeconds } }, - '#withLeaseTransitions':: d.fn(help='"leaseTransitions is the number of transitions of a lease between holders."', args=[d.arg(name='leaseTransitions', type=d.T.integer)]), - withLeaseTransitions(leaseTransitions): { spec+: { leaseTransitions: leaseTransitions } }, - '#withRenewTime':: d.fn(help='"MicroTime is version of Time with microsecond level precision."', args=[d.arg(name='renewTime', type=d.T.string)]), - withRenewTime(renewTime): { spec+: { renewTime: renewTime } }, + '#parametersRef':: d.obj(help='"ResourceClaimParametersReference contains enough information to let you locate the parameters for a ResourceClaim. The object must be in the same namespace as the ResourceClaim."'), + parametersRef: { + '#withApiGroup':: d.fn(help='"APIGroup is the group for the resource being referenced. It is empty for the core API. This matches the group in the APIVersion that is used when creating the resources."', args=[d.arg(name='apiGroup', type=d.T.string)]), + withApiGroup(apiGroup): { spec+: { parametersRef+: { apiGroup: apiGroup } } }, + '#withKind':: d.fn(help="\"Kind is the type of resource being referenced. 
This is the same value as in the parameter object's metadata, for example \\\"ConfigMap\\\".\"", args=[d.arg(name='kind', type=d.T.string)]), + withKind(kind): { spec+: { parametersRef+: { kind: kind } } }, + '#withName':: d.fn(help='"Name is the name of resource being referenced."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { spec+: { parametersRef+: { name: name } } }, + }, + '#withAllocationMode':: d.fn(help='"Allocation can start immediately or when a Pod wants to use the resource. \\"WaitForFirstConsumer\\" is the default."', args=[d.arg(name='allocationMode', type=d.T.string)]), + withAllocationMode(allocationMode): { spec+: { allocationMode: allocationMode } }, + '#withResourceClassName':: d.fn(help='"ResourceClassName references the driver and additional parameters via the name of a ResourceClass that was created as part of the driver deployment."', args=[d.arg(name='resourceClassName', type=d.T.string)]), + withResourceClassName(resourceClassName): { spec+: { resourceClassName: resourceClassName } }, }, '#mixin': 'ignore', mixin: self, diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/resource/v1alpha2/resourceClaimConsumerReference.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/resource/v1alpha2/resourceClaimConsumerReference.libsonnet new file mode 100644 index 00000000000..e04b19dfda8 --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/resource/v1alpha2/resourceClaimConsumerReference.libsonnet @@ -0,0 +1,14 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='resourceClaimConsumerReference', url='', help='"ResourceClaimConsumerReference contains enough information to let you locate the consumer of a ResourceClaim. The user must be a resource in the same namespace as the ResourceClaim."'), + '#withApiGroup':: d.fn(help='"APIGroup is the group for the resource being referenced. It is empty for the core API. 
This matches the group in the APIVersion that is used when creating the resources."', args=[d.arg(name='apiGroup', type=d.T.string)]), + withApiGroup(apiGroup): { apiGroup: apiGroup }, + '#withName':: d.fn(help='"Name is the name of resource being referenced."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { name: name }, + '#withResource':: d.fn(help='"Resource is the type of resource being referenced, for example \\"pods\\"."', args=[d.arg(name='resource', type=d.T.string)]), + withResource(resource): { resource: resource }, + '#withUid':: d.fn(help='"UID identifies exactly one incarnation of the resource."', args=[d.arg(name='uid', type=d.T.string)]), + withUid(uid): { uid: uid }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/resource/v1alpha2/resourceClaimParametersReference.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/resource/v1alpha2/resourceClaimParametersReference.libsonnet new file mode 100644 index 00000000000..f6e426aa2e3 --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/resource/v1alpha2/resourceClaimParametersReference.libsonnet @@ -0,0 +1,12 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='resourceClaimParametersReference', url='', help='"ResourceClaimParametersReference contains enough information to let you locate the parameters for a ResourceClaim. The object must be in the same namespace as the ResourceClaim."'), + '#withApiGroup':: d.fn(help='"APIGroup is the group for the resource being referenced. It is empty for the core API. This matches the group in the APIVersion that is used when creating the resources."', args=[d.arg(name='apiGroup', type=d.T.string)]), + withApiGroup(apiGroup): { apiGroup: apiGroup }, + '#withKind':: d.fn(help="\"Kind is the type of resource being referenced. This is the same value as in the parameter object's metadata, for example \\\"ConfigMap\\\".\"", args=[d.arg(name='kind', type=d.T.string)]), + withKind(kind): { kind: kind }, + '#withName':: d.fn(help='"Name is the name of resource being referenced."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { name: name }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/resource/v1alpha2/resourceClaimSchedulingStatus.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/resource/v1alpha2/resourceClaimSchedulingStatus.libsonnet new file mode 100644 index 00000000000..5a7a6328683 --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/resource/v1alpha2/resourceClaimSchedulingStatus.libsonnet @@ -0,0 +1,12 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='resourceClaimSchedulingStatus', url='', help='"ResourceClaimSchedulingStatus contains information about one particular ResourceClaim with \\"WaitForFirstConsumer\\" allocation mode."'), + '#withName':: d.fn(help='"Name matches the pod.spec.resourceClaims[*].Name field."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { name: name }, + '#withUnsuitableNodes':: d.fn(help='"UnsuitableNodes lists nodes that the ResourceClaim cannot be allocated for.\\n\\nThe size of this field is limited to 128, the same as for PodSchedulingSpec.PotentialNodes. 
This may get increased in the future, but not reduced."', args=[d.arg(name='unsuitableNodes', type=d.T.array)]), + withUnsuitableNodes(unsuitableNodes): { unsuitableNodes: if std.isArray(v=unsuitableNodes) then unsuitableNodes else [unsuitableNodes] }, + '#withUnsuitableNodesMixin':: d.fn(help='"UnsuitableNodes lists nodes that the ResourceClaim cannot be allocated for.\\n\\nThe size of this field is limited to 128, the same as for PodSchedulingSpec.PotentialNodes. This may get increased in the future, but not reduced."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='unsuitableNodes', type=d.T.array)]), + withUnsuitableNodesMixin(unsuitableNodes): { unsuitableNodes+: if std.isArray(v=unsuitableNodes) then unsuitableNodes else [unsuitableNodes] }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/resource/v1alpha2/resourceClaimSpec.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/resource/v1alpha2/resourceClaimSpec.libsonnet new file mode 100644 index 00000000000..12f08aed5bf --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/resource/v1alpha2/resourceClaimSpec.libsonnet @@ -0,0 +1,19 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='resourceClaimSpec', url='', help='"ResourceClaimSpec defines how a resource is to be allocated."'), + '#parametersRef':: d.obj(help='"ResourceClaimParametersReference contains enough information to let you locate the parameters for a ResourceClaim. The object must be in the same namespace as the ResourceClaim."'), + parametersRef: { + '#withApiGroup':: d.fn(help='"APIGroup is the group for the resource being referenced. It is empty for the core API. This matches the group in the APIVersion that is used when creating the resources."', args=[d.arg(name='apiGroup', type=d.T.string)]), + withApiGroup(apiGroup): { parametersRef+: { apiGroup: apiGroup } }, + '#withKind':: d.fn(help="\"Kind is the type of resource being referenced. This is the same value as in the parameter object's metadata, for example \\\"ConfigMap\\\".\"", args=[d.arg(name='kind', type=d.T.string)]), + withKind(kind): { parametersRef+: { kind: kind } }, + '#withName':: d.fn(help='"Name is the name of resource being referenced."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { parametersRef+: { name: name } }, + }, + '#withAllocationMode':: d.fn(help='"Allocation can start immediately or when a Pod wants to use the resource. 
\\"WaitForFirstConsumer\\" is the default."', args=[d.arg(name='allocationMode', type=d.T.string)]), + withAllocationMode(allocationMode): { allocationMode: allocationMode }, + '#withResourceClassName':: d.fn(help='"ResourceClassName references the driver and additional parameters via the name of a ResourceClass that was created as part of the driver deployment."', args=[d.arg(name='resourceClassName', type=d.T.string)]), + withResourceClassName(resourceClassName): { resourceClassName: resourceClassName }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/resource/v1alpha2/resourceClaimStatus.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/resource/v1alpha2/resourceClaimStatus.libsonnet new file mode 100644 index 00000000000..b036002087f --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/resource/v1alpha2/resourceClaimStatus.libsonnet @@ -0,0 +1,30 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='resourceClaimStatus', url='', help='"ResourceClaimStatus tracks whether the resource has been allocated and what the resulting attributes are."'), + '#allocation':: d.obj(help='"AllocationResult contains attributes of an allocated resource."'), + allocation: { + '#availableOnNodes':: d.obj(help='"A node selector represents the union of the results of one or more label queries over a set of nodes; that is, it represents the OR of the selectors represented by the node selector terms."'), + availableOnNodes: { + '#withNodeSelectorTerms':: d.fn(help='"Required. A list of node selector terms. The terms are ORed."', args=[d.arg(name='nodeSelectorTerms', type=d.T.array)]), + withNodeSelectorTerms(nodeSelectorTerms): { allocation+: { availableOnNodes+: { nodeSelectorTerms: if std.isArray(v=nodeSelectorTerms) then nodeSelectorTerms else [nodeSelectorTerms] } } }, + '#withNodeSelectorTermsMixin':: d.fn(help='"Required. A list of node selector terms. The terms are ORed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='nodeSelectorTerms', type=d.T.array)]), + withNodeSelectorTermsMixin(nodeSelectorTerms): { allocation+: { availableOnNodes+: { nodeSelectorTerms+: if std.isArray(v=nodeSelectorTerms) then nodeSelectorTerms else [nodeSelectorTerms] } } }, + }, + '#withResourceHandles':: d.fn(help='"ResourceHandles contain the state associated with an allocation that should be maintained throughout the lifetime of a claim. Each ResourceHandle contains data that should be passed to a specific kubelet plugin once it lands on a node. This data is returned by the driver after a successful allocation and is opaque to Kubernetes. Driver documentation may explain to users how to interpret this data if needed.\\n\\nSetting this field is optional. It has a maximum size of 32 entries. If null (or empty), it is assumed this allocation will be processed by a single kubelet plugin with no ResourceHandle data attached. 
The name of the kubelet plugin invoked will match the DriverName set in the ResourceClaimStatus this AllocationResult is embedded in."', args=[d.arg(name='resourceHandles', type=d.T.array)]), + withResourceHandles(resourceHandles): { allocation+: { resourceHandles: if std.isArray(v=resourceHandles) then resourceHandles else [resourceHandles] } }, + '#withResourceHandlesMixin':: d.fn(help='"ResourceHandles contain the state associated with an allocation that should be maintained throughout the lifetime of a claim. Each ResourceHandle contains data that should be passed to a specific kubelet plugin once it lands on a node. This data is returned by the driver after a successful allocation and is opaque to Kubernetes. Driver documentation may explain to users how to interpret this data if needed.\\n\\nSetting this field is optional. It has a maximum size of 32 entries. If null (or empty), it is assumed this allocation will be processed by a single kubelet plugin with no ResourceHandle data attached. The name of the kubelet plugin invoked will match the DriverName set in the ResourceClaimStatus this AllocationResult is embedded in."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='resourceHandles', type=d.T.array)]), + withResourceHandlesMixin(resourceHandles): { allocation+: { resourceHandles+: if std.isArray(v=resourceHandles) then resourceHandles else [resourceHandles] } }, + '#withShareable':: d.fn(help='"Shareable determines whether the resource supports more than one consumer at a time."', args=[d.arg(name='shareable', type=d.T.boolean)]), + withShareable(shareable): { allocation+: { shareable: shareable } }, + }, + '#withDeallocationRequested':: d.fn(help='"DeallocationRequested indicates that a ResourceClaim is to be deallocated.\\n\\nThe driver then must deallocate this claim and reset the field together with clearing the Allocation field.\\n\\nWhile DeallocationRequested is set, no new consumers may be added to ReservedFor."', args=[d.arg(name='deallocationRequested', type=d.T.boolean)]), + withDeallocationRequested(deallocationRequested): { deallocationRequested: deallocationRequested }, + '#withDriverName':: d.fn(help='"DriverName is a copy of the driver name from the ResourceClass at the time when allocation started."', args=[d.arg(name='driverName', type=d.T.string)]), + withDriverName(driverName): { driverName: driverName }, + '#withReservedFor':: d.fn(help='"ReservedFor indicates which entities are currently allowed to use the claim. A Pod which references a ResourceClaim which is not reserved for that Pod will not be started.\\n\\nThere can be at most 32 such reservations. This may get increased in the future, but not reduced."', args=[d.arg(name='reservedFor', type=d.T.array)]), + withReservedFor(reservedFor): { reservedFor: if std.isArray(v=reservedFor) then reservedFor else [reservedFor] }, + '#withReservedForMixin':: d.fn(help='"ReservedFor indicates which entities are currently allowed to use the claim. A Pod which references a ResourceClaim which is not reserved for that Pod will not be started.\\n\\nThere can be at most 32 such reservations. 
This may get increased in the future, but not reduced."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='reservedFor', type=d.T.array)]), + withReservedForMixin(reservedFor): { reservedFor+: if std.isArray(v=reservedFor) then reservedFor else [reservedFor] }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/resource/v1alpha2/resourceClaimTemplate.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/resource/v1alpha2/resourceClaimTemplate.libsonnet new file mode 100644 index 00000000000..54f9b26ef99 --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/resource/v1alpha2/resourceClaimTemplate.libsonnet @@ -0,0 +1,116 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='resourceClaimTemplate', url='', help='"ResourceClaimTemplate is used to produce ResourceClaim objects."'), + '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), + metadata: { + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + withAnnotations(annotations): { metadata+: { annotations: annotations } }, + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, + '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), + withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, + '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), + withDeletionGracePeriodSeconds(deletionGracePeriodSeconds): { metadata+: { deletionGracePeriodSeconds: deletionGracePeriodSeconds } }, + '#withDeletionTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='deletionTimestamp', type=d.T.string)]), + withDeletionTimestamp(deletionTimestamp): { metadata+: { deletionTimestamp: deletionTimestamp } }, + '#withFinalizers':: d.fn(help='"Must be empty before the object is deleted from the registry. 
Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."', args=[d.arg(name='finalizers', type=d.T.array)]), + withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, + '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), + withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + withGenerateName(generateName): { metadata+: { generateName: generateName } }, + '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), + withGeneration(generation): { metadata+: { generation: generation } }, + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), + withLabels(labels): { metadata+: { labels: labels } }, + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + withLabelsMixin(labels): { metadata+: { labels+: labels } }, + '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), + withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, + '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), + withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { metadata+: { name: name } }, + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + withNamespace(namespace): { metadata+: { namespace: namespace } }, + '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. 
There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), + withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, + '#withOwnerReferencesMixin':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ownerReferences', type=d.T.array)]), + withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, + '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), + withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), + withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), + withUid(uid): { metadata+: { uid: uid } }, + }, + '#new':: d.fn(help='new returns an instance of ResourceClaimTemplate', args=[d.arg(name='name', type=d.T.string)]), + new(name): { + apiVersion: 'resource.k8s.io/v1alpha2', + kind: 'ResourceClaimTemplate', + } + self.metadata.withName(name=name), + '#spec':: d.obj(help='"ResourceClaimTemplateSpec contains the metadata and fields for a ResourceClaim."'), + spec: { + '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), + metadata: { + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + withAnnotations(annotations): { spec+: { metadata+: { annotations: annotations } } }, + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + withAnnotationsMixin(annotations): { spec+: { metadata+: { annotations+: annotations } } }, + '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), + withCreationTimestamp(creationTimestamp): { spec+: { metadata+: { creationTimestamp: creationTimestamp } } }, + '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), + withDeletionGracePeriodSeconds(deletionGracePeriodSeconds): { spec+: { metadata+: { deletionGracePeriodSeconds: deletionGracePeriodSeconds } } }, + '#withDeletionTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='deletionTimestamp', type=d.T.string)]), + withDeletionTimestamp(deletionTimestamp): { spec+: { metadata+: { deletionTimestamp: deletionTimestamp } } }, + '#withFinalizers':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."', args=[d.arg(name='finalizers', type=d.T.array)]), + withFinalizers(finalizers): { spec+: { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } } }, + '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. 
Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), + withFinalizersMixin(finalizers): { spec+: { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } } }, + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + withGenerateName(generateName): { spec+: { metadata+: { generateName: generateName } } }, + '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), + withGeneration(generation): { spec+: { metadata+: { generation: generation } } }, + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), + withLabels(labels): { spec+: { metadata+: { labels: labels } } }, + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + withLabelsMixin(labels): { spec+: { metadata+: { labels+: labels } } }, + '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". 
The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), + withManagedFields(managedFields): { spec+: { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } } }, + '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), + withManagedFieldsMixin(managedFields): { spec+: { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } } }, + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { spec+: { metadata+: { name: name } } }, + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + withNamespace(namespace): { spec+: { metadata+: { namespace: namespace } } }, + '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), + withOwnerReferences(ownerReferences): { spec+: { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } } }, + '#withOwnerReferencesMixin':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. 
There cannot be more than one managing controller."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ownerReferences', type=d.T.array)]), + withOwnerReferencesMixin(ownerReferences): { spec+: { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } } }, + '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), + withResourceVersion(resourceVersion): { spec+: { metadata+: { resourceVersion: resourceVersion } } }, + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), + withSelfLink(selfLink): { spec+: { metadata+: { selfLink: selfLink } } }, + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), + withUid(uid): { spec+: { metadata+: { uid: uid } } }, + }, + '#spec':: d.obj(help='"ResourceClaimSpec defines how a resource is to be allocated."'), + spec: { + '#parametersRef':: d.obj(help='"ResourceClaimParametersReference contains enough information to let you locate the parameters for a ResourceClaim. The object must be in the same namespace as the ResourceClaim."'), + parametersRef: { + '#withApiGroup':: d.fn(help='"APIGroup is the group for the resource being referenced. It is empty for the core API. This matches the group in the APIVersion that is used when creating the resources."', args=[d.arg(name='apiGroup', type=d.T.string)]), + withApiGroup(apiGroup): { spec+: { spec+: { parametersRef+: { apiGroup: apiGroup } } } }, + '#withKind':: d.fn(help="\"Kind is the type of resource being referenced. This is the same value as in the parameter object's metadata, for example \\\"ConfigMap\\\".\"", args=[d.arg(name='kind', type=d.T.string)]), + withKind(kind): { spec+: { spec+: { parametersRef+: { kind: kind } } } }, + '#withName':: d.fn(help='"Name is the name of resource being referenced."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { spec+: { spec+: { parametersRef+: { name: name } } } }, + }, + '#withAllocationMode':: d.fn(help='"Allocation can start immediately or when a Pod wants to use the resource. 
\\"WaitForFirstConsumer\\" is the default."', args=[d.arg(name='allocationMode', type=d.T.string)]), + withAllocationMode(allocationMode): { spec+: { spec+: { allocationMode: allocationMode } } }, + '#withResourceClassName':: d.fn(help='"ResourceClassName references the driver and additional parameters via the name of a ResourceClass that was created as part of the driver deployment."', args=[d.arg(name='resourceClassName', type=d.T.string)]), + withResourceClassName(resourceClassName): { spec+: { spec+: { resourceClassName: resourceClassName } } }, + }, + }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/resource/v1alpha2/resourceClaimTemplateSpec.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/resource/v1alpha2/resourceClaimTemplateSpec.libsonnet new file mode 100644 index 00000000000..69c42e9fbd7 --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/resource/v1alpha2/resourceClaimTemplateSpec.libsonnet @@ -0,0 +1,65 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='resourceClaimTemplateSpec', url='', help='"ResourceClaimTemplateSpec contains the metadata and fields for a ResourceClaim."'), + '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), + metadata: { + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + withAnnotations(annotations): { metadata+: { annotations: annotations } }, + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, + '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), + withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, + '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), + withDeletionGracePeriodSeconds(deletionGracePeriodSeconds): { metadata+: { deletionGracePeriodSeconds: deletionGracePeriodSeconds } }, + '#withDeletionTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. 
Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='deletionTimestamp', type=d.T.string)]), + withDeletionTimestamp(deletionTimestamp): { metadata+: { deletionTimestamp: deletionTimestamp } }, + '#withFinalizers':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."', args=[d.arg(name='finalizers', type=d.T.array)]), + withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, + '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), + withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + withGenerateName(generateName): { metadata+: { generateName: generateName } }, + '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. 
Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), + withGeneration(generation): { metadata+: { generation: generation } }, + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), + withLabels(labels): { metadata+: { labels: labels } }, + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + withLabelsMixin(labels): { metadata+: { labels+: labels } }, + '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), + withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, + '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), + withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { metadata+: { name: name } }, + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + withNamespace(namespace): { metadata+: { namespace: namespace } }, + '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. 
If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), + withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, + '#withOwnerReferencesMixin':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ownerReferences', type=d.T.array)]), + withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, + '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), + withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), + withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), + withUid(uid): { metadata+: { uid: uid } }, + }, + '#spec':: d.obj(help='"ResourceClaimSpec defines how a resource is to be allocated."'), + spec: { + '#parametersRef':: d.obj(help='"ResourceClaimParametersReference contains enough information to let you locate the parameters for a ResourceClaim. The object must be in the same namespace as the ResourceClaim."'), + parametersRef: { + '#withApiGroup':: d.fn(help='"APIGroup is the group for the resource being referenced. It is empty for the core API. This matches the group in the APIVersion that is used when creating the resources."', args=[d.arg(name='apiGroup', type=d.T.string)]), + withApiGroup(apiGroup): { spec+: { parametersRef+: { apiGroup: apiGroup } } }, + '#withKind':: d.fn(help="\"Kind is the type of resource being referenced. 
This is the same value as in the parameter object's metadata, for example \\\"ConfigMap\\\".\"", args=[d.arg(name='kind', type=d.T.string)]),
+ withKind(kind): { spec+: { parametersRef+: { kind: kind } } },
+ '#withName':: d.fn(help='"Name is the name of resource being referenced."', args=[d.arg(name='name', type=d.T.string)]),
+ withName(name): { spec+: { parametersRef+: { name: name } } },
+ },
+ '#withAllocationMode':: d.fn(help='"Allocation can start immediately or when a Pod wants to use the resource. \\"WaitForFirstConsumer\\" is the default."', args=[d.arg(name='allocationMode', type=d.T.string)]),
+ withAllocationMode(allocationMode): { spec+: { allocationMode: allocationMode } },
+ '#withResourceClassName':: d.fn(help='"ResourceClassName references the driver and additional parameters via the name of a ResourceClass that was created as part of the driver deployment."', args=[d.arg(name='resourceClassName', type=d.T.string)]),
+ withResourceClassName(resourceClassName): { spec+: { resourceClassName: resourceClassName } },
+ },
+ '#mixin': 'ignore',
+ mixin: self,
+}
diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/resource/v1alpha2/resourceClass.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/resource/v1alpha2/resourceClass.libsonnet
new file mode
index 00000000000..fd069ac11f1
--- /dev/null
+++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/resource/v1alpha2/resourceClass.libsonnet
@@ -0,0 +1,74 @@
+{
+ local d = (import 'doc-util/main.libsonnet'),
+ '#':: d.pkg(name='resourceClass', url='', help='"ResourceClass is used by administrators to influence how resources are allocated.\\n\\nThis is an alpha type and requires enabling the DynamicResourceAllocation feature gate."'),
+ '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'),
+ metadata: {
+ '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]),
+ withAnnotations(annotations): { metadata+: { annotations: annotations } },
+ '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]),
+ withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } },
+ '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON.
Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), + withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, + '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), + withDeletionGracePeriodSeconds(deletionGracePeriodSeconds): { metadata+: { deletionGracePeriodSeconds: deletionGracePeriodSeconds } }, + '#withDeletionTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='deletionTimestamp', type=d.T.string)]), + withDeletionTimestamp(deletionTimestamp): { metadata+: { deletionTimestamp: deletionTimestamp } }, + '#withFinalizers':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."', args=[d.arg(name='finalizers', type=d.T.array)]), + withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, + '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), + withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. 
If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + withGenerateName(generateName): { metadata+: { generateName: generateName } }, + '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), + withGeneration(generation): { metadata+: { generation: generation } }, + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), + withLabels(labels): { metadata+: { labels: labels } }, + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + withLabelsMixin(labels): { metadata+: { labels+: labels } }, + '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), + withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, + '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), + withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { metadata+: { name: name } }, + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + withNamespace(namespace): { metadata+: { namespace: namespace } }, + '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), + withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, + '#withOwnerReferencesMixin':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='ownerReferences', type=d.T.array)]), + withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, + '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), + withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), + withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), + withUid(uid): { metadata+: { uid: uid } }, + }, + '#new':: d.fn(help='new returns an instance of ResourceClass', args=[d.arg(name='name', type=d.T.string)]), + new(name): { + apiVersion: 'resource.k8s.io/v1alpha2', + kind: 'ResourceClass', + } + self.metadata.withName(name=name), + '#parametersRef':: d.obj(help='"ResourceClassParametersReference contains enough information to let you locate the parameters for a ResourceClass."'), + parametersRef: { + '#withApiGroup':: d.fn(help='"APIGroup is the group for the resource being referenced. It is empty for the core API. This matches the group in the APIVersion that is used when creating the resources."', args=[d.arg(name='apiGroup', type=d.T.string)]), + withApiGroup(apiGroup): { parametersRef+: { apiGroup: apiGroup } }, + '#withKind':: d.fn(help="\"Kind is the type of resource being referenced. This is the same value as in the parameter object's metadata.\"", args=[d.arg(name='kind', type=d.T.string)]), + withKind(kind): { parametersRef+: { kind: kind } }, + '#withName':: d.fn(help='"Name is the name of resource being referenced."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { parametersRef+: { name: name } }, + '#withNamespace':: d.fn(help='"Namespace that contains the referenced resource. Must be empty for cluster-scoped resources and non-empty for namespaced resources."', args=[d.arg(name='namespace', type=d.T.string)]), + withNamespace(namespace): { parametersRef+: { namespace: namespace } }, + }, + '#suitableNodes':: d.obj(help='"A node selector represents the union of the results of one or more label queries over a set of nodes; that is, it represents the OR of the selectors represented by the node selector terms."'), + suitableNodes: { + '#withNodeSelectorTerms':: d.fn(help='"Required. A list of node selector terms. The terms are ORed."', args=[d.arg(name='nodeSelectorTerms', type=d.T.array)]), + withNodeSelectorTerms(nodeSelectorTerms): { suitableNodes+: { nodeSelectorTerms: if std.isArray(v=nodeSelectorTerms) then nodeSelectorTerms else [nodeSelectorTerms] } }, + '#withNodeSelectorTermsMixin':: d.fn(help='"Required. A list of node selector terms. 
The terms are ORed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='nodeSelectorTerms', type=d.T.array)]),
+ withNodeSelectorTermsMixin(nodeSelectorTerms): { suitableNodes+: { nodeSelectorTerms+: if std.isArray(v=nodeSelectorTerms) then nodeSelectorTerms else [nodeSelectorTerms] } },
+ },
+ '#withDriverName':: d.fn(help='"DriverName defines the name of the dynamic resource driver that is used for allocation of a ResourceClaim that uses this class.\\n\\nResource drivers have a unique name in forward domain order (acme.example.com)."', args=[d.arg(name='driverName', type=d.T.string)]),
+ withDriverName(driverName): { driverName: driverName },
+ '#mixin': 'ignore',
+ mixin: self,
+}
diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/resource/v1alpha2/resourceClassParametersReference.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/resource/v1alpha2/resourceClassParametersReference.libsonnet
new file mode
index 00000000000..68ea9376f05
--- /dev/null
+++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/resource/v1alpha2/resourceClassParametersReference.libsonnet
@@ -0,0 +1,14 @@
+{
+ local d = (import 'doc-util/main.libsonnet'),
+ '#':: d.pkg(name='resourceClassParametersReference', url='', help='"ResourceClassParametersReference contains enough information to let you locate the parameters for a ResourceClass."'),
+ '#withApiGroup':: d.fn(help='"APIGroup is the group for the resource being referenced. It is empty for the core API. This matches the group in the APIVersion that is used when creating the resources."', args=[d.arg(name='apiGroup', type=d.T.string)]),
+ withApiGroup(apiGroup): { apiGroup: apiGroup },
+ '#withKind':: d.fn(help="\"Kind is the type of resource being referenced. This is the same value as in the parameter object's metadata.\"", args=[d.arg(name='kind', type=d.T.string)]),
+ withKind(kind): { kind: kind },
+ '#withName':: d.fn(help='"Name is the name of resource being referenced."', args=[d.arg(name='name', type=d.T.string)]),
+ withName(name): { name: name },
+ '#withNamespace':: d.fn(help='"Namespace that contains the referenced resource. Must be empty for cluster-scoped resources and non-empty for namespaced resources."', args=[d.arg(name='namespace', type=d.T.string)]),
+ withNamespace(namespace): { namespace: namespace },
+ '#mixin': 'ignore',
+ mixin: self,
+}
diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/resource/v1alpha2/resourceHandle.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/resource/v1alpha2/resourceHandle.libsonnet
new file mode
index 00000000000..7be0566ddbe
--- /dev/null
+++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/resource/v1alpha2/resourceHandle.libsonnet
@@ -0,0 +1,10 @@
+{
+ local d = (import 'doc-util/main.libsonnet'),
+ '#':: d.pkg(name='resourceHandle', url='', help='"ResourceHandle holds opaque resource data for processing by a specific kubelet plugin."'),
+ '#withData':: d.fn(help='"Data contains the opaque data associated with this ResourceHandle. It is set by the controller component of the resource driver whose name matches the DriverName set in the ResourceClaimStatus this ResourceHandle is embedded in.
It is set at allocation time and is intended for processing by the kubelet plugin whose name matches the DriverName set in this ResourceHandle.\\n\\nThe maximum size of this field is 16KiB. This may get increased in the future, but not reduced."', args=[d.arg(name='data', type=d.T.string)]),
+ withData(data): { data: data },
+ '#withDriverName':: d.fn(help="\"DriverName specifies the name of the resource driver whose kubelet plugin should be invoked to process this ResourceHandle's data once it lands on a node. This may differ from the DriverName set in ResourceClaimStatus this ResourceHandle is embedded in.\"", args=[d.arg(name='driverName', type=d.T.string)]),
+ withDriverName(driverName): { driverName: driverName },
+ '#mixin': 'ignore',
+ mixin: self,
+}
diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/scheduling/main.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/scheduling/main.libsonnet
similarity index 59%
rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/scheduling/main.libsonnet
rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/scheduling/main.libsonnet
index 69579538843..5029dd93b0d 100644
--- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/scheduling/main.libsonnet
+++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/scheduling/main.libsonnet
@@ -2,6 +2,4 @@
 local d = (import 'doc-util/main.libsonnet'),
 '#':: d.pkg(name='scheduling', url='', help=''),
 v1: (import 'v1/main.libsonnet'),
- v1alpha1: (import 'v1alpha1/main.libsonnet'),
- v1beta1: (import 'v1beta1/main.libsonnet'),
 }
diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/scheduling/v1/main.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/scheduling/v1/main.libsonnet
similarity index 100%
rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/scheduling/v1/main.libsonnet
rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/scheduling/v1/main.libsonnet
diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/scheduling/v1/priorityClass.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/scheduling/v1/priorityClass.libsonnet
similarity index 81%
rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/scheduling/v1/priorityClass.libsonnet
rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/scheduling/v1/priorityClass.libsonnet
index 8c47cfe159a..f0da21fc706 100644
--- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/scheduling/v1/priorityClass.libsonnet
+++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/scheduling/v1/priorityClass.libsonnet
@@ -3,12 +3,10 @@
 '#':: d.pkg(name='priorityClass', url='', help='"PriorityClass defines mapping from a priority class name to the priority integer value.
The value can be any valid integer."'), '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -19,21 +17,21 @@ withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. 
Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { metadata+: { generateName: generateName } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { metadata+: { labels+: labels } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. 
Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { metadata+: { namespace: namespace } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, @@ -41,9 +39,9 @@ withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. 
Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]),
 withUid(uid): { metadata+: { uid: uid } },
 },
 '#new':: d.fn(help='new returns an instance of PriorityClass', args=[d.arg(name='name', type=d.T.string)]),
@@ -55,9 +53,9 @@
 withDescription(description): { description: description },
 '#withGlobalDefault':: d.fn(help='"globalDefault specifies whether this PriorityClass should be considered as the default priority for pods that do not have any priority class. Only one PriorityClass can be marked as `globalDefault`. However, if more than one PriorityClasses exists with their `globalDefault` field set to true, the smallest value of such global default PriorityClasses will be used as the default priority."', args=[d.arg(name='globalDefault', type=d.T.boolean)]),
 withGlobalDefault(globalDefault): { globalDefault: globalDefault },
- '#withPreemptionPolicy':: d.fn(help='"PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset. This field is beta-level, gated by the NonPreemptingPriority feature-gate."', args=[d.arg(name='preemptionPolicy', type=d.T.string)]),
+ '#withPreemptionPolicy':: d.fn(help='"preemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset."', args=[d.arg(name='preemptionPolicy', type=d.T.string)]),
 withPreemptionPolicy(preemptionPolicy): { preemptionPolicy: preemptionPolicy },
- '#withValue':: d.fn(help='"The value of this priority class. This is the actual priority that pods receive when they have the name of this class in their pod spec."', args=[d.arg(name='value', type=d.T.integer)]),
+ '#withValue':: d.fn(help='"value represents the integer value of this priority class.
This is the actual priority that pods receive when they have the name of this class in their pod spec."', args=[d.arg(name='value', type=d.T.integer)]),
 withValue(value): { value: value },
 '#mixin': 'ignore',
 mixin: self,
diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/main.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/storage/main.libsonnet
similarity index 80%
rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/main.libsonnet
rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/storage/main.libsonnet
index 5b2186168a5..b8129d86c6c 100644
--- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/main.libsonnet
+++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/storage/main.libsonnet
@@ -3,5 +3,4 @@
 '#':: d.pkg(name='storage', url='', help=''),
 v1: (import 'v1/main.libsonnet'),
 v1alpha1: (import 'v1alpha1/main.libsonnet'),
- v1beta1: (import 'v1beta1/main.libsonnet'),
 }
diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1/csiDriver.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/storage/v1/csiDriver.libsonnet
similarity index 69%
rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1/csiDriver.libsonnet
rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/storage/v1/csiDriver.libsonnet
index 78435e56f38..54217c6356b 100644
--- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1/csiDriver.libsonnet
+++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/storage/v1/csiDriver.libsonnet
@@ -3,12 +3,10 @@
 '#':: d.pkg(name='csiDriver', url='', help='"CSIDriver captures information about a Container Storage Interface (CSI) volume driver deployed on the cluster. Kubernetes attach detach controller uses this object to determine whether attach is required. Kubelet uses this object to determine whether pod information needs to be passed on mount. CSIDriver objects are non-namespaced."'),
 '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'),
 metadata: {
- '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]),
+ '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]),
 withAnnotations(annotations): { metadata+: { annotations: annotations } },
- '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata.
They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -19,21 +17,21 @@ withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. 
The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { metadata+: { generateName: generateName } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { metadata+: { labels+: labels } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. 
This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { metadata+: { namespace: namespace } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. 
There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, @@ -41,9 +39,9 @@ withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { metadata+: { uid: uid } }, }, '#new':: d.fn(help='new returns an instance of CSIDriver', args=[d.arg(name='name', type=d.T.string)]), @@ -55,21 +53,23 @@ spec: { '#withAttachRequired':: d.fn(help='"attachRequired indicates this CSI volume driver requires an attach operation (because it implements the CSI ControllerPublishVolume() method), and that the Kubernetes attach detach controller should call the attach volume interface which checks the volumeattachment status and waits until the volume is attached before proceeding to mounting. The CSI external-attacher coordinates with CSI volume driver and updates the volumeattachment status when the attach operation is complete. If the CSIDriverRegistry feature gate is enabled and the value is specified to false, the attach operation will be skipped. 
Otherwise the attach operation will be called.\\n\\nThis field is immutable."', args=[d.arg(name='attachRequired', type=d.T.boolean)]), withAttachRequired(attachRequired): { spec+: { attachRequired: attachRequired } }, - '#withFsGroupPolicy':: d.fn(help='"Defines if the underlying volume supports changing ownership and permission of the volume before being mounted. Refer to the specific FSGroupPolicy values for additional details. This field is alpha-level, and is only honored by servers that enable the CSIVolumeFSGroupPolicy feature gate.\\n\\nThis field is immutable."', args=[d.arg(name='fsGroupPolicy', type=d.T.string)]), + '#withFsGroupPolicy':: d.fn(help="\"fsGroupPolicy defines if the underlying volume supports changing ownership and permission of the volume before being mounted. Refer to the specific FSGroupPolicy values for additional details.\\n\\nThis field is immutable.\\n\\nDefaults to ReadWriteOnceWithFSType, which will examine each volume to determine if Kubernetes should modify ownership and permissions of the volume. With the default policy the defined fsGroup will only be applied if a fstype is defined and the volume's access mode contains ReadWriteOnce.\"", args=[d.arg(name='fsGroupPolicy', type=d.T.string)]), withFsGroupPolicy(fsGroupPolicy): { spec+: { fsGroupPolicy: fsGroupPolicy } }, - '#withPodInfoOnMount':: d.fn(help="\"If set to true, podInfoOnMount indicates this CSI volume driver requires additional pod information (like podName, podUID, etc.) during mount operations. If set to false, pod information will not be passed on mount. Default is false. The CSI driver specifies podInfoOnMount as part of driver deployment. If true, Kubelet will pass pod information as VolumeContext in the CSI NodePublishVolume() calls. The CSI driver is responsible for parsing and validating the information passed in as VolumeContext. The following VolumeConext will be passed if podInfoOnMount is set to true. This list might grow, but the prefix will be used. \\\"csi.storage.k8s.io/pod.name\\\": pod.Name \\\"csi.storage.k8s.io/pod.namespace\\\": pod.Namespace \\\"csi.storage.k8s.io/pod.uid\\\": string(pod.UID) \\\"csi.storage.k8s.io/ephemeral\\\": \\\"true\\\" if the volume is an ephemeral inline volume\\n defined by a CSIVolumeSource, otherwise \\\"false\\\"\\n\\n\\\"csi.storage.k8s.io/ephemeral\\\" is a new feature in Kubernetes 1.16. It is only required for drivers which support both the \\\"Persistent\\\" and \\\"Ephemeral\\\" VolumeLifecycleMode. Other drivers can leave pod info disabled and/or ignore this field. As Kubernetes 1.15 doesn't support this field, drivers can only support one mode when deployed on such a cluster and the deployment determines which mode that is, for example via a command line parameter of the driver.\\n\\nThis field is immutable.\"", args=[d.arg(name='podInfoOnMount', type=d.T.boolean)]), + '#withPodInfoOnMount':: d.fn(help="\"podInfoOnMount indicates this CSI volume driver requires additional pod information (like podName, podUID, etc.) during mount operations, if set to true. If set to false, pod information will not be passed on mount. Default is false.\\n\\nThe CSI driver specifies podInfoOnMount as part of driver deployment. If true, Kubelet will pass pod information as VolumeContext in the CSI NodePublishVolume() calls. The CSI driver is responsible for parsing and validating the information passed in as VolumeContext.\\n\\nThe following VolumeContext will be passed if podInfoOnMount is set to true. This list might grow, but the prefix will be used. 
\\\"csi.storage.k8s.io/pod.name\\\": pod.Name \\\"csi.storage.k8s.io/pod.namespace\\\": pod.Namespace \\\"csi.storage.k8s.io/pod.uid\\\": string(pod.UID) \\\"csi.storage.k8s.io/ephemeral\\\": \\\"true\\\" if the volume is an ephemeral inline volume\\n defined by a CSIVolumeSource, otherwise \\\"false\\\"\\n\\n\\\"csi.storage.k8s.io/ephemeral\\\" is a new feature in Kubernetes 1.16. It is only required for drivers which support both the \\\"Persistent\\\" and \\\"Ephemeral\\\" VolumeLifecycleMode. Other drivers can leave pod info disabled and/or ignore this field. As Kubernetes 1.15 doesn't support this field, drivers can only support one mode when deployed on such a cluster and the deployment determines which mode that is, for example via a command line parameter of the driver.\\n\\nThis field is immutable.\"", args=[d.arg(name='podInfoOnMount', type=d.T.boolean)]), withPodInfoOnMount(podInfoOnMount): { spec+: { podInfoOnMount: podInfoOnMount } }, - '#withRequiresRepublish':: d.fn(help='"RequiresRepublish indicates the CSI driver wants `NodePublishVolume` being periodically called to reflect any possible change in the mounted volume. This field defaults to false.\\n\\nNote: After a successful initial NodePublishVolume call, subsequent calls to NodePublishVolume should only update the contents of the volume. New mount points will not be seen by a running container.\\n\\nThis is a beta feature and only available when the CSIServiceAccountToken feature is enabled."', args=[d.arg(name='requiresRepublish', type=d.T.boolean)]), + '#withRequiresRepublish':: d.fn(help='"requiresRepublish indicates the CSI driver wants `NodePublishVolume` being periodically called to reflect any possible change in the mounted volume. This field defaults to false.\\n\\nNote: After a successful initial NodePublishVolume call, subsequent calls to NodePublishVolume should only update the contents of the volume. New mount points will not be seen by a running container."', args=[d.arg(name='requiresRepublish', type=d.T.boolean)]), withRequiresRepublish(requiresRepublish): { spec+: { requiresRepublish: requiresRepublish } }, - '#withStorageCapacity':: d.fn(help='"If set to true, storageCapacity indicates that the CSI volume driver wants pod scheduling to consider the storage capacity that the driver deployment will report by creating CSIStorageCapacity objects with capacity information.\\n\\nThe check can be enabled immediately when deploying a driver. In that case, provisioning new volumes with late binding will pause until the driver deployment has published some suitable CSIStorageCapacity object.\\n\\nAlternatively, the driver can be deployed with the field unset or false and it can be flipped later when storage capacity information has been published.\\n\\nThis field is immutable.\\n\\nThis is a beta field and only available when the CSIStorageCapacity feature is enabled. The default is false."', args=[d.arg(name='storageCapacity', type=d.T.boolean)]), + '#withSeLinuxMount':: d.fn(help="\"seLinuxMount specifies if the CSI driver supports \\\"-o context\\\" mount option.\\n\\nWhen \\\"true\\\", the CSI driver must ensure that all volumes provided by this CSI driver can be mounted separately with different `-o context` options. This is typical for storage backends that provide volumes as filesystems on block devices or as independent shared volumes. 
Kubernetes will call NodeStage / NodePublish with \\\"-o context=xyz\\\" mount option when mounting a ReadWriteOncePod volume used in Pod that has explicitly set SELinux context. In the future, it may be expanded to other volume AccessModes. In any case, Kubernetes will ensure that the volume is mounted only with a single SELinux context.\\n\\nWhen \\\"false\\\", Kubernetes won't pass any special SELinux mount options to the driver. This is typical for volumes that represent subdirectories of a bigger shared filesystem.\\n\\nDefault is \\\"false\\\".\"", args=[d.arg(name='seLinuxMount', type=d.T.boolean)]), + withSeLinuxMount(seLinuxMount): { spec+: { seLinuxMount: seLinuxMount } }, + '#withStorageCapacity':: d.fn(help='"storageCapacity indicates that the CSI volume driver wants pod scheduling to consider the storage capacity that the driver deployment will report by creating CSIStorageCapacity objects with capacity information, if set to true.\\n\\nThe check can be enabled immediately when deploying a driver. In that case, provisioning new volumes with late binding will pause until the driver deployment has published some suitable CSIStorageCapacity object.\\n\\nAlternatively, the driver can be deployed with the field unset or false and it can be flipped later when storage capacity information has been published.\\n\\nThis field was immutable in Kubernetes <= 1.22 and now is mutable."', args=[d.arg(name='storageCapacity', type=d.T.boolean)]), withStorageCapacity(storageCapacity): { spec+: { storageCapacity: storageCapacity } }, - '#withTokenRequests':: d.fn(help="\"TokenRequests indicates the CSI driver needs pods' service account tokens it is mounting volume for to do necessary authentication. Kubelet will pass the tokens in VolumeContext in the CSI NodePublishVolume calls. The CSI driver should parse and validate the following VolumeContext: \\\"csi.storage.k8s.io/serviceAccount.tokens\\\": {\\n \\\"\u003caudience\u003e\\\": {\\n \\\"token\\\": \u003ctoken\u003e,\\n \\\"expirationTimestamp\\\": \u003cexpiration timestamp in RFC3339\u003e,\\n },\\n ...\\n}\\n\\nNote: Audience in each TokenRequest should be different and at most one token is empty string. To receive a new token after expiry, RequiresRepublish can be used to trigger NodePublishVolume periodically.\\n\\nThis is a beta feature and only available when the CSIServiceAccountToken feature is enabled.\"", args=[d.arg(name='tokenRequests', type=d.T.array)]), + '#withTokenRequests':: d.fn(help="\"tokenRequests indicates the CSI driver needs pods' service account tokens it is mounting volume for to do necessary authentication. Kubelet will pass the tokens in VolumeContext in the CSI NodePublishVolume calls. The CSI driver should parse and validate the following VolumeContext: \\\"csi.storage.k8s.io/serviceAccount.tokens\\\": {\\n \\\"\u003caudience\u003e\\\": {\\n \\\"token\\\": \u003ctoken\u003e,\\n \\\"expirationTimestamp\\\": \u003cexpiration timestamp in RFC3339\u003e,\\n },\\n ...\\n}\\n\\nNote: Audience in each TokenRequest should be different and at most one token is empty string. 
To receive a new token after expiry, RequiresRepublish can be used to trigger NodePublishVolume periodically.\"", args=[d.arg(name='tokenRequests', type=d.T.array)]), withTokenRequests(tokenRequests): { spec+: { tokenRequests: if std.isArray(v=tokenRequests) then tokenRequests else [tokenRequests] } }, - '#withTokenRequestsMixin':: d.fn(help="\"TokenRequests indicates the CSI driver needs pods' service account tokens it is mounting volume for to do necessary authentication. Kubelet will pass the tokens in VolumeContext in the CSI NodePublishVolume calls. The CSI driver should parse and validate the following VolumeContext: \\\"csi.storage.k8s.io/serviceAccount.tokens\\\": {\\n \\\"\u003caudience\u003e\\\": {\\n \\\"token\\\": \u003ctoken\u003e,\\n \\\"expirationTimestamp\\\": \u003cexpiration timestamp in RFC3339\u003e,\\n },\\n ...\\n}\\n\\nNote: Audience in each TokenRequest should be different and at most one token is empty string. To receive a new token after expiry, RequiresRepublish can be used to trigger NodePublishVolume periodically.\\n\\nThis is a beta feature and only available when the CSIServiceAccountToken feature is enabled.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='tokenRequests', type=d.T.array)]), + '#withTokenRequestsMixin':: d.fn(help="\"tokenRequests indicates the CSI driver needs pods' service account tokens it is mounting volume for to do necessary authentication. Kubelet will pass the tokens in VolumeContext in the CSI NodePublishVolume calls. The CSI driver should parse and validate the following VolumeContext: \\\"csi.storage.k8s.io/serviceAccount.tokens\\\": {\\n \\\"\u003caudience\u003e\\\": {\\n \\\"token\\\": \u003ctoken\u003e,\\n \\\"expirationTimestamp\\\": \u003cexpiration timestamp in RFC3339\u003e,\\n },\\n ...\\n}\\n\\nNote: Audience in each TokenRequest should be different and at most one token is empty string. To receive a new token after expiry, RequiresRepublish can be used to trigger NodePublishVolume periodically.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='tokenRequests', type=d.T.array)]), withTokenRequestsMixin(tokenRequests): { spec+: { tokenRequests+: if std.isArray(v=tokenRequests) then tokenRequests else [tokenRequests] } }, - '#withVolumeLifecycleModes':: d.fn(help='"volumeLifecycleModes defines what kind of volumes this CSI volume driver supports. The default if the list is empty is \\"Persistent\\", which is the usage defined by the CSI specification and implemented in Kubernetes via the usual PV/PVC mechanism. The other mode is \\"Ephemeral\\". In this mode, volumes are defined inline inside the pod spec with CSIVolumeSource and their lifecycle is tied to the lifecycle of that pod. A driver has to be aware of this because it is only going to get a NodePublishVolume call for such a volume. For more information about implementing this mode, see https://kubernetes-csi.github.io/docs/ephemeral-local-volumes.html A driver can support one or more of these modes and more modes may be added in the future. This field is beta.\\n\\nThis field is immutable."', args=[d.arg(name='volumeLifecycleModes', type=d.T.array)]), + '#withVolumeLifecycleModes':: d.fn(help='"volumeLifecycleModes defines what kind of volumes this CSI volume driver supports. The default if the list is empty is \\"Persistent\\", which is the usage defined by the CSI specification and implemented in Kubernetes via the usual PV/PVC mechanism.\\n\\nThe other mode is \\"Ephemeral\\". 
In this mode, volumes are defined inline inside the pod spec with CSIVolumeSource and their lifecycle is tied to the lifecycle of that pod. A driver has to be aware of this because it is only going to get a NodePublishVolume call for such a volume.\\n\\nFor more information about implementing this mode, see https://kubernetes-csi.github.io/docs/ephemeral-local-volumes.html A driver can support one or more of these modes and more modes may be added in the future.\\n\\nThis field is beta. This field is immutable."', args=[d.arg(name='volumeLifecycleModes', type=d.T.array)]), withVolumeLifecycleModes(volumeLifecycleModes): { spec+: { volumeLifecycleModes: if std.isArray(v=volumeLifecycleModes) then volumeLifecycleModes else [volumeLifecycleModes] } }, - '#withVolumeLifecycleModesMixin':: d.fn(help='"volumeLifecycleModes defines what kind of volumes this CSI volume driver supports. The default if the list is empty is \\"Persistent\\", which is the usage defined by the CSI specification and implemented in Kubernetes via the usual PV/PVC mechanism. The other mode is \\"Ephemeral\\". In this mode, volumes are defined inline inside the pod spec with CSIVolumeSource and their lifecycle is tied to the lifecycle of that pod. A driver has to be aware of this because it is only going to get a NodePublishVolume call for such a volume. For more information about implementing this mode, see https://kubernetes-csi.github.io/docs/ephemeral-local-volumes.html A driver can support one or more of these modes and more modes may be added in the future. This field is beta.\\n\\nThis field is immutable."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='volumeLifecycleModes', type=d.T.array)]), + '#withVolumeLifecycleModesMixin':: d.fn(help='"volumeLifecycleModes defines what kind of volumes this CSI volume driver supports. The default if the list is empty is \\"Persistent\\", which is the usage defined by the CSI specification and implemented in Kubernetes via the usual PV/PVC mechanism.\\n\\nThe other mode is \\"Ephemeral\\". In this mode, volumes are defined inline inside the pod spec with CSIVolumeSource and their lifecycle is tied to the lifecycle of that pod. A driver has to be aware of this because it is only going to get a NodePublishVolume call for such a volume.\\n\\nFor more information about implementing this mode, see https://kubernetes-csi.github.io/docs/ephemeral-local-volumes.html A driver can support one or more of these modes and more modes may be added in the future.\\n\\nThis field is beta. 
This field is immutable."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='volumeLifecycleModes', type=d.T.array)]), withVolumeLifecycleModesMixin(volumeLifecycleModes): { spec+: { volumeLifecycleModes+: if std.isArray(v=volumeLifecycleModes) then volumeLifecycleModes else [volumeLifecycleModes] } }, }, '#mixin': 'ignore', diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/storage/v1/csiDriverSpec.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/storage/v1/csiDriverSpec.libsonnet new file mode 100644 index 00000000000..a8248051e78 --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/storage/v1/csiDriverSpec.libsonnet @@ -0,0 +1,26 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='csiDriverSpec', url='', help='"CSIDriverSpec is the specification of a CSIDriver."'), + '#withAttachRequired':: d.fn(help='"attachRequired indicates this CSI volume driver requires an attach operation (because it implements the CSI ControllerPublishVolume() method), and that the Kubernetes attach detach controller should call the attach volume interface which checks the volumeattachment status and waits until the volume is attached before proceeding to mounting. The CSI external-attacher coordinates with CSI volume driver and updates the volumeattachment status when the attach operation is complete. If the CSIDriverRegistry feature gate is enabled and the value is specified to false, the attach operation will be skipped. Otherwise the attach operation will be called.\\n\\nThis field is immutable."', args=[d.arg(name='attachRequired', type=d.T.boolean)]), + withAttachRequired(attachRequired): { attachRequired: attachRequired }, + '#withFsGroupPolicy':: d.fn(help="\"fsGroupPolicy defines if the underlying volume supports changing ownership and permission of the volume before being mounted. Refer to the specific FSGroupPolicy values for additional details.\\n\\nThis field is immutable.\\n\\nDefaults to ReadWriteOnceWithFSType, which will examine each volume to determine if Kubernetes should modify ownership and permissions of the volume. With the default policy the defined fsGroup will only be applied if a fstype is defined and the volume's access mode contains ReadWriteOnce.\"", args=[d.arg(name='fsGroupPolicy', type=d.T.string)]), + withFsGroupPolicy(fsGroupPolicy): { fsGroupPolicy: fsGroupPolicy }, + '#withPodInfoOnMount':: d.fn(help="\"podInfoOnMount indicates this CSI volume driver requires additional pod information (like podName, podUID, etc.) during mount operations, if set to true. If set to false, pod information will not be passed on mount. Default is false.\\n\\nThe CSI driver specifies podInfoOnMount as part of driver deployment. If true, Kubelet will pass pod information as VolumeContext in the CSI NodePublishVolume() calls. The CSI driver is responsible for parsing and validating the information passed in as VolumeContext.\\n\\nThe following VolumeContext will be passed if podInfoOnMount is set to true. This list might grow, but the prefix will be used. 
\\\"csi.storage.k8s.io/pod.name\\\": pod.Name \\\"csi.storage.k8s.io/pod.namespace\\\": pod.Namespace \\\"csi.storage.k8s.io/pod.uid\\\": string(pod.UID) \\\"csi.storage.k8s.io/ephemeral\\\": \\\"true\\\" if the volume is an ephemeral inline volume\\n defined by a CSIVolumeSource, otherwise \\\"false\\\"\\n\\n\\\"csi.storage.k8s.io/ephemeral\\\" is a new feature in Kubernetes 1.16. It is only required for drivers which support both the \\\"Persistent\\\" and \\\"Ephemeral\\\" VolumeLifecycleMode. Other drivers can leave pod info disabled and/or ignore this field. As Kubernetes 1.15 doesn't support this field, drivers can only support one mode when deployed on such a cluster and the deployment determines which mode that is, for example via a command line parameter of the driver.\\n\\nThis field is immutable.\"", args=[d.arg(name='podInfoOnMount', type=d.T.boolean)]), + withPodInfoOnMount(podInfoOnMount): { podInfoOnMount: podInfoOnMount }, + '#withRequiresRepublish':: d.fn(help='"requiresRepublish indicates the CSI driver wants `NodePublishVolume` being periodically called to reflect any possible change in the mounted volume. This field defaults to false.\\n\\nNote: After a successful initial NodePublishVolume call, subsequent calls to NodePublishVolume should only update the contents of the volume. New mount points will not be seen by a running container."', args=[d.arg(name='requiresRepublish', type=d.T.boolean)]), + withRequiresRepublish(requiresRepublish): { requiresRepublish: requiresRepublish }, + '#withSeLinuxMount':: d.fn(help="\"seLinuxMount specifies if the CSI driver supports \\\"-o context\\\" mount option.\\n\\nWhen \\\"true\\\", the CSI driver must ensure that all volumes provided by this CSI driver can be mounted separately with different `-o context` options. This is typical for storage backends that provide volumes as filesystems on block devices or as independent shared volumes. Kubernetes will call NodeStage / NodePublish with \\\"-o context=xyz\\\" mount option when mounting a ReadWriteOncePod volume used in Pod that has explicitly set SELinux context. In the future, it may be expanded to other volume AccessModes. In any case, Kubernetes will ensure that the volume is mounted only with a single SELinux context.\\n\\nWhen \\\"false\\\", Kubernetes won't pass any special SELinux mount options to the driver. This is typical for volumes that represent subdirectories of a bigger shared filesystem.\\n\\nDefault is \\\"false\\\".\"", args=[d.arg(name='seLinuxMount', type=d.T.boolean)]), + withSeLinuxMount(seLinuxMount): { seLinuxMount: seLinuxMount }, + '#withStorageCapacity':: d.fn(help='"storageCapacity indicates that the CSI volume driver wants pod scheduling to consider the storage capacity that the driver deployment will report by creating CSIStorageCapacity objects with capacity information, if set to true.\\n\\nThe check can be enabled immediately when deploying a driver. 
In that case, provisioning new volumes with late binding will pause until the driver deployment has published some suitable CSIStorageCapacity object.\\n\\nAlternatively, the driver can be deployed with the field unset or false and it can be flipped later when storage capacity information has been published.\\n\\nThis field was immutable in Kubernetes <= 1.22 and now is mutable."', args=[d.arg(name='storageCapacity', type=d.T.boolean)]), + withStorageCapacity(storageCapacity): { storageCapacity: storageCapacity }, + '#withTokenRequests':: d.fn(help="\"tokenRequests indicates the CSI driver needs pods' service account tokens it is mounting volume for to do necessary authentication. Kubelet will pass the tokens in VolumeContext in the CSI NodePublishVolume calls. The CSI driver should parse and validate the following VolumeContext: \\\"csi.storage.k8s.io/serviceAccount.tokens\\\": {\\n \\\"\u003caudience\u003e\\\": {\\n \\\"token\\\": \u003ctoken\u003e,\\n \\\"expirationTimestamp\\\": \u003cexpiration timestamp in RFC3339\u003e,\\n },\\n ...\\n}\\n\\nNote: Audience in each TokenRequest should be different and at most one token is empty string. To receive a new token after expiry, RequiresRepublish can be used to trigger NodePublishVolume periodically.\"", args=[d.arg(name='tokenRequests', type=d.T.array)]), + withTokenRequests(tokenRequests): { tokenRequests: if std.isArray(v=tokenRequests) then tokenRequests else [tokenRequests] }, + '#withTokenRequestsMixin':: d.fn(help="\"tokenRequests indicates the CSI driver needs pods' service account tokens it is mounting volume for to do necessary authentication. Kubelet will pass the tokens in VolumeContext in the CSI NodePublishVolume calls. The CSI driver should parse and validate the following VolumeContext: \\\"csi.storage.k8s.io/serviceAccount.tokens\\\": {\\n \\\"\u003caudience\u003e\\\": {\\n \\\"token\\\": \u003ctoken\u003e,\\n \\\"expirationTimestamp\\\": \u003cexpiration timestamp in RFC3339\u003e,\\n },\\n ...\\n}\\n\\nNote: Audience in each TokenRequest should be different and at most one token is empty string. To receive a new token after expiry, RequiresRepublish can be used to trigger NodePublishVolume periodically.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='tokenRequests', type=d.T.array)]), + withTokenRequestsMixin(tokenRequests): { tokenRequests+: if std.isArray(v=tokenRequests) then tokenRequests else [tokenRequests] }, + '#withVolumeLifecycleModes':: d.fn(help='"volumeLifecycleModes defines what kind of volumes this CSI volume driver supports. The default if the list is empty is \\"Persistent\\", which is the usage defined by the CSI specification and implemented in Kubernetes via the usual PV/PVC mechanism.\\n\\nThe other mode is \\"Ephemeral\\". In this mode, volumes are defined inline inside the pod spec with CSIVolumeSource and their lifecycle is tied to the lifecycle of that pod. A driver has to be aware of this because it is only going to get a NodePublishVolume call for such a volume.\\n\\nFor more information about implementing this mode, see https://kubernetes-csi.github.io/docs/ephemeral-local-volumes.html A driver can support one or more of these modes and more modes may be added in the future.\\n\\nThis field is beta. 
This field is immutable."', args=[d.arg(name='volumeLifecycleModes', type=d.T.array)]), + withVolumeLifecycleModes(volumeLifecycleModes): { volumeLifecycleModes: if std.isArray(v=volumeLifecycleModes) then volumeLifecycleModes else [volumeLifecycleModes] }, + '#withVolumeLifecycleModesMixin':: d.fn(help='"volumeLifecycleModes defines what kind of volumes this CSI volume driver supports. The default if the list is empty is \\"Persistent\\", which is the usage defined by the CSI specification and implemented in Kubernetes via the usual PV/PVC mechanism.\\n\\nThe other mode is \\"Ephemeral\\". In this mode, volumes are defined inline inside the pod spec with CSIVolumeSource and their lifecycle is tied to the lifecycle of that pod. A driver has to be aware of this because it is only going to get a NodePublishVolume call for such a volume.\\n\\nFor more information about implementing this mode, see https://kubernetes-csi.github.io/docs/ephemeral-local-volumes.html A driver can support one or more of these modes and more modes may be added in the future.\\n\\nThis field is beta. This field is immutable."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='volumeLifecycleModes', type=d.T.array)]), + withVolumeLifecycleModesMixin(volumeLifecycleModes): { volumeLifecycleModes+: if std.isArray(v=volumeLifecycleModes) then volumeLifecycleModes else [volumeLifecycleModes] }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1/csiNode.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/storage/v1/csiNode.libsonnet similarity index 85% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1/csiNode.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/storage/v1/csiNode.libsonnet index d4540670143..b052ab088f6 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1/csiNode.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/storage/v1/csiNode.libsonnet @@ -3,12 +3,10 @@ '#':: d.pkg(name='csiNode', url='', help="\"CSINode holds information about all CSI drivers installed on a node. CSI drivers do not need to create the CSINode object directly. As long as they use the node-driver-registrar sidecar container, the kubelet will automatically populate the CSINode object for the CSI driver as part of kubelet plugin registration. CSINode has the same name as a node. If the object is missing, it means either there are no CSI Drivers available on the node, or the Kubelet version is low enough that it doesn't create this object. CSINode has an OwnerReference that points to the corresponding node object.\""), '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. 
More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -19,21 +17,21 @@ withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. 
If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { metadata+: { generateName: generateName } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { metadata+: { labels+: labels } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. 
Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { metadata+: { namespace: namespace } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, @@ -41,9 +39,9 @@ withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. 
Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { metadata+: { uid: uid } }, }, '#new':: d.fn(help='new returns an instance of CSINode', args=[d.arg(name='name', type=d.T.string)]), diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1beta1/csiNodeDriver.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/storage/v1/csiNodeDriver.libsonnet similarity index 80% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1beta1/csiNodeDriver.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/storage/v1/csiNodeDriver.libsonnet index 15a1402bc51..b92b8dc492b 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1beta1/csiNodeDriver.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/storage/v1/csiNodeDriver.libsonnet @@ -3,10 +3,10 @@ '#':: d.pkg(name='csiNodeDriver', url='', help='"CSINodeDriver holds information about the specification of one CSI driver installed on a node"'), '#allocatable':: d.obj(help='"VolumeNodeResources is a set of resource limits for scheduling of volumes."'), allocatable: { - '#withCount':: d.fn(help='"Maximum number of unique volumes managed by the CSI driver that can be used on a node. A volume that is both attached and mounted on a node is considered to be used once, not twice. The same rule applies for a unique volume that is shared among multiple pods on the same node. If this field is nil, then the supported number of volumes on this node is unbounded."', args=[d.arg(name='count', type=d.T.integer)]), + '#withCount':: d.fn(help='"count indicates the maximum number of unique volumes managed by the CSI driver that can be used on a node. A volume that is both attached and mounted on a node is considered to be used once, not twice. The same rule applies for a unique volume that is shared among multiple pods on the same node. If this field is not specified, then the supported number of volumes on this node is unbounded."', args=[d.arg(name='count', type=d.T.integer)]), withCount(count): { allocatable+: { count: count } }, }, - '#withName':: d.fn(help='"This is the name of the CSI driver that this object refers to. This MUST be the same name returned by the CSI GetPluginName() call for that driver."', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"name represents the name of the CSI driver that this object refers to. This MUST be the same name returned by the CSI GetPluginName() call for that driver."', args=[d.arg(name='name', type=d.T.string)]), withName(name): { name: name }, '#withNodeID':: d.fn(help='"nodeID of the node from the driver point of view. This field enables Kubernetes to communicate with storage systems that do not share the same nomenclature for nodes. For example, Kubernetes may refer to a given node as \\"node1\\", but the storage system may refer to the same node as \\"nodeA\\". When Kubernetes issues a command to the storage system to attach a volume to a specific node, it can use this field to refer to the node name using the ID that the storage system will understand, e.g. \\"nodeA\\" instead of \\"node1\\". 
This field is required."', args=[d.arg(name='nodeID', type=d.T.string)]),
   withNodeID(nodeID): { nodeID: nodeID },
diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1beta1/csiNodeSpec.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/storage/v1/csiNodeSpec.libsonnet
similarity index 100%
rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1beta1/csiNodeSpec.libsonnet
rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/storage/v1/csiNodeSpec.libsonnet
diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1beta1/csiStorageCapacity.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/storage/v1/csiStorageCapacity.libsonnet
similarity index 63%
rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1beta1/csiStorageCapacity.libsonnet
rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/storage/v1/csiStorageCapacity.libsonnet
index 8b3fcb68490..c1e1484d5aa 100644
--- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1beta1/csiStorageCapacity.libsonnet
+++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/storage/v1/csiStorageCapacity.libsonnet
@@ -1,14 +1,12 @@
 {
   local d = (import 'doc-util/main.libsonnet'),
-  '#':: d.pkg(name='csiStorageCapacity', url='', help='"CSIStorageCapacity stores the result of one CSI GetCapacity call. For a given StorageClass, this describes the available capacity in a particular topology segment.
This can be used when considering where to instantiate new PersistentVolumes.\\n\\nFor example this can express things like: - StorageClass \\"standard\\" has \\"1234 GiB\\" available in \\"topology.kubernetes.io/zone=us-east1\\" - StorageClass \\"localssd\\" has \\"10 GiB\\" available in \\"kubernetes.io/hostname=knode-abc123\\"\\n\\nThe following three cases all imply that no capacity is available for a certain combination: - no object exists with suitable topology and storage class name - such an object exists, but the capacity is unset - such an object exists, but the capacity is zero\\n\\nThe producer of these objects can decide which approach is more suitable.\\n\\nThey are consumed by the kube-scheduler when a CSI driver opts into capacity-aware scheduling with CSIDriverSpec.StorageCapacity. The scheduler compares the MaximumVolumeSize against the requested size of pending volumes to filter out unsuitable nodes. If MaximumVolumeSize is unset, it falls back to a comparison against the less precise Capacity. If that is also unset, the scheduler assumes that capacity is insufficient and tries some other node."'), '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. 
This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -19,21 +17,21 @@ withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. 
This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { metadata+: { generateName: generateName } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { metadata+: { labels+: labels } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". 
The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { metadata+: { namespace: namespace } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, @@ -41,14 +39,14 @@ withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. 
They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { metadata+: { uid: uid } }, }, '#new':: d.fn(help='new returns an instance of CSIStorageCapacity', args=[d.arg(name='name', type=d.T.string)]), new(name): { - apiVersion: 'storage.k8s.io/v1beta1', + apiVersion: 'storage.k8s.io/v1', kind: 'CSIStorageCapacity', } + self.metadata.withName(name=name), '#nodeTopology':: d.obj(help='"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects."'), @@ -62,11 +60,11 @@ '#withMatchLabelsMixin':: d.fn(help='"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \\"key\\", the operator is \\"In\\", and the values array contains only \\"value\\". The requirements are ANDed."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='matchLabels', type=d.T.object)]), withMatchLabelsMixin(matchLabels): { nodeTopology+: { matchLabels+: matchLabels } }, }, - '#withCapacity':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. 
| .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='capacity', type=d.T.string)]), + '#withCapacity':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n``` \u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n\\n\\t(Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. 
| .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n\\n\\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n\\n\\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e ```\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n\\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\\n\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n\\n- 1.5 will be serialized as \\\"1500m\\\" - 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='capacity', type=d.T.string)]), withCapacity(capacity): { capacity: capacity }, - '#withMaximumVolumeSize':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n (Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. 
| .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n a. No precision is lost\\n b. No fractional digits will be emitted\\n c. The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n 1.5 will be serialized as \\\"1500m\\\"\\n 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='maximumVolumeSize', type=d.T.string)]), + '#withMaximumVolumeSize':: d.fn(help="\"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\\n\\nThe serialization format is:\\n\\n``` \u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\\n\\n\\t(Note that \u003csuffix\u003e may be empty, from the \\\"\\\" case in \u003cdecimalSI\u003e.)\\n\\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. 
| .\u003cdigits\u003e \u003csign\u003e ::= \\\"+\\\" | \\\"-\\\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n\\n\\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\\n\u003cdecimalSI\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n\\n\\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\\n\u003cdecimalExponent\u003e ::= \\\"e\\\" \u003csignedNumber\u003e | \\\"E\\\" \u003csignedNumber\u003e ```\\n\\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\\n\\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\\n\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n\\n- 1.5 will be serialized as \\\"1500m\\\" - 1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.\"", args=[d.arg(name='maximumVolumeSize', type=d.T.string)]), withMaximumVolumeSize(maximumVolumeSize): { maximumVolumeSize: maximumVolumeSize }, - '#withStorageClassName':: d.fn(help='"The name of the StorageClass that the reported capacity applies to. It must meet the same requirements as the name of a StorageClass object (non-empty, DNS subdomain). If that object no longer exists, the CSIStorageCapacity object is obsolete and should be removed by its creator. This field is immutable."', args=[d.arg(name='storageClassName', type=d.T.string)]), + '#withStorageClassName':: d.fn(help='"storageClassName represents the name of the StorageClass that the reported capacity applies to. It must meet the same requirements as the name of a StorageClass object (non-empty, DNS subdomain). If that object no longer exists, the CSIStorageCapacity object is obsolete and should be removed by its creator. 
This field is immutable."', args=[d.arg(name='storageClassName', type=d.T.string)]), withStorageClassName(storageClassName): { storageClassName: storageClassName }, '#mixin': 'ignore', mixin: self, diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1beta1/main.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/storage/v1/main.libsonnet similarity index 94% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1beta1/main.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/storage/v1/main.libsonnet index 9413b010121..10082c47085 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1beta1/main.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/storage/v1/main.libsonnet @@ -1,6 +1,6 @@ { local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='v1beta1', url='', help=''), + '#':: d.pkg(name='v1', url='', help=''), csiDriver: (import 'csiDriver.libsonnet'), csiDriverSpec: (import 'csiDriverSpec.libsonnet'), csiNode: (import 'csiNode.libsonnet'), diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1/storageClass.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/storage/v1/storageClass.libsonnet similarity index 74% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1/storageClass.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/storage/v1/storageClass.libsonnet index f627289f551..15004ad00dc 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1/storageClass.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/storage/v1/storageClass.libsonnet @@ -3,12 +3,10 @@ '#':: d.pkg(name='storageClass', url='', help='"StorageClass describes the parameters for a class of storage for which PersistentVolumes can be dynamically provisioned.\\n\\nStorageClasses are non-namespaced; the name of the storage class according to etcd is in ObjectMeta.Name."'), '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. 
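// ---------------------------------------------------------------------------
// Editor's note (illustrative sketch, not part of the patch): with the bump
// above, csiStorageCapacity is generated under storage.v1 rather than
// storage.v1beta1, and new() now emits apiVersion 'storage.k8s.io/v1'. A
// caller of the vendored library might look roughly like this; the import
// path and all field values are assumptions, not taken from this repository.
local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';
local csc = k.storage.v1.csiStorageCapacity;  // was k.storage.v1beta1.csiStorageCapacity on 1.21

{
  csi_storage_capacity_example:
    csc.new('fast-zone-a')                                                 // sets metadata.name plus apiVersion/kind
    + csc.metadata.withNamespace('storage')                                // ObjectMeta helpers shown in the hunk above
    + csc.metadata.withLabels({ 'app.kubernetes.io/part-of': 'example' })
    + csc.nodeTopology.withMatchLabels({ 'topology.kubernetes.io/zone': 'zone-a' })
    + csc.withStorageClassName('fast')
    + csc.withCapacity('100Gi'),                                           // Quantity is passed as a string
}
// --- end of editor's note ---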
More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -19,21 +17,21 @@ withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. 
The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { metadata+: { generateName: generateName } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { metadata+: { labels+: labels } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. 
This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { metadata+: { namespace: namespace } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. 
There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, @@ -41,9 +39,9 @@ withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { metadata+: { uid: uid } }, }, '#new':: d.fn(help='new returns an instance of StorageClass', args=[d.arg(name='name', type=d.T.string)]), @@ -51,25 +49,25 @@ apiVersion: 'storage.k8s.io/v1', kind: 'StorageClass', } + self.metadata.withName(name=name), - '#withAllowVolumeExpansion':: d.fn(help='"AllowVolumeExpansion shows whether the storage class allow volume expand"', args=[d.arg(name='allowVolumeExpansion', type=d.T.boolean)]), + '#withAllowVolumeExpansion':: d.fn(help='"allowVolumeExpansion shows whether the storage class allow volume expand."', args=[d.arg(name='allowVolumeExpansion', type=d.T.boolean)]), withAllowVolumeExpansion(allowVolumeExpansion): { allowVolumeExpansion: allowVolumeExpansion }, - '#withAllowedTopologies':: d.fn(help='"Restrict the node topologies where volumes can be dynamically provisioned. Each volume plugin defines its own supported topology specifications. An empty TopologySelectorTerm list means there is no topology restriction. 
This field is only honored by servers that enable the VolumeScheduling feature."', args=[d.arg(name='allowedTopologies', type=d.T.array)]), + '#withAllowedTopologies':: d.fn(help='"allowedTopologies restrict the node topologies where volumes can be dynamically provisioned. Each volume plugin defines its own supported topology specifications. An empty TopologySelectorTerm list means there is no topology restriction. This field is only honored by servers that enable the VolumeScheduling feature."', args=[d.arg(name='allowedTopologies', type=d.T.array)]), withAllowedTopologies(allowedTopologies): { allowedTopologies: if std.isArray(v=allowedTopologies) then allowedTopologies else [allowedTopologies] }, - '#withAllowedTopologiesMixin':: d.fn(help='"Restrict the node topologies where volumes can be dynamically provisioned. Each volume plugin defines its own supported topology specifications. An empty TopologySelectorTerm list means there is no topology restriction. This field is only honored by servers that enable the VolumeScheduling feature."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='allowedTopologies', type=d.T.array)]), + '#withAllowedTopologiesMixin':: d.fn(help='"allowedTopologies restrict the node topologies where volumes can be dynamically provisioned. Each volume plugin defines its own supported topology specifications. An empty TopologySelectorTerm list means there is no topology restriction. This field is only honored by servers that enable the VolumeScheduling feature."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='allowedTopologies', type=d.T.array)]), withAllowedTopologiesMixin(allowedTopologies): { allowedTopologies+: if std.isArray(v=allowedTopologies) then allowedTopologies else [allowedTopologies] }, - '#withMountOptions':: d.fn(help='"Dynamically provisioned PersistentVolumes of this storage class are created with these mountOptions, e.g. [\\"ro\\", \\"soft\\"]. Not validated - mount of the PVs will simply fail if one is invalid."', args=[d.arg(name='mountOptions', type=d.T.array)]), + '#withMountOptions':: d.fn(help='"mountOptions controls the mountOptions for dynamically provisioned PersistentVolumes of this storage class. e.g. [\\"ro\\", \\"soft\\"]. Not validated - mount of the PVs will simply fail if one is invalid."', args=[d.arg(name='mountOptions', type=d.T.array)]), withMountOptions(mountOptions): { mountOptions: if std.isArray(v=mountOptions) then mountOptions else [mountOptions] }, - '#withMountOptionsMixin':: d.fn(help='"Dynamically provisioned PersistentVolumes of this storage class are created with these mountOptions, e.g. [\\"ro\\", \\"soft\\"]. Not validated - mount of the PVs will simply fail if one is invalid."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='mountOptions', type=d.T.array)]), + '#withMountOptionsMixin':: d.fn(help='"mountOptions controls the mountOptions for dynamically provisioned PersistentVolumes of this storage class. e.g. [\\"ro\\", \\"soft\\"]. 
Not validated - mount of the PVs will simply fail if one is invalid."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='mountOptions', type=d.T.array)]), withMountOptionsMixin(mountOptions): { mountOptions+: if std.isArray(v=mountOptions) then mountOptions else [mountOptions] }, - '#withParameters':: d.fn(help='"Parameters holds the parameters for the provisioner that should create volumes of this storage class."', args=[d.arg(name='parameters', type=d.T.object)]), + '#withParameters':: d.fn(help='"parameters holds the parameters for the provisioner that should create volumes of this storage class."', args=[d.arg(name='parameters', type=d.T.object)]), withParameters(parameters): { parameters: parameters }, - '#withParametersMixin':: d.fn(help='"Parameters holds the parameters for the provisioner that should create volumes of this storage class."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='parameters', type=d.T.object)]), + '#withParametersMixin':: d.fn(help='"parameters holds the parameters for the provisioner that should create volumes of this storage class."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='parameters', type=d.T.object)]), withParametersMixin(parameters): { parameters+: parameters }, - '#withProvisioner':: d.fn(help='"Provisioner indicates the type of the provisioner."', args=[d.arg(name='provisioner', type=d.T.string)]), + '#withProvisioner':: d.fn(help='"provisioner indicates the type of the provisioner."', args=[d.arg(name='provisioner', type=d.T.string)]), withProvisioner(provisioner): { provisioner: provisioner }, - '#withReclaimPolicy':: d.fn(help='"Dynamically provisioned PersistentVolumes of this storage class are created with this reclaimPolicy. Defaults to Delete."', args=[d.arg(name='reclaimPolicy', type=d.T.string)]), + '#withReclaimPolicy':: d.fn(help='"reclaimPolicy controls the reclaimPolicy for dynamically provisioned PersistentVolumes of this storage class. Defaults to Delete."', args=[d.arg(name='reclaimPolicy', type=d.T.string)]), withReclaimPolicy(reclaimPolicy): { reclaimPolicy: reclaimPolicy }, - '#withVolumeBindingMode':: d.fn(help='"VolumeBindingMode indicates how PersistentVolumeClaims should be provisioned and bound. When unset, VolumeBindingImmediate is used. This field is only honored by servers that enable the VolumeScheduling feature."', args=[d.arg(name='volumeBindingMode', type=d.T.string)]), + '#withVolumeBindingMode':: d.fn(help='"volumeBindingMode indicates how PersistentVolumeClaims should be provisioned and bound. When unset, VolumeBindingImmediate is used. 
This field is only honored by servers that enable the VolumeScheduling feature."', args=[d.arg(name='volumeBindingMode', type=d.T.string)]), withVolumeBindingMode(volumeBindingMode): { volumeBindingMode: volumeBindingMode }, '#mixin': 'ignore', mixin: self, diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1/tokenRequest.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/storage/v1/tokenRequest.libsonnet similarity index 84% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1/tokenRequest.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/storage/v1/tokenRequest.libsonnet index 38de143e7be..d7a557853c0 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1/tokenRequest.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/storage/v1/tokenRequest.libsonnet @@ -1,9 +1,9 @@ { local d = (import 'doc-util/main.libsonnet'), '#':: d.pkg(name='tokenRequest', url='', help='"TokenRequest contains parameters of a service account token."'), - '#withAudience':: d.fn(help='"Audience is the intended audience of the token in \\"TokenRequestSpec\\". It will default to the audiences of kube apiserver."', args=[d.arg(name='audience', type=d.T.string)]), + '#withAudience':: d.fn(help='"audience is the intended audience of the token in \\"TokenRequestSpec\\". It will default to the audiences of kube apiserver."', args=[d.arg(name='audience', type=d.T.string)]), withAudience(audience): { audience: audience }, - '#withExpirationSeconds':: d.fn(help='"ExpirationSeconds is the duration of validity of the token in \\"TokenRequestSpec\\". It has the same default value of \\"ExpirationSeconds\\" in \\"TokenRequestSpec\\"."', args=[d.arg(name='expirationSeconds', type=d.T.integer)]), + '#withExpirationSeconds':: d.fn(help='"expirationSeconds is the duration of validity of the token in \\"TokenRequestSpec\\". 
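// ---------------------------------------------------------------------------
// Editor's note (illustrative sketch, not part of the patch): the storageClass
// helpers above keep the same call shape on 1.29; only the generated help text
// changed. A minimal StorageClass built from them could look like this; the
// import path, provisioner and parameter values are assumptions for
// illustration only.
local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';
local sc = k.storage.v1.storageClass;

{
  storage_class_example:
    sc.new('fast')                                       // apiVersion storage.k8s.io/v1, kind StorageClass, metadata.name
    + sc.withProvisioner('ebs.csi.aws.com')              // hypothetical provisioner
    + sc.withParameters({ type: 'gp3' })
    + sc.withReclaimPolicy('Retain')                     // defaults to Delete when unset
    + sc.withVolumeBindingMode('WaitForFirstConsumer')
    + sc.withAllowVolumeExpansion(true)
    + sc.metadata.withLabels({ 'app.kubernetes.io/managed-by': 'tanka' }),
}
// --- end of editor's note ---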
It has the same default value of \\"ExpirationSeconds\\" in \\"TokenRequestSpec\\"."', args=[d.arg(name='expirationSeconds', type=d.T.integer)]), withExpirationSeconds(expirationSeconds): { expirationSeconds: expirationSeconds }, '#mixin': 'ignore', mixin: self, diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1/volumeAttachment.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/storage/v1/volumeAttachment.libsonnet similarity index 65% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1/volumeAttachment.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/storage/v1/volumeAttachment.libsonnet index c4897db2a84..20b46a5cc83 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1/volumeAttachment.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/storage/v1/volumeAttachment.libsonnet @@ -3,12 +3,10 @@ '#':: d.pkg(name='volumeAttachment', url='', help='"VolumeAttachment captures the intent to attach or detach the specified volume to/from the specified node.\\n\\nVolumeAttachment objects are non-namespaced."'), '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. 
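// ---------------------------------------------------------------------------
// Editor's note (illustrative sketch, not part of the patch): tokenRequest is
// a plain parameter object; on 1.29 only its help text changed (lower-cased
// field names). It is typically embedded in a CSIDriver spec's tokenRequests
// list. The import path and values below are assumptions.
local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';
local tr = k.storage.v1.tokenRequest;

{
  // Renders as { audience: 'vault', expirationSeconds: 3600 }.
  token_request_example: tr.withAudience('vault') + tr.withExpirationSeconds(3600),
}
// --- end of editor's note ---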
This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -19,21 +17,21 @@ withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. 
This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { metadata+: { generateName: generateName } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { metadata+: { labels+: labels } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". 
The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { metadata+: { namespace: namespace } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, @@ -41,9 +39,9 @@ withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. 
They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { metadata+: { uid: uid } }, }, '#new':: d.fn(help='new returns an instance of VolumeAttachment', args=[d.arg(name='name', type=d.T.string)]), @@ -59,77 +57,77 @@ inlineVolumeSpec: { '#awsElasticBlockStore':: d.obj(help='"Represents a Persistent Disk resource in AWS.\\n\\nAn AWS EBS disk must exist before mounting to a container. The disk must also be in the same AWS zone as the kubelet. An AWS EBS disk can only be mounted as read/write once. AWS EBS volumes support ownership management and SELinux relabeling."'), awsElasticBlockStore: { - '#withFsType':: d.fn(help='"Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore"', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType is the filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore"', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { spec+: { source+: { inlineVolumeSpec+: { awsElasticBlockStore+: { fsType: fsType } } } } }, - '#withPartition':: d.fn(help='"The partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \\"1\\". 
Similarly, the volume partition for /dev/sda is \\"0\\" (or you can leave the property empty)."', args=[d.arg(name='partition', type=d.T.integer)]), + '#withPartition':: d.fn(help='"partition is the partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \\"1\\". Similarly, the volume partition for /dev/sda is \\"0\\" (or you can leave the property empty)."', args=[d.arg(name='partition', type=d.T.integer)]), withPartition(partition): { spec+: { source+: { inlineVolumeSpec+: { awsElasticBlockStore+: { partition: partition } } } } }, - '#withReadOnly':: d.fn(help='"Specify \\"true\\" to force and set the ReadOnly property in VolumeMounts to \\"true\\". If omitted, the default is \\"false\\". More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore"', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly value true will force the readOnly setting in VolumeMounts. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore"', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { spec+: { source+: { inlineVolumeSpec+: { awsElasticBlockStore+: { readOnly: readOnly } } } } }, - '#withVolumeID':: d.fn(help='"Unique ID of the persistent disk resource in AWS (Amazon EBS volume). More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore"', args=[d.arg(name='volumeID', type=d.T.string)]), + '#withVolumeID':: d.fn(help='"volumeID is unique ID of the persistent disk resource in AWS (Amazon EBS volume). More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore"', args=[d.arg(name='volumeID', type=d.T.string)]), withVolumeID(volumeID): { spec+: { source+: { inlineVolumeSpec+: { awsElasticBlockStore+: { volumeID: volumeID } } } } }, }, '#azureDisk':: d.obj(help='"AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod."'), azureDisk: { - '#withCachingMode':: d.fn(help='"Host Caching mode: None, Read Only, Read Write."', args=[d.arg(name='cachingMode', type=d.T.string)]), + '#withCachingMode':: d.fn(help='"cachingMode is the Host Caching mode: None, Read Only, Read Write."', args=[d.arg(name='cachingMode', type=d.T.string)]), withCachingMode(cachingMode): { spec+: { source+: { inlineVolumeSpec+: { azureDisk+: { cachingMode: cachingMode } } } } }, - '#withDiskName':: d.fn(help='"The Name of the data disk in the blob storage"', args=[d.arg(name='diskName', type=d.T.string)]), + '#withDiskName':: d.fn(help='"diskName is the Name of the data disk in the blob storage"', args=[d.arg(name='diskName', type=d.T.string)]), withDiskName(diskName): { spec+: { source+: { inlineVolumeSpec+: { azureDisk+: { diskName: diskName } } } } }, - '#withDiskURI':: d.fn(help='"The URI the data disk in the blob storage"', args=[d.arg(name='diskURI', type=d.T.string)]), + '#withDiskURI':: d.fn(help='"diskURI is the URI of data disk in the blob storage"', args=[d.arg(name='diskURI', type=d.T.string)]), withDiskURI(diskURI): { spec+: { source+: { inlineVolumeSpec+: { azureDisk+: { diskURI: diskURI } } } } }, - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType is Filesystem type to mount. 
Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { spec+: { source+: { inlineVolumeSpec+: { azureDisk+: { fsType: fsType } } } } }, - '#withKind':: d.fn(help='"Expected values Shared: multiple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared"', args=[d.arg(name='kind', type=d.T.string)]), + '#withKind':: d.fn(help='"kind expected values are Shared: multiple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared"', args=[d.arg(name='kind', type=d.T.string)]), withKind(kind): { spec+: { source+: { inlineVolumeSpec+: { azureDisk+: { kind: kind } } } } }, - '#withReadOnly':: d.fn(help='"Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { spec+: { source+: { inlineVolumeSpec+: { azureDisk+: { readOnly: readOnly } } } } }, }, '#azureFile':: d.obj(help='"AzureFile represents an Azure File Service mount on the host and bind mount to the pod."'), azureFile: { - '#withReadOnly':: d.fn(help='"Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly defaults to false (read/write). 
ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { spec+: { source+: { inlineVolumeSpec+: { azureFile+: { readOnly: readOnly } } } } }, - '#withSecretName':: d.fn(help='"the name of secret that contains Azure Storage Account Name and Key"', args=[d.arg(name='secretName', type=d.T.string)]), + '#withSecretName':: d.fn(help='"secretName is the name of secret that contains Azure Storage Account Name and Key"', args=[d.arg(name='secretName', type=d.T.string)]), withSecretName(secretName): { spec+: { source+: { inlineVolumeSpec+: { azureFile+: { secretName: secretName } } } } }, - '#withSecretNamespace':: d.fn(help='"the namespace of the secret that contains Azure Storage Account Name and Key default is the same as the Pod"', args=[d.arg(name='secretNamespace', type=d.T.string)]), + '#withSecretNamespace':: d.fn(help='"secretNamespace is the namespace of the secret that contains Azure Storage Account Name and Key default is the same as the Pod"', args=[d.arg(name='secretNamespace', type=d.T.string)]), withSecretNamespace(secretNamespace): { spec+: { source+: { inlineVolumeSpec+: { azureFile+: { secretNamespace: secretNamespace } } } } }, - '#withShareName':: d.fn(help='"Share Name"', args=[d.arg(name='shareName', type=d.T.string)]), + '#withShareName':: d.fn(help='"shareName is the azure Share Name"', args=[d.arg(name='shareName', type=d.T.string)]), withShareName(shareName): { spec+: { source+: { inlineVolumeSpec+: { azureFile+: { shareName: shareName } } } } }, }, '#cephfs':: d.obj(help='"Represents a Ceph Filesystem mount that lasts the lifetime of a pod Cephfs volumes do not support ownership management or SELinux relabeling."'), cephfs: { '#secretRef':: d.obj(help='"SecretReference represents a Secret Reference. 
It has enough information to retrieve secret in any namespace"'), secretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), withName(name): { spec+: { source+: { inlineVolumeSpec+: { cephfs+: { secretRef+: { name: name } } } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { spec+: { source+: { inlineVolumeSpec+: { cephfs+: { secretRef+: { namespace: namespace } } } } } }, }, - '#withMonitors':: d.fn(help='"Required: Monitors is a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='monitors', type=d.T.array)]), + '#withMonitors':: d.fn(help='"monitors is Required: Monitors is a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='monitors', type=d.T.array)]), withMonitors(monitors): { spec+: { source+: { inlineVolumeSpec+: { cephfs+: { monitors: if std.isArray(v=monitors) then monitors else [monitors] } } } } }, - '#withMonitorsMixin':: d.fn(help='"Required: Monitors is a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='monitors', type=d.T.array)]), + '#withMonitorsMixin':: d.fn(help='"monitors is Required: Monitors is a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='monitors', type=d.T.array)]), withMonitorsMixin(monitors): { spec+: { source+: { inlineVolumeSpec+: { cephfs+: { monitors+: if std.isArray(v=monitors) then monitors else [monitors] } } } } }, - '#withPath':: d.fn(help='"Optional: Used as the mounted root, rather than the full Ceph tree, default is /"', args=[d.arg(name='path', type=d.T.string)]), + '#withPath':: d.fn(help='"path is Optional: Used as the mounted root, rather than the full Ceph tree, default is /"', args=[d.arg(name='path', type=d.T.string)]), withPath(path): { spec+: { source+: { inlineVolumeSpec+: { cephfs+: { path: path } } } } }, - '#withReadOnly':: d.fn(help='"Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly is Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. 
More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { spec+: { source+: { inlineVolumeSpec+: { cephfs+: { readOnly: readOnly } } } } }, - '#withSecretFile':: d.fn(help='"Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='secretFile', type=d.T.string)]), + '#withSecretFile':: d.fn(help='"secretFile is Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='secretFile', type=d.T.string)]), withSecretFile(secretFile): { spec+: { source+: { inlineVolumeSpec+: { cephfs+: { secretFile: secretFile } } } } }, - '#withUser':: d.fn(help='"Optional: User is the rados user name, default is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='user', type=d.T.string)]), + '#withUser':: d.fn(help='"user is Optional: User is the rados user name, default is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='user', type=d.T.string)]), withUser(user): { spec+: { source+: { inlineVolumeSpec+: { cephfs+: { user: user } } } } }, }, '#cinder':: d.obj(help='"Represents a cinder volume resource in Openstack. A Cinder volume must exist before mounting to a container. The volume must also be in the same region as the kubelet. Cinder volumes support ownership management and SELinux relabeling."'), cinder: { '#secretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), secretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), withName(name): { spec+: { source+: { inlineVolumeSpec+: { cinder+: { secretRef+: { name: name } } } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { spec+: { source+: { inlineVolumeSpec+: { cinder+: { secretRef+: { namespace: namespace } } } } } }, }, - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://examples.k8s.io/mysql-cinder-pd/README.md"', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType Filesystem type to mount. Must be a filesystem type supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://examples.k8s.io/mysql-cinder-pd/README.md"', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { spec+: { source+: { inlineVolumeSpec+: { cinder+: { fsType: fsType } } } } }, - '#withReadOnly':: d.fn(help='"Optional: Defaults to false (read/write). 
ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/mysql-cinder-pd/README.md"', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly is Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/mysql-cinder-pd/README.md"', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { spec+: { source+: { inlineVolumeSpec+: { cinder+: { readOnly: readOnly } } } } }, - '#withVolumeID':: d.fn(help='"volume id used to identify the volume in cinder. More info: https://examples.k8s.io/mysql-cinder-pd/README.md"', args=[d.arg(name='volumeID', type=d.T.string)]), + '#withVolumeID':: d.fn(help='"volumeID used to identify the volume in cinder. More info: https://examples.k8s.io/mysql-cinder-pd/README.md"', args=[d.arg(name='volumeID', type=d.T.string)]), withVolumeID(volumeID): { spec+: { source+: { inlineVolumeSpec+: { cinder+: { volumeID: volumeID } } } } }, }, '#claimRef':: d.obj(help='"ObjectReference contains enough information to let you inspect or modify the referred object."'), @@ -153,164 +151,171 @@ csi: { '#controllerExpandSecretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), controllerExpandSecretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), withName(name): { spec+: { source+: { inlineVolumeSpec+: { csi+: { controllerExpandSecretRef+: { name: name } } } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { spec+: { source+: { inlineVolumeSpec+: { csi+: { controllerExpandSecretRef+: { namespace: namespace } } } } } }, }, '#controllerPublishSecretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), controllerPublishSecretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), withName(name): { spec+: { source+: { inlineVolumeSpec+: { csi+: { controllerPublishSecretRef+: { name: name } } } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { spec+: { source+: { inlineVolumeSpec+: { csi+: { controllerPublishSecretRef+: { namespace: namespace } } } } } }, }, + '#nodeExpandSecretRef':: d.obj(help='"SecretReference represents a Secret Reference. 
It has enough information to retrieve secret in any namespace"'), + nodeExpandSecretRef: { + '#withName':: d.fn(help='"name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { spec+: { source+: { inlineVolumeSpec+: { csi+: { nodeExpandSecretRef+: { name: name } } } } } }, + '#withNamespace':: d.fn(help='"namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), + withNamespace(namespace): { spec+: { source+: { inlineVolumeSpec+: { csi+: { nodeExpandSecretRef+: { namespace: namespace } } } } } }, + }, '#nodePublishSecretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), nodePublishSecretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), withName(name): { spec+: { source+: { inlineVolumeSpec+: { csi+: { nodePublishSecretRef+: { name: name } } } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { spec+: { source+: { inlineVolumeSpec+: { csi+: { nodePublishSecretRef+: { namespace: namespace } } } } } }, }, '#nodeStageSecretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), nodeStageSecretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), withName(name): { spec+: { source+: { inlineVolumeSpec+: { csi+: { nodeStageSecretRef+: { name: name } } } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { spec+: { source+: { inlineVolumeSpec+: { csi+: { nodeStageSecretRef+: { namespace: namespace } } } } } }, }, - '#withDriver':: d.fn(help='"Driver is the name of the driver to use for this volume. Required."', args=[d.arg(name='driver', type=d.T.string)]), + '#withDriver':: d.fn(help='"driver is the name of the driver to use for this volume. Required."', args=[d.arg(name='driver', type=d.T.string)]), withDriver(driver): { spec+: { source+: { inlineVolumeSpec+: { csi+: { driver: driver } } } } }, - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\"."', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType to mount. Must be a filesystem type supported by the host operating system. Ex. 
\\"ext4\\", \\"xfs\\", \\"ntfs\\"."', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { spec+: { source+: { inlineVolumeSpec+: { csi+: { fsType: fsType } } } } }, - '#withReadOnly':: d.fn(help='"Optional: The value to pass to ControllerPublishVolumeRequest. Defaults to false (read/write)."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly value to pass to ControllerPublishVolumeRequest. Defaults to false (read/write)."', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { spec+: { source+: { inlineVolumeSpec+: { csi+: { readOnly: readOnly } } } } }, - '#withVolumeAttributes':: d.fn(help='"Attributes of the volume to publish."', args=[d.arg(name='volumeAttributes', type=d.T.object)]), + '#withVolumeAttributes':: d.fn(help='"volumeAttributes of the volume to publish."', args=[d.arg(name='volumeAttributes', type=d.T.object)]), withVolumeAttributes(volumeAttributes): { spec+: { source+: { inlineVolumeSpec+: { csi+: { volumeAttributes: volumeAttributes } } } } }, - '#withVolumeAttributesMixin':: d.fn(help='"Attributes of the volume to publish."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='volumeAttributes', type=d.T.object)]), + '#withVolumeAttributesMixin':: d.fn(help='"volumeAttributes of the volume to publish."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='volumeAttributes', type=d.T.object)]), withVolumeAttributesMixin(volumeAttributes): { spec+: { source+: { inlineVolumeSpec+: { csi+: { volumeAttributes+: volumeAttributes } } } } }, - '#withVolumeHandle':: d.fn(help='"VolumeHandle is the unique volume name returned by the CSI volume plugin’s CreateVolume to refer to the volume on all subsequent calls. Required."', args=[d.arg(name='volumeHandle', type=d.T.string)]), + '#withVolumeHandle':: d.fn(help='"volumeHandle is the unique volume name returned by the CSI volume plugin’s CreateVolume to refer to the volume on all subsequent calls. Required."', args=[d.arg(name='volumeHandle', type=d.T.string)]), withVolumeHandle(volumeHandle): { spec+: { source+: { inlineVolumeSpec+: { csi+: { volumeHandle: volumeHandle } } } } }, }, '#fc':: d.obj(help='"Represents a Fibre Channel volume. Fibre Channel volumes can only be mounted as read/write once. Fibre Channel volumes support ownership management and SELinux relabeling."'), fc: { - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { spec+: { source+: { inlineVolumeSpec+: { fc+: { fsType: fsType } } } } }, - '#withLun':: d.fn(help='"Optional: FC target lun number"', args=[d.arg(name='lun', type=d.T.integer)]), + '#withLun':: d.fn(help='"lun is Optional: FC target lun number"', args=[d.arg(name='lun', type=d.T.integer)]), withLun(lun): { spec+: { source+: { inlineVolumeSpec+: { fc+: { lun: lun } } } } }, - '#withReadOnly':: d.fn(help='"Optional: Defaults to false (read/write). 
ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly is Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { spec+: { source+: { inlineVolumeSpec+: { fc+: { readOnly: readOnly } } } } }, - '#withTargetWWNs':: d.fn(help='"Optional: FC target worldwide names (WWNs)"', args=[d.arg(name='targetWWNs', type=d.T.array)]), + '#withTargetWWNs':: d.fn(help='"targetWWNs is Optional: FC target worldwide names (WWNs)"', args=[d.arg(name='targetWWNs', type=d.T.array)]), withTargetWWNs(targetWWNs): { spec+: { source+: { inlineVolumeSpec+: { fc+: { targetWWNs: if std.isArray(v=targetWWNs) then targetWWNs else [targetWWNs] } } } } }, - '#withTargetWWNsMixin':: d.fn(help='"Optional: FC target worldwide names (WWNs)"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='targetWWNs', type=d.T.array)]), + '#withTargetWWNsMixin':: d.fn(help='"targetWWNs is Optional: FC target worldwide names (WWNs)"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='targetWWNs', type=d.T.array)]), withTargetWWNsMixin(targetWWNs): { spec+: { source+: { inlineVolumeSpec+: { fc+: { targetWWNs+: if std.isArray(v=targetWWNs) then targetWWNs else [targetWWNs] } } } } }, - '#withWwids':: d.fn(help='"Optional: FC volume world wide identifiers (wwids) Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously."', args=[d.arg(name='wwids', type=d.T.array)]), + '#withWwids':: d.fn(help='"wwids Optional: FC volume world wide identifiers (wwids) Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously."', args=[d.arg(name='wwids', type=d.T.array)]), withWwids(wwids): { spec+: { source+: { inlineVolumeSpec+: { fc+: { wwids: if std.isArray(v=wwids) then wwids else [wwids] } } } } }, - '#withWwidsMixin':: d.fn(help='"Optional: FC volume world wide identifiers (wwids) Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='wwids', type=d.T.array)]), + '#withWwidsMixin':: d.fn(help='"wwids Optional: FC volume world wide identifiers (wwids) Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='wwids', type=d.T.array)]), withWwidsMixin(wwids): { spec+: { source+: { inlineVolumeSpec+: { fc+: { wwids+: if std.isArray(v=wwids) then wwids else [wwids] } } } } }, }, '#flexVolume':: d.obj(help='"FlexPersistentVolumeSource represents a generic persistent volume resource that is provisioned/attached using an exec based plugin."'), flexVolume: { '#secretRef':: d.obj(help='"SecretReference represents a Secret Reference. 
It has enough information to retrieve secret in any namespace"'), secretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), withName(name): { spec+: { source+: { inlineVolumeSpec+: { flexVolume+: { secretRef+: { name: name } } } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { spec+: { source+: { inlineVolumeSpec+: { flexVolume+: { secretRef+: { namespace: namespace } } } } } }, }, - '#withDriver':: d.fn(help='"Driver is the name of the driver to use for this volume."', args=[d.arg(name='driver', type=d.T.string)]), + '#withDriver':: d.fn(help='"driver is the name of the driver to use for this volume."', args=[d.arg(name='driver', type=d.T.string)]), withDriver(driver): { spec+: { source+: { inlineVolumeSpec+: { flexVolume+: { driver: driver } } } } }, - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". The default filesystem depends on FlexVolume script."', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType is the Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". The default filesystem depends on FlexVolume script."', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { spec+: { source+: { inlineVolumeSpec+: { flexVolume+: { fsType: fsType } } } } }, - '#withOptions':: d.fn(help='"Optional: Extra command options if any."', args=[d.arg(name='options', type=d.T.object)]), + '#withOptions':: d.fn(help='"options is Optional: this field holds extra command options if any."', args=[d.arg(name='options', type=d.T.object)]), withOptions(options): { spec+: { source+: { inlineVolumeSpec+: { flexVolume+: { options: options } } } } }, - '#withOptionsMixin':: d.fn(help='"Optional: Extra command options if any."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='options', type=d.T.object)]), + '#withOptionsMixin':: d.fn(help='"options is Optional: this field holds extra command options if any."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='options', type=d.T.object)]), withOptionsMixin(options): { spec+: { source+: { inlineVolumeSpec+: { flexVolume+: { options+: options } } } } }, - '#withReadOnly':: d.fn(help='"Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly is Optional: defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { spec+: { source+: { inlineVolumeSpec+: { flexVolume+: { readOnly: readOnly } } } } }, }, '#flocker':: d.obj(help='"Represents a Flocker volume mounted by the Flocker agent. One and only one of datasetName and datasetUUID should be set. 
Flocker volumes do not support ownership management or SELinux relabeling."'), flocker: { - '#withDatasetName':: d.fn(help='"Name of the dataset stored as metadata -> name on the dataset for Flocker should be considered as deprecated"', args=[d.arg(name='datasetName', type=d.T.string)]), + '#withDatasetName':: d.fn(help='"datasetName is Name of the dataset stored as metadata -> name on the dataset for Flocker should be considered as deprecated"', args=[d.arg(name='datasetName', type=d.T.string)]), withDatasetName(datasetName): { spec+: { source+: { inlineVolumeSpec+: { flocker+: { datasetName: datasetName } } } } }, - '#withDatasetUUID':: d.fn(help='"UUID of the dataset. This is unique identifier of a Flocker dataset"', args=[d.arg(name='datasetUUID', type=d.T.string)]), + '#withDatasetUUID':: d.fn(help='"datasetUUID is the UUID of the dataset. This is unique identifier of a Flocker dataset"', args=[d.arg(name='datasetUUID', type=d.T.string)]), withDatasetUUID(datasetUUID): { spec+: { source+: { inlineVolumeSpec+: { flocker+: { datasetUUID: datasetUUID } } } } }, }, '#gcePersistentDisk':: d.obj(help='"Represents a Persistent Disk resource in Google Compute Engine.\\n\\nA GCE PD must exist before mounting to a container. The disk must also be in the same GCE project and zone as the kubelet. A GCE PD can only be mounted as read/write once or read-only many times. GCE PDs support ownership management and SELinux relabeling."'), gcePersistentDisk: { - '#withFsType':: d.fn(help='"Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType is filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { spec+: { source+: { inlineVolumeSpec+: { gcePersistentDisk+: { fsType: fsType } } } } }, - '#withPartition':: d.fn(help='"The partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \\"1\\". Similarly, the volume partition for /dev/sda is \\"0\\" (or you can leave the property empty). More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='partition', type=d.T.integer)]), + '#withPartition':: d.fn(help='"partition is the partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \\"1\\". Similarly, the volume partition for /dev/sda is \\"0\\" (or you can leave the property empty). More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='partition', type=d.T.integer)]), withPartition(partition): { spec+: { source+: { inlineVolumeSpec+: { gcePersistentDisk+: { partition: partition } } } } }, - '#withPdName':: d.fn(help='"Unique name of the PD resource in GCE. Used to identify the disk in GCE. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='pdName', type=d.T.string)]), + '#withPdName':: d.fn(help='"pdName is unique name of the PD resource in GCE. Used to identify the disk in GCE. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='pdName', type=d.T.string)]), withPdName(pdName): { spec+: { source+: { inlineVolumeSpec+: { gcePersistentDisk+: { pdName: pdName } } } } }, - '#withReadOnly':: d.fn(help='"ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { spec+: { source+: { inlineVolumeSpec+: { gcePersistentDisk+: { readOnly: readOnly } } } } }, }, '#glusterfs':: d.obj(help='"Represents a Glusterfs mount that lasts the lifetime of a pod. Glusterfs volumes do not support ownership management or SELinux relabeling."'), glusterfs: { - '#withEndpoints':: d.fn(help='"EndpointsName is the endpoint name that details Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='endpoints', type=d.T.string)]), + '#withEndpoints':: d.fn(help='"endpoints is the endpoint name that details Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='endpoints', type=d.T.string)]), withEndpoints(endpoints): { spec+: { source+: { inlineVolumeSpec+: { glusterfs+: { endpoints: endpoints } } } } }, - '#withEndpointsNamespace':: d.fn(help='"EndpointsNamespace is the namespace that contains Glusterfs endpoint. If this field is empty, the EndpointNamespace defaults to the same namespace as the bound PVC. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='endpointsNamespace', type=d.T.string)]), + '#withEndpointsNamespace':: d.fn(help='"endpointsNamespace is the namespace that contains Glusterfs endpoint. If this field is empty, the EndpointNamespace defaults to the same namespace as the bound PVC. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='endpointsNamespace', type=d.T.string)]), withEndpointsNamespace(endpointsNamespace): { spec+: { source+: { inlineVolumeSpec+: { glusterfs+: { endpointsNamespace: endpointsNamespace } } } } }, - '#withPath':: d.fn(help='"Path is the Glusterfs volume path. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='path', type=d.T.string)]), + '#withPath':: d.fn(help='"path is the Glusterfs volume path. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='path', type=d.T.string)]), withPath(path): { spec+: { source+: { inlineVolumeSpec+: { glusterfs+: { path: path } } } } }, - '#withReadOnly':: d.fn(help='"ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions. Defaults to false. 
More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly here will force the Glusterfs volume to be mounted with read-only permissions. Defaults to false. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { spec+: { source+: { inlineVolumeSpec+: { glusterfs+: { readOnly: readOnly } } } } }, }, '#hostPath':: d.obj(help='"Represents a host path mapped into a pod. Host path volumes do not support ownership management or SELinux relabeling."'), hostPath: { - '#withPath':: d.fn(help='"Path of the directory on the host. If the path is a symlink, it will follow the link to the real path. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath"', args=[d.arg(name='path', type=d.T.string)]), + '#withPath':: d.fn(help='"path of the directory on the host. If the path is a symlink, it will follow the link to the real path. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath"', args=[d.arg(name='path', type=d.T.string)]), withPath(path): { spec+: { source+: { inlineVolumeSpec+: { hostPath+: { path: path } } } } }, - '#withType':: d.fn(help='"Type for HostPath Volume Defaults to \\"\\" More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath"', args=[d.arg(name='type', type=d.T.string)]), + '#withType':: d.fn(help='"type for HostPath Volume Defaults to \\"\\" More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath"', args=[d.arg(name='type', type=d.T.string)]), withType(type): { spec+: { source+: { inlineVolumeSpec+: { hostPath+: { type: type } } } } }, }, '#iscsi':: d.obj(help='"ISCSIPersistentVolumeSource represents an ISCSI disk. ISCSI volumes can only be mounted as read/write once. ISCSI volumes support ownership management and SELinux relabeling."'), iscsi: { '#secretRef':: d.obj(help='"SecretReference represents a Secret Reference. 
It has enough information to retrieve secret in any namespace"'), secretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), withName(name): { spec+: { source+: { inlineVolumeSpec+: { iscsi+: { secretRef+: { name: name } } } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { spec+: { source+: { inlineVolumeSpec+: { iscsi+: { secretRef+: { namespace: namespace } } } } } }, }, - '#withChapAuthDiscovery':: d.fn(help='"whether support iSCSI Discovery CHAP authentication"', args=[d.arg(name='chapAuthDiscovery', type=d.T.boolean)]), + '#withChapAuthDiscovery':: d.fn(help='"chapAuthDiscovery defines whether support iSCSI Discovery CHAP authentication"', args=[d.arg(name='chapAuthDiscovery', type=d.T.boolean)]), withChapAuthDiscovery(chapAuthDiscovery): { spec+: { source+: { inlineVolumeSpec+: { iscsi+: { chapAuthDiscovery: chapAuthDiscovery } } } } }, - '#withChapAuthSession':: d.fn(help='"whether support iSCSI Session CHAP authentication"', args=[d.arg(name='chapAuthSession', type=d.T.boolean)]), + '#withChapAuthSession':: d.fn(help='"chapAuthSession defines whether support iSCSI Session CHAP authentication"', args=[d.arg(name='chapAuthSession', type=d.T.boolean)]), withChapAuthSession(chapAuthSession): { spec+: { source+: { inlineVolumeSpec+: { iscsi+: { chapAuthSession: chapAuthSession } } } } }, - '#withFsType':: d.fn(help='"Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi"', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType is the filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi"', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { spec+: { source+: { inlineVolumeSpec+: { iscsi+: { fsType: fsType } } } } }, - '#withInitiatorName':: d.fn(help='"Custom iSCSI Initiator Name. If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface : will be created for the connection."', args=[d.arg(name='initiatorName', type=d.T.string)]), + '#withInitiatorName':: d.fn(help='"initiatorName is the custom iSCSI Initiator Name. 
If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface : will be created for the connection."', args=[d.arg(name='initiatorName', type=d.T.string)]), withInitiatorName(initiatorName): { spec+: { source+: { inlineVolumeSpec+: { iscsi+: { initiatorName: initiatorName } } } } }, - '#withIqn':: d.fn(help='"Target iSCSI Qualified Name."', args=[d.arg(name='iqn', type=d.T.string)]), + '#withIqn':: d.fn(help='"iqn is Target iSCSI Qualified Name."', args=[d.arg(name='iqn', type=d.T.string)]), withIqn(iqn): { spec+: { source+: { inlineVolumeSpec+: { iscsi+: { iqn: iqn } } } } }, - '#withIscsiInterface':: d.fn(help="\"iSCSI Interface Name that uses an iSCSI transport. Defaults to 'default' (tcp).\"", args=[d.arg(name='iscsiInterface', type=d.T.string)]), + '#withIscsiInterface':: d.fn(help="\"iscsiInterface is the interface Name that uses an iSCSI transport. Defaults to 'default' (tcp).\"", args=[d.arg(name='iscsiInterface', type=d.T.string)]), withIscsiInterface(iscsiInterface): { spec+: { source+: { inlineVolumeSpec+: { iscsi+: { iscsiInterface: iscsiInterface } } } } }, - '#withLun':: d.fn(help='"iSCSI Target Lun number."', args=[d.arg(name='lun', type=d.T.integer)]), + '#withLun':: d.fn(help='"lun is iSCSI Target Lun number."', args=[d.arg(name='lun', type=d.T.integer)]), withLun(lun): { spec+: { source+: { inlineVolumeSpec+: { iscsi+: { lun: lun } } } } }, - '#withPortals':: d.fn(help='"iSCSI Target Portal List. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260)."', args=[d.arg(name='portals', type=d.T.array)]), + '#withPortals':: d.fn(help='"portals is the iSCSI Target Portal List. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260)."', args=[d.arg(name='portals', type=d.T.array)]), withPortals(portals): { spec+: { source+: { inlineVolumeSpec+: { iscsi+: { portals: if std.isArray(v=portals) then portals else [portals] } } } } }, - '#withPortalsMixin':: d.fn(help='"iSCSI Target Portal List. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260)."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='portals', type=d.T.array)]), + '#withPortalsMixin':: d.fn(help='"portals is the iSCSI Target Portal List. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260)."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='portals', type=d.T.array)]), withPortalsMixin(portals): { spec+: { source+: { inlineVolumeSpec+: { iscsi+: { portals+: if std.isArray(v=portals) then portals else [portals] } } } } }, - '#withReadOnly':: d.fn(help='"ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false."', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { spec+: { source+: { inlineVolumeSpec+: { iscsi+: { readOnly: readOnly } } } } }, - '#withTargetPortal':: d.fn(help='"iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260)."', args=[d.arg(name='targetPortal', type=d.T.string)]), + '#withTargetPortal':: d.fn(help='"targetPortal is iSCSI Target Portal. 
The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260)."', args=[d.arg(name='targetPortal', type=d.T.string)]), withTargetPortal(targetPortal): { spec+: { source+: { inlineVolumeSpec+: { iscsi+: { targetPortal: targetPortal } } } } }, }, '#local':: d.obj(help='"Local represents directly-attached storage with node affinity (Beta feature)"'), 'local': { - '#withFsType':: d.fn(help='"Filesystem type to mount. It applies only when the Path is a block device. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". The default value is to auto-select a fileystem if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType is the filesystem type to mount. It applies only when the Path is a block device. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". The default value is to auto-select a filesystem if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { spec+: { source+: { inlineVolumeSpec+: { 'local'+: { fsType: fsType } } } } }, - '#withPath':: d.fn(help='"The full path to the volume on the node. It can be either a directory or block device (disk, partition, ...)."', args=[d.arg(name='path', type=d.T.string)]), + '#withPath':: d.fn(help='"path of the full path to the volume on the node. It can be either a directory or block device (disk, partition, ...)."', args=[d.arg(name='path', type=d.T.string)]), withPath(path): { spec+: { source+: { inlineVolumeSpec+: { 'local'+: { path: path } } } } }, }, '#nfs':: d.obj(help='"Represents an NFS mount that lasts the lifetime of a pod. NFS volumes do not support ownership management or SELinux relabeling."'), nfs: { - '#withPath':: d.fn(help='"Path that is exported by the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs"', args=[d.arg(name='path', type=d.T.string)]), + '#withPath':: d.fn(help='"path that is exported by the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs"', args=[d.arg(name='path', type=d.T.string)]), withPath(path): { spec+: { source+: { inlineVolumeSpec+: { nfs+: { path: path } } } } }, - '#withReadOnly':: d.fn(help='"ReadOnly here will force the NFS export to be mounted with read-only permissions. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs"', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly here will force the NFS export to be mounted with read-only permissions. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs"', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { spec+: { source+: { inlineVolumeSpec+: { nfs+: { readOnly: readOnly } } } } }, - '#withServer':: d.fn(help='"Server is the hostname or IP address of the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs"', args=[d.arg(name='server', type=d.T.string)]), + '#withServer':: d.fn(help='"server is the hostname or IP address of the NFS server. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs"', args=[d.arg(name='server', type=d.T.string)]), withServer(server): { spec+: { source+: { inlineVolumeSpec+: { nfs+: { server: server } } } } }, }, '#nodeAffinity':: d.obj(help='"VolumeNodeAffinity defines constraints that limit what nodes this volume can be accessed from."'), @@ -325,87 +330,87 @@ }, '#photonPersistentDisk':: d.obj(help='"Represents a Photon Controller persistent disk resource."'), photonPersistentDisk: { - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { spec+: { source+: { inlineVolumeSpec+: { photonPersistentDisk+: { fsType: fsType } } } } }, - '#withPdID':: d.fn(help='"ID that identifies Photon Controller persistent disk"', args=[d.arg(name='pdID', type=d.T.string)]), + '#withPdID':: d.fn(help='"pdID is the ID that identifies Photon Controller persistent disk"', args=[d.arg(name='pdID', type=d.T.string)]), withPdID(pdID): { spec+: { source+: { inlineVolumeSpec+: { photonPersistentDisk+: { pdID: pdID } } } } }, }, '#portworxVolume':: d.obj(help='"PortworxVolumeSource represents a Portworx volume resource."'), portworxVolume: { - '#withFsType':: d.fn(help='"FSType represents the filesystem type to mount Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fSType represents the filesystem type to mount Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { spec+: { source+: { inlineVolumeSpec+: { portworxVolume+: { fsType: fsType } } } } }, - '#withReadOnly':: d.fn(help='"Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { spec+: { source+: { inlineVolumeSpec+: { portworxVolume+: { readOnly: readOnly } } } } }, - '#withVolumeID':: d.fn(help='"VolumeID uniquely identifies a Portworx volume"', args=[d.arg(name='volumeID', type=d.T.string)]), + '#withVolumeID':: d.fn(help='"volumeID uniquely identifies a Portworx volume"', args=[d.arg(name='volumeID', type=d.T.string)]), withVolumeID(volumeID): { spec+: { source+: { inlineVolumeSpec+: { portworxVolume+: { volumeID: volumeID } } } } }, }, '#quobyte':: d.obj(help='"Represents a Quobyte mount that lasts the lifetime of a pod. 
Quobyte volumes do not support ownership management or SELinux relabeling."'), quobyte: { - '#withGroup':: d.fn(help='"Group to map volume access to Default is no group"', args=[d.arg(name='group', type=d.T.string)]), + '#withGroup':: d.fn(help='"group to map volume access to Default is no group"', args=[d.arg(name='group', type=d.T.string)]), withGroup(group): { spec+: { source+: { inlineVolumeSpec+: { quobyte+: { group: group } } } } }, - '#withReadOnly':: d.fn(help='"ReadOnly here will force the Quobyte volume to be mounted with read-only permissions. Defaults to false."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly here will force the Quobyte volume to be mounted with read-only permissions. Defaults to false."', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { spec+: { source+: { inlineVolumeSpec+: { quobyte+: { readOnly: readOnly } } } } }, - '#withRegistry':: d.fn(help='"Registry represents a single or multiple Quobyte Registry services specified as a string as host:port pair (multiple entries are separated with commas) which acts as the central registry for volumes"', args=[d.arg(name='registry', type=d.T.string)]), + '#withRegistry':: d.fn(help='"registry represents a single or multiple Quobyte Registry services specified as a string as host:port pair (multiple entries are separated with commas) which acts as the central registry for volumes"', args=[d.arg(name='registry', type=d.T.string)]), withRegistry(registry): { spec+: { source+: { inlineVolumeSpec+: { quobyte+: { registry: registry } } } } }, - '#withTenant':: d.fn(help='"Tenant owning the given Quobyte volume in the Backend Used with dynamically provisioned Quobyte volumes, value is set by the plugin"', args=[d.arg(name='tenant', type=d.T.string)]), + '#withTenant':: d.fn(help='"tenant owning the given Quobyte volume in the Backend Used with dynamically provisioned Quobyte volumes, value is set by the plugin"', args=[d.arg(name='tenant', type=d.T.string)]), withTenant(tenant): { spec+: { source+: { inlineVolumeSpec+: { quobyte+: { tenant: tenant } } } } }, - '#withUser':: d.fn(help='"User to map volume access to Defaults to serivceaccount user"', args=[d.arg(name='user', type=d.T.string)]), + '#withUser':: d.fn(help='"user to map volume access to Defaults to serivceaccount user"', args=[d.arg(name='user', type=d.T.string)]), withUser(user): { spec+: { source+: { inlineVolumeSpec+: { quobyte+: { user: user } } } } }, - '#withVolume':: d.fn(help='"Volume is a string that references an already created Quobyte volume by name."', args=[d.arg(name='volume', type=d.T.string)]), + '#withVolume':: d.fn(help='"volume is a string that references an already created Quobyte volume by name."', args=[d.arg(name='volume', type=d.T.string)]), withVolume(volume): { spec+: { source+: { inlineVolumeSpec+: { quobyte+: { volume: volume } } } } }, }, '#rbd':: d.obj(help='"Represents a Rados Block Device mount that lasts the lifetime of a pod. RBD volumes support ownership management and SELinux relabeling."'), rbd: { '#secretRef':: d.obj(help='"SecretReference represents a Secret Reference. 
It has enough information to retrieve secret in any namespace"'), secretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), withName(name): { spec+: { source+: { inlineVolumeSpec+: { rbd+: { secretRef+: { name: name } } } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { spec+: { source+: { inlineVolumeSpec+: { rbd+: { secretRef+: { namespace: namespace } } } } } }, }, - '#withFsType':: d.fn(help='"Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd"', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType is the filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd"', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { spec+: { source+: { inlineVolumeSpec+: { rbd+: { fsType: fsType } } } } }, - '#withImage':: d.fn(help='"The rados image name. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='image', type=d.T.string)]), + '#withImage':: d.fn(help='"image is the rados image name. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='image', type=d.T.string)]), withImage(image): { spec+: { source+: { inlineVolumeSpec+: { rbd+: { image: image } } } } }, - '#withKeyring':: d.fn(help='"Keyring is the path to key ring for RBDUser. Default is /etc/ceph/keyring. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='keyring', type=d.T.string)]), + '#withKeyring':: d.fn(help='"keyring is the path to key ring for RBDUser. Default is /etc/ceph/keyring. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='keyring', type=d.T.string)]), withKeyring(keyring): { spec+: { source+: { inlineVolumeSpec+: { rbd+: { keyring: keyring } } } } }, - '#withMonitors':: d.fn(help='"A collection of Ceph monitors. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='monitors', type=d.T.array)]), + '#withMonitors':: d.fn(help='"monitors is a collection of Ceph monitors. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='monitors', type=d.T.array)]), withMonitors(monitors): { spec+: { source+: { inlineVolumeSpec+: { rbd+: { monitors: if std.isArray(v=monitors) then monitors else [monitors] } } } } }, - '#withMonitorsMixin':: d.fn(help='"A collection of Ceph monitors. 
More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='monitors', type=d.T.array)]), + '#withMonitorsMixin':: d.fn(help='"monitors is a collection of Ceph monitors. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='monitors', type=d.T.array)]), withMonitorsMixin(monitors): { spec+: { source+: { inlineVolumeSpec+: { rbd+: { monitors+: if std.isArray(v=monitors) then monitors else [monitors] } } } } }, - '#withPool':: d.fn(help='"The rados pool name. Default is rbd. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='pool', type=d.T.string)]), + '#withPool':: d.fn(help='"pool is the rados pool name. Default is rbd. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='pool', type=d.T.string)]), withPool(pool): { spec+: { source+: { inlineVolumeSpec+: { rbd+: { pool: pool } } } } }, - '#withReadOnly':: d.fn(help='"ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { spec+: { source+: { inlineVolumeSpec+: { rbd+: { readOnly: readOnly } } } } }, - '#withUser':: d.fn(help='"The rados user name. Default is admin. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='user', type=d.T.string)]), + '#withUser':: d.fn(help='"user is the rados user name. Default is admin. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='user', type=d.T.string)]), withUser(user): { spec+: { source+: { inlineVolumeSpec+: { rbd+: { user: user } } } } }, }, '#scaleIO':: d.obj(help='"ScaleIOPersistentVolumeSource represents a persistent ScaleIO volume"'), scaleIO: { '#secretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), secretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), withName(name): { spec+: { source+: { inlineVolumeSpec+: { scaleIO+: { secretRef+: { name: name } } } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { spec+: { source+: { inlineVolumeSpec+: { scaleIO+: { secretRef+: { namespace: namespace } } } } } }, }, - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Default is \\"xfs\\', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType is the filesystem type to mount. 
Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Default is \\"xfs\\', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { spec+: { source+: { inlineVolumeSpec+: { scaleIO+: { fsType: fsType } } } } }, - '#withGateway':: d.fn(help='"The host address of the ScaleIO API Gateway."', args=[d.arg(name='gateway', type=d.T.string)]), + '#withGateway':: d.fn(help='"gateway is the host address of the ScaleIO API Gateway."', args=[d.arg(name='gateway', type=d.T.string)]), withGateway(gateway): { spec+: { source+: { inlineVolumeSpec+: { scaleIO+: { gateway: gateway } } } } }, - '#withProtectionDomain':: d.fn(help='"The name of the ScaleIO Protection Domain for the configured storage."', args=[d.arg(name='protectionDomain', type=d.T.string)]), + '#withProtectionDomain':: d.fn(help='"protectionDomain is the name of the ScaleIO Protection Domain for the configured storage."', args=[d.arg(name='protectionDomain', type=d.T.string)]), withProtectionDomain(protectionDomain): { spec+: { source+: { inlineVolumeSpec+: { scaleIO+: { protectionDomain: protectionDomain } } } } }, - '#withReadOnly':: d.fn(help='"Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { spec+: { source+: { inlineVolumeSpec+: { scaleIO+: { readOnly: readOnly } } } } }, - '#withSslEnabled':: d.fn(help='"Flag to enable/disable SSL communication with Gateway, default false"', args=[d.arg(name='sslEnabled', type=d.T.boolean)]), + '#withSslEnabled':: d.fn(help='"sslEnabled is the flag to enable/disable SSL communication with Gateway, default false"', args=[d.arg(name='sslEnabled', type=d.T.boolean)]), withSslEnabled(sslEnabled): { spec+: { source+: { inlineVolumeSpec+: { scaleIO+: { sslEnabled: sslEnabled } } } } }, - '#withStorageMode':: d.fn(help='"Indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. Default is ThinProvisioned."', args=[d.arg(name='storageMode', type=d.T.string)]), + '#withStorageMode':: d.fn(help='"storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. 
Default is ThinProvisioned."', args=[d.arg(name='storageMode', type=d.T.string)]), withStorageMode(storageMode): { spec+: { source+: { inlineVolumeSpec+: { scaleIO+: { storageMode: storageMode } } } } }, - '#withStoragePool':: d.fn(help='"The ScaleIO Storage Pool associated with the protection domain."', args=[d.arg(name='storagePool', type=d.T.string)]), + '#withStoragePool':: d.fn(help='"storagePool is the ScaleIO Storage Pool associated with the protection domain."', args=[d.arg(name='storagePool', type=d.T.string)]), withStoragePool(storagePool): { spec+: { source+: { inlineVolumeSpec+: { scaleIO+: { storagePool: storagePool } } } } }, - '#withSystem':: d.fn(help='"The name of the storage system as configured in ScaleIO."', args=[d.arg(name='system', type=d.T.string)]), + '#withSystem':: d.fn(help='"system is the name of the storage system as configured in ScaleIO."', args=[d.arg(name='system', type=d.T.string)]), withSystem(system): { spec+: { source+: { inlineVolumeSpec+: { scaleIO+: { system: system } } } } }, - '#withVolumeName':: d.fn(help='"The name of a volume already created in the ScaleIO system that is associated with this volume source."', args=[d.arg(name='volumeName', type=d.T.string)]), + '#withVolumeName':: d.fn(help='"volumeName is the name of a volume already created in the ScaleIO system that is associated with this volume source."', args=[d.arg(name='volumeName', type=d.T.string)]), withVolumeName(volumeName): { spec+: { source+: { inlineVolumeSpec+: { scaleIO+: { volumeName: volumeName } } } } }, }, '#storageos':: d.obj(help='"Represents a StorageOS persistent volume resource."'), @@ -427,51 +432,53 @@ '#withUid':: d.fn(help='"UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { spec+: { source+: { inlineVolumeSpec+: { storageos+: { secretRef+: { uid: uid } } } } } }, }, - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { spec+: { source+: { inlineVolumeSpec+: { storageos+: { fsType: fsType } } } } }, - '#withReadOnly':: d.fn(help='"Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { spec+: { source+: { inlineVolumeSpec+: { storageos+: { readOnly: readOnly } } } } }, - '#withVolumeName':: d.fn(help='"VolumeName is the human-readable name of the StorageOS volume. Volume names are only unique within a namespace."', args=[d.arg(name='volumeName', type=d.T.string)]), + '#withVolumeName':: d.fn(help='"volumeName is the human-readable name of the StorageOS volume. 
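// --- Illustrative usage sketch (editorial addition, not part of the vendored diff) ---
// A minimal sketch of how the generated builders shown in this hunk compose into a
// storage.v1 VolumeAttachment, assuming the 1.29 vendor import path introduced by this
// patch; the attachment name, CSI driver, node and PV names below are hypothetical.
local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';
local va = k.storage.v1.volumeAttachment;

{
  attachment:
    va.new('example-attach')                                  // hypothetical object name
    + va.spec.withAttacher('csi.example.com')                 // hypothetical CSI driver
    + va.spec.withNodeName('node-1')                          // hypothetical node
    + va.spec.source.withPersistentVolumeName('example-pv'),  // hypothetical PV to attach
}
// --- end of illustrative sketch ---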
Volume names are only unique within a namespace."', args=[d.arg(name='volumeName', type=d.T.string)]), withVolumeName(volumeName): { spec+: { source+: { inlineVolumeSpec+: { storageos+: { volumeName: volumeName } } } } }, - '#withVolumeNamespace':: d.fn(help="\"VolumeNamespace specifies the scope of the volume within StorageOS. If no namespace is specified then the Pod's namespace will be used. This allows the Kubernetes name scoping to be mirrored within StorageOS for tighter integration. Set VolumeName to any name to override the default behaviour. Set to \\\"default\\\" if you are not using namespaces within StorageOS. Namespaces that do not pre-exist within StorageOS will be created.\"", args=[d.arg(name='volumeNamespace', type=d.T.string)]), + '#withVolumeNamespace':: d.fn(help="\"volumeNamespace specifies the scope of the volume within StorageOS. If no namespace is specified then the Pod's namespace will be used. This allows the Kubernetes name scoping to be mirrored within StorageOS for tighter integration. Set VolumeName to any name to override the default behaviour. Set to \\\"default\\\" if you are not using namespaces within StorageOS. Namespaces that do not pre-exist within StorageOS will be created.\"", args=[d.arg(name='volumeNamespace', type=d.T.string)]), withVolumeNamespace(volumeNamespace): { spec+: { source+: { inlineVolumeSpec+: { storageos+: { volumeNamespace: volumeNamespace } } } } }, }, '#vsphereVolume':: d.obj(help='"Represents a vSphere volume resource."'), vsphereVolume: { - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType is filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". 
Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { spec+: { source+: { inlineVolumeSpec+: { vsphereVolume+: { fsType: fsType } } } } }, - '#withStoragePolicyID':: d.fn(help='"Storage Policy Based Management (SPBM) profile ID associated with the StoragePolicyName."', args=[d.arg(name='storagePolicyID', type=d.T.string)]), + '#withStoragePolicyID':: d.fn(help='"storagePolicyID is the storage Policy Based Management (SPBM) profile ID associated with the StoragePolicyName."', args=[d.arg(name='storagePolicyID', type=d.T.string)]), withStoragePolicyID(storagePolicyID): { spec+: { source+: { inlineVolumeSpec+: { vsphereVolume+: { storagePolicyID: storagePolicyID } } } } }, - '#withStoragePolicyName':: d.fn(help='"Storage Policy Based Management (SPBM) profile name."', args=[d.arg(name='storagePolicyName', type=d.T.string)]), + '#withStoragePolicyName':: d.fn(help='"storagePolicyName is the storage Policy Based Management (SPBM) profile name."', args=[d.arg(name='storagePolicyName', type=d.T.string)]), withStoragePolicyName(storagePolicyName): { spec+: { source+: { inlineVolumeSpec+: { vsphereVolume+: { storagePolicyName: storagePolicyName } } } } }, - '#withVolumePath':: d.fn(help='"Path that identifies vSphere volume vmdk"', args=[d.arg(name='volumePath', type=d.T.string)]), + '#withVolumePath':: d.fn(help='"volumePath is the path that identifies vSphere volume vmdk"', args=[d.arg(name='volumePath', type=d.T.string)]), withVolumePath(volumePath): { spec+: { source+: { inlineVolumeSpec+: { vsphereVolume+: { volumePath: volumePath } } } } }, }, - '#withAccessModes':: d.fn(help='"AccessModes contains all ways the volume can be mounted. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes"', args=[d.arg(name='accessModes', type=d.T.array)]), + '#withAccessModes':: d.fn(help='"accessModes contains all ways the volume can be mounted. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes"', args=[d.arg(name='accessModes', type=d.T.array)]), withAccessModes(accessModes): { spec+: { source+: { inlineVolumeSpec+: { accessModes: if std.isArray(v=accessModes) then accessModes else [accessModes] } } } }, - '#withAccessModesMixin':: d.fn(help='"AccessModes contains all ways the volume can be mounted. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='accessModes', type=d.T.array)]), + '#withAccessModesMixin':: d.fn(help='"accessModes contains all ways the volume can be mounted. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='accessModes', type=d.T.array)]), withAccessModesMixin(accessModes): { spec+: { source+: { inlineVolumeSpec+: { accessModes+: if std.isArray(v=accessModes) then accessModes else [accessModes] } } } }, - '#withCapacity':: d.fn(help="\"A description of the persistent volume's resources and capacity. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity\"", args=[d.arg(name='capacity', type=d.T.object)]), + '#withCapacity':: d.fn(help="\"capacity is the description of the persistent volume's resources and capacity. 
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity\"", args=[d.arg(name='capacity', type=d.T.object)]), withCapacity(capacity): { spec+: { source+: { inlineVolumeSpec+: { capacity: capacity } } } }, - '#withCapacityMixin':: d.fn(help="\"A description of the persistent volume's resources and capacity. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='capacity', type=d.T.object)]), + '#withCapacityMixin':: d.fn(help="\"capacity is the description of the persistent volume's resources and capacity. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='capacity', type=d.T.object)]), withCapacityMixin(capacity): { spec+: { source+: { inlineVolumeSpec+: { capacity+: capacity } } } }, - '#withMountOptions':: d.fn(help='"A list of mount options, e.g. [\\"ro\\", \\"soft\\"]. Not validated - mount will simply fail if one is invalid. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#mount-options"', args=[d.arg(name='mountOptions', type=d.T.array)]), + '#withMountOptions':: d.fn(help='"mountOptions is the list of mount options, e.g. [\\"ro\\", \\"soft\\"]. Not validated - mount will simply fail if one is invalid. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#mount-options"', args=[d.arg(name='mountOptions', type=d.T.array)]), withMountOptions(mountOptions): { spec+: { source+: { inlineVolumeSpec+: { mountOptions: if std.isArray(v=mountOptions) then mountOptions else [mountOptions] } } } }, - '#withMountOptionsMixin':: d.fn(help='"A list of mount options, e.g. [\\"ro\\", \\"soft\\"]. Not validated - mount will simply fail if one is invalid. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#mount-options"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='mountOptions', type=d.T.array)]), + '#withMountOptionsMixin':: d.fn(help='"mountOptions is the list of mount options, e.g. [\\"ro\\", \\"soft\\"]. Not validated - mount will simply fail if one is invalid. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#mount-options"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='mountOptions', type=d.T.array)]), withMountOptionsMixin(mountOptions): { spec+: { source+: { inlineVolumeSpec+: { mountOptions+: if std.isArray(v=mountOptions) then mountOptions else [mountOptions] } } } }, - '#withPersistentVolumeReclaimPolicy':: d.fn(help='"What happens to a persistent volume when released from its claim. Valid options are Retain (default for manually created PersistentVolumes), Delete (default for dynamically provisioned PersistentVolumes), and Recycle (deprecated). Recycle must be supported by the volume plugin underlying this PersistentVolume. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#reclaiming"', args=[d.arg(name='persistentVolumeReclaimPolicy', type=d.T.string)]), + '#withPersistentVolumeReclaimPolicy':: d.fn(help='"persistentVolumeReclaimPolicy defines what happens to a persistent volume when released from its claim. Valid options are Retain (default for manually created PersistentVolumes), Delete (default for dynamically provisioned PersistentVolumes), and Recycle (deprecated). 
Recycle must be supported by the volume plugin underlying this PersistentVolume. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#reclaiming"', args=[d.arg(name='persistentVolumeReclaimPolicy', type=d.T.string)]), withPersistentVolumeReclaimPolicy(persistentVolumeReclaimPolicy): { spec+: { source+: { inlineVolumeSpec+: { persistentVolumeReclaimPolicy: persistentVolumeReclaimPolicy } } } }, - '#withStorageClassName':: d.fn(help='"Name of StorageClass to which this persistent volume belongs. Empty value means that this volume does not belong to any StorageClass."', args=[d.arg(name='storageClassName', type=d.T.string)]), + '#withStorageClassName':: d.fn(help='"storageClassName is the name of StorageClass to which this persistent volume belongs. Empty value means that this volume does not belong to any StorageClass."', args=[d.arg(name='storageClassName', type=d.T.string)]), withStorageClassName(storageClassName): { spec+: { source+: { inlineVolumeSpec+: { storageClassName: storageClassName } } } }, + '#withVolumeAttributesClassName':: d.fn(help='"Name of VolumeAttributesClass to which this persistent volume belongs. Empty value is not allowed. When this field is not set, it indicates that this volume does not belong to any VolumeAttributesClass. This field is mutable and can be changed by the CSI driver after a volume has been updated successfully to a new class. For an unbound PersistentVolume, the volumeAttributesClassName will be matched with unbound PersistentVolumeClaims during the binding process. This is an alpha field and requires enabling VolumeAttributesClass feature."', args=[d.arg(name='volumeAttributesClassName', type=d.T.string)]), + withVolumeAttributesClassName(volumeAttributesClassName): { spec+: { source+: { inlineVolumeSpec+: { volumeAttributesClassName: volumeAttributesClassName } } } }, '#withVolumeMode':: d.fn(help='"volumeMode defines if a volume is intended to be used with a formatted filesystem or to remain in raw block state. Value of Filesystem is implied when not included in spec."', args=[d.arg(name='volumeMode', type=d.T.string)]), withVolumeMode(volumeMode): { spec+: { source+: { inlineVolumeSpec+: { volumeMode: volumeMode } } } }, }, - '#withPersistentVolumeName':: d.fn(help='"Name of the persistent volume to attach."', args=[d.arg(name='persistentVolumeName', type=d.T.string)]), + '#withPersistentVolumeName':: d.fn(help='"persistentVolumeName represents the name of the persistent volume to attach."', args=[d.arg(name='persistentVolumeName', type=d.T.string)]), withPersistentVolumeName(persistentVolumeName): { spec+: { source+: { persistentVolumeName: persistentVolumeName } } }, }, - '#withAttacher':: d.fn(help='"Attacher indicates the name of the volume driver that MUST handle this request. This is the name returned by GetPluginName()."', args=[d.arg(name='attacher', type=d.T.string)]), + '#withAttacher':: d.fn(help='"attacher indicates the name of the volume driver that MUST handle this request. 
This is the name returned by GetPluginName()."', args=[d.arg(name='attacher', type=d.T.string)]), withAttacher(attacher): { spec+: { attacher: attacher } }, - '#withNodeName':: d.fn(help='"The node that the volume should be attached to."', args=[d.arg(name='nodeName', type=d.T.string)]), + '#withNodeName':: d.fn(help='"nodeName represents the node that the volume should be attached to."', args=[d.arg(name='nodeName', type=d.T.string)]), withNodeName(nodeName): { spec+: { nodeName: nodeName } }, }, '#mixin': 'ignore', diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1alpha1/volumeAttachmentSource.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/storage/v1/volumeAttachmentSource.libsonnet similarity index 57% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1alpha1/volumeAttachmentSource.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/storage/v1/volumeAttachmentSource.libsonnet index 47ca4b20576..6c46e4d512c 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1alpha1/volumeAttachmentSource.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/storage/v1/volumeAttachmentSource.libsonnet @@ -5,77 +5,77 @@ inlineVolumeSpec: { '#awsElasticBlockStore':: d.obj(help='"Represents a Persistent Disk resource in AWS.\\n\\nAn AWS EBS disk must exist before mounting to a container. The disk must also be in the same AWS zone as the kubelet. An AWS EBS disk can only be mounted as read/write once. AWS EBS volumes support ownership management and SELinux relabeling."'), awsElasticBlockStore: { - '#withFsType':: d.fn(help='"Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore"', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType is the filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore"', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { inlineVolumeSpec+: { awsElasticBlockStore+: { fsType: fsType } } }, - '#withPartition':: d.fn(help='"The partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \\"1\\". Similarly, the volume partition for /dev/sda is \\"0\\" (or you can leave the property empty)."', args=[d.arg(name='partition', type=d.T.integer)]), + '#withPartition':: d.fn(help='"partition is the partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \\"1\\". 
Similarly, the volume partition for /dev/sda is \\"0\\" (or you can leave the property empty)."', args=[d.arg(name='partition', type=d.T.integer)]), withPartition(partition): { inlineVolumeSpec+: { awsElasticBlockStore+: { partition: partition } } }, - '#withReadOnly':: d.fn(help='"Specify \\"true\\" to force and set the ReadOnly property in VolumeMounts to \\"true\\". If omitted, the default is \\"false\\". More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore"', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly value true will force the readOnly setting in VolumeMounts. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore"', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { inlineVolumeSpec+: { awsElasticBlockStore+: { readOnly: readOnly } } }, - '#withVolumeID':: d.fn(help='"Unique ID of the persistent disk resource in AWS (Amazon EBS volume). More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore"', args=[d.arg(name='volumeID', type=d.T.string)]), + '#withVolumeID':: d.fn(help='"volumeID is unique ID of the persistent disk resource in AWS (Amazon EBS volume). More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore"', args=[d.arg(name='volumeID', type=d.T.string)]), withVolumeID(volumeID): { inlineVolumeSpec+: { awsElasticBlockStore+: { volumeID: volumeID } } }, }, '#azureDisk':: d.obj(help='"AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod."'), azureDisk: { - '#withCachingMode':: d.fn(help='"Host Caching mode: None, Read Only, Read Write."', args=[d.arg(name='cachingMode', type=d.T.string)]), + '#withCachingMode':: d.fn(help='"cachingMode is the Host Caching mode: None, Read Only, Read Write."', args=[d.arg(name='cachingMode', type=d.T.string)]), withCachingMode(cachingMode): { inlineVolumeSpec+: { azureDisk+: { cachingMode: cachingMode } } }, - '#withDiskName':: d.fn(help='"The Name of the data disk in the blob storage"', args=[d.arg(name='diskName', type=d.T.string)]), + '#withDiskName':: d.fn(help='"diskName is the Name of the data disk in the blob storage"', args=[d.arg(name='diskName', type=d.T.string)]), withDiskName(diskName): { inlineVolumeSpec+: { azureDisk+: { diskName: diskName } } }, - '#withDiskURI':: d.fn(help='"The URI the data disk in the blob storage"', args=[d.arg(name='diskURI', type=d.T.string)]), + '#withDiskURI':: d.fn(help='"diskURI is the URI of data disk in the blob storage"', args=[d.arg(name='diskURI', type=d.T.string)]), withDiskURI(diskURI): { inlineVolumeSpec+: { azureDisk+: { diskURI: diskURI } } }, - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType is Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { inlineVolumeSpec+: { azureDisk+: { fsType: fsType } } }, - '#withKind':: d.fn(help='"Expected values Shared: multiple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). 
defaults to shared"', args=[d.arg(name='kind', type=d.T.string)]), + '#withKind':: d.fn(help='"kind expected values are Shared: multiple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared"', args=[d.arg(name='kind', type=d.T.string)]), withKind(kind): { inlineVolumeSpec+: { azureDisk+: { kind: kind } } }, - '#withReadOnly':: d.fn(help='"Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { inlineVolumeSpec+: { azureDisk+: { readOnly: readOnly } } }, }, '#azureFile':: d.obj(help='"AzureFile represents an Azure File Service mount on the host and bind mount to the pod."'), azureFile: { - '#withReadOnly':: d.fn(help='"Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { inlineVolumeSpec+: { azureFile+: { readOnly: readOnly } } }, - '#withSecretName':: d.fn(help='"the name of secret that contains Azure Storage Account Name and Key"', args=[d.arg(name='secretName', type=d.T.string)]), + '#withSecretName':: d.fn(help='"secretName is the name of secret that contains Azure Storage Account Name and Key"', args=[d.arg(name='secretName', type=d.T.string)]), withSecretName(secretName): { inlineVolumeSpec+: { azureFile+: { secretName: secretName } } }, - '#withSecretNamespace':: d.fn(help='"the namespace of the secret that contains Azure Storage Account Name and Key default is the same as the Pod"', args=[d.arg(name='secretNamespace', type=d.T.string)]), + '#withSecretNamespace':: d.fn(help='"secretNamespace is the namespace of the secret that contains Azure Storage Account Name and Key default is the same as the Pod"', args=[d.arg(name='secretNamespace', type=d.T.string)]), withSecretNamespace(secretNamespace): { inlineVolumeSpec+: { azureFile+: { secretNamespace: secretNamespace } } }, - '#withShareName':: d.fn(help='"Share Name"', args=[d.arg(name='shareName', type=d.T.string)]), + '#withShareName':: d.fn(help='"shareName is the azure Share Name"', args=[d.arg(name='shareName', type=d.T.string)]), withShareName(shareName): { inlineVolumeSpec+: { azureFile+: { shareName: shareName } } }, }, '#cephfs':: d.obj(help='"Represents a Ceph Filesystem mount that lasts the lifetime of a pod Cephfs volumes do not support ownership management or SELinux relabeling."'), cephfs: { '#secretRef':: d.obj(help='"SecretReference represents a Secret Reference. 
It has enough information to retrieve secret in any namespace"'), secretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), withName(name): { inlineVolumeSpec+: { cephfs+: { secretRef+: { name: name } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { inlineVolumeSpec+: { cephfs+: { secretRef+: { namespace: namespace } } } }, }, - '#withMonitors':: d.fn(help='"Required: Monitors is a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='monitors', type=d.T.array)]), + '#withMonitors':: d.fn(help='"monitors is Required: Monitors is a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='monitors', type=d.T.array)]), withMonitors(monitors): { inlineVolumeSpec+: { cephfs+: { monitors: if std.isArray(v=monitors) then monitors else [monitors] } } }, - '#withMonitorsMixin':: d.fn(help='"Required: Monitors is a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='monitors', type=d.T.array)]), + '#withMonitorsMixin':: d.fn(help='"monitors is Required: Monitors is a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='monitors', type=d.T.array)]), withMonitorsMixin(monitors): { inlineVolumeSpec+: { cephfs+: { monitors+: if std.isArray(v=monitors) then monitors else [monitors] } } }, - '#withPath':: d.fn(help='"Optional: Used as the mounted root, rather than the full Ceph tree, default is /"', args=[d.arg(name='path', type=d.T.string)]), + '#withPath':: d.fn(help='"path is Optional: Used as the mounted root, rather than the full Ceph tree, default is /"', args=[d.arg(name='path', type=d.T.string)]), withPath(path): { inlineVolumeSpec+: { cephfs+: { path: path } } }, - '#withReadOnly':: d.fn(help='"Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly is Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. 
More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { inlineVolumeSpec+: { cephfs+: { readOnly: readOnly } } }, - '#withSecretFile':: d.fn(help='"Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='secretFile', type=d.T.string)]), + '#withSecretFile':: d.fn(help='"secretFile is Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='secretFile', type=d.T.string)]), withSecretFile(secretFile): { inlineVolumeSpec+: { cephfs+: { secretFile: secretFile } } }, - '#withUser':: d.fn(help='"Optional: User is the rados user name, default is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='user', type=d.T.string)]), + '#withUser':: d.fn(help='"user is Optional: User is the rados user name, default is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='user', type=d.T.string)]), withUser(user): { inlineVolumeSpec+: { cephfs+: { user: user } } }, }, '#cinder':: d.obj(help='"Represents a cinder volume resource in Openstack. A Cinder volume must exist before mounting to a container. The volume must also be in the same region as the kubelet. Cinder volumes support ownership management and SELinux relabeling."'), cinder: { '#secretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), secretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), withName(name): { inlineVolumeSpec+: { cinder+: { secretRef+: { name: name } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { inlineVolumeSpec+: { cinder+: { secretRef+: { namespace: namespace } } } }, }, - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://examples.k8s.io/mysql-cinder-pd/README.md"', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType Filesystem type to mount. Must be a filesystem type supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://examples.k8s.io/mysql-cinder-pd/README.md"', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { inlineVolumeSpec+: { cinder+: { fsType: fsType } } }, - '#withReadOnly':: d.fn(help='"Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. 
More info: https://examples.k8s.io/mysql-cinder-pd/README.md"', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly is Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/mysql-cinder-pd/README.md"', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { inlineVolumeSpec+: { cinder+: { readOnly: readOnly } } }, - '#withVolumeID':: d.fn(help='"volume id used to identify the volume in cinder. More info: https://examples.k8s.io/mysql-cinder-pd/README.md"', args=[d.arg(name='volumeID', type=d.T.string)]), + '#withVolumeID':: d.fn(help='"volumeID used to identify the volume in cinder. More info: https://examples.k8s.io/mysql-cinder-pd/README.md"', args=[d.arg(name='volumeID', type=d.T.string)]), withVolumeID(volumeID): { inlineVolumeSpec+: { cinder+: { volumeID: volumeID } } }, }, '#claimRef':: d.obj(help='"ObjectReference contains enough information to let you inspect or modify the referred object."'), @@ -99,164 +99,171 @@ csi: { '#controllerExpandSecretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), controllerExpandSecretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), withName(name): { inlineVolumeSpec+: { csi+: { controllerExpandSecretRef+: { name: name } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { inlineVolumeSpec+: { csi+: { controllerExpandSecretRef+: { namespace: namespace } } } }, }, '#controllerPublishSecretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), controllerPublishSecretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), withName(name): { inlineVolumeSpec+: { csi+: { controllerPublishSecretRef+: { name: name } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { inlineVolumeSpec+: { csi+: { controllerPublishSecretRef+: { namespace: namespace } } } }, }, + '#nodeExpandSecretRef':: d.obj(help='"SecretReference represents a Secret Reference. 
It has enough information to retrieve secret in any namespace"'), + nodeExpandSecretRef: { + '#withName':: d.fn(help='"name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { inlineVolumeSpec+: { csi+: { nodeExpandSecretRef+: { name: name } } } }, + '#withNamespace':: d.fn(help='"namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), + withNamespace(namespace): { inlineVolumeSpec+: { csi+: { nodeExpandSecretRef+: { namespace: namespace } } } }, + }, '#nodePublishSecretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), nodePublishSecretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), withName(name): { inlineVolumeSpec+: { csi+: { nodePublishSecretRef+: { name: name } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { inlineVolumeSpec+: { csi+: { nodePublishSecretRef+: { namespace: namespace } } } }, }, '#nodeStageSecretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), nodeStageSecretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), withName(name): { inlineVolumeSpec+: { csi+: { nodeStageSecretRef+: { name: name } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { inlineVolumeSpec+: { csi+: { nodeStageSecretRef+: { namespace: namespace } } } }, }, - '#withDriver':: d.fn(help='"Driver is the name of the driver to use for this volume. Required."', args=[d.arg(name='driver', type=d.T.string)]), + '#withDriver':: d.fn(help='"driver is the name of the driver to use for this volume. Required."', args=[d.arg(name='driver', type=d.T.string)]), withDriver(driver): { inlineVolumeSpec+: { csi+: { driver: driver } } }, - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\"."', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\"."', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { inlineVolumeSpec+: { csi+: { fsType: fsType } } }, - '#withReadOnly':: d.fn(help='"Optional: The value to pass to ControllerPublishVolumeRequest. 
Defaults to false (read/write)."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly value to pass to ControllerPublishVolumeRequest. Defaults to false (read/write)."', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { inlineVolumeSpec+: { csi+: { readOnly: readOnly } } }, - '#withVolumeAttributes':: d.fn(help='"Attributes of the volume to publish."', args=[d.arg(name='volumeAttributes', type=d.T.object)]), + '#withVolumeAttributes':: d.fn(help='"volumeAttributes of the volume to publish."', args=[d.arg(name='volumeAttributes', type=d.T.object)]), withVolumeAttributes(volumeAttributes): { inlineVolumeSpec+: { csi+: { volumeAttributes: volumeAttributes } } }, - '#withVolumeAttributesMixin':: d.fn(help='"Attributes of the volume to publish."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='volumeAttributes', type=d.T.object)]), + '#withVolumeAttributesMixin':: d.fn(help='"volumeAttributes of the volume to publish."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='volumeAttributes', type=d.T.object)]), withVolumeAttributesMixin(volumeAttributes): { inlineVolumeSpec+: { csi+: { volumeAttributes+: volumeAttributes } } }, - '#withVolumeHandle':: d.fn(help='"VolumeHandle is the unique volume name returned by the CSI volume plugin’s CreateVolume to refer to the volume on all subsequent calls. Required."', args=[d.arg(name='volumeHandle', type=d.T.string)]), + '#withVolumeHandle':: d.fn(help='"volumeHandle is the unique volume name returned by the CSI volume plugin’s CreateVolume to refer to the volume on all subsequent calls. Required."', args=[d.arg(name='volumeHandle', type=d.T.string)]), withVolumeHandle(volumeHandle): { inlineVolumeSpec+: { csi+: { volumeHandle: volumeHandle } } }, }, '#fc':: d.obj(help='"Represents a Fibre Channel volume. Fibre Channel volumes can only be mounted as read/write once. Fibre Channel volumes support ownership management and SELinux relabeling."'), fc: { - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { inlineVolumeSpec+: { fc+: { fsType: fsType } } }, - '#withLun':: d.fn(help='"Optional: FC target lun number"', args=[d.arg(name='lun', type=d.T.integer)]), + '#withLun':: d.fn(help='"lun is Optional: FC target lun number"', args=[d.arg(name='lun', type=d.T.integer)]), withLun(lun): { inlineVolumeSpec+: { fc+: { lun: lun } } }, - '#withReadOnly':: d.fn(help='"Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly is Optional: Defaults to false (read/write). 
ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { inlineVolumeSpec+: { fc+: { readOnly: readOnly } } }, - '#withTargetWWNs':: d.fn(help='"Optional: FC target worldwide names (WWNs)"', args=[d.arg(name='targetWWNs', type=d.T.array)]), + '#withTargetWWNs':: d.fn(help='"targetWWNs is Optional: FC target worldwide names (WWNs)"', args=[d.arg(name='targetWWNs', type=d.T.array)]), withTargetWWNs(targetWWNs): { inlineVolumeSpec+: { fc+: { targetWWNs: if std.isArray(v=targetWWNs) then targetWWNs else [targetWWNs] } } }, - '#withTargetWWNsMixin':: d.fn(help='"Optional: FC target worldwide names (WWNs)"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='targetWWNs', type=d.T.array)]), + '#withTargetWWNsMixin':: d.fn(help='"targetWWNs is Optional: FC target worldwide names (WWNs)"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='targetWWNs', type=d.T.array)]), withTargetWWNsMixin(targetWWNs): { inlineVolumeSpec+: { fc+: { targetWWNs+: if std.isArray(v=targetWWNs) then targetWWNs else [targetWWNs] } } }, - '#withWwids':: d.fn(help='"Optional: FC volume world wide identifiers (wwids) Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously."', args=[d.arg(name='wwids', type=d.T.array)]), + '#withWwids':: d.fn(help='"wwids Optional: FC volume world wide identifiers (wwids) Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously."', args=[d.arg(name='wwids', type=d.T.array)]), withWwids(wwids): { inlineVolumeSpec+: { fc+: { wwids: if std.isArray(v=wwids) then wwids else [wwids] } } }, - '#withWwidsMixin':: d.fn(help='"Optional: FC volume world wide identifiers (wwids) Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='wwids', type=d.T.array)]), + '#withWwidsMixin':: d.fn(help='"wwids Optional: FC volume world wide identifiers (wwids) Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='wwids', type=d.T.array)]), withWwidsMixin(wwids): { inlineVolumeSpec+: { fc+: { wwids+: if std.isArray(v=wwids) then wwids else [wwids] } } }, }, '#flexVolume':: d.obj(help='"FlexPersistentVolumeSource represents a generic persistent volume resource that is provisioned/attached using an exec based plugin."'), flexVolume: { '#secretRef':: d.obj(help='"SecretReference represents a Secret Reference. 
It has enough information to retrieve secret in any namespace"'), secretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), withName(name): { inlineVolumeSpec+: { flexVolume+: { secretRef+: { name: name } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { inlineVolumeSpec+: { flexVolume+: { secretRef+: { namespace: namespace } } } }, }, - '#withDriver':: d.fn(help='"Driver is the name of the driver to use for this volume."', args=[d.arg(name='driver', type=d.T.string)]), + '#withDriver':: d.fn(help='"driver is the name of the driver to use for this volume."', args=[d.arg(name='driver', type=d.T.string)]), withDriver(driver): { inlineVolumeSpec+: { flexVolume+: { driver: driver } } }, - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". The default filesystem depends on FlexVolume script."', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType is the Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". The default filesystem depends on FlexVolume script."', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { inlineVolumeSpec+: { flexVolume+: { fsType: fsType } } }, - '#withOptions':: d.fn(help='"Optional: Extra command options if any."', args=[d.arg(name='options', type=d.T.object)]), + '#withOptions':: d.fn(help='"options is Optional: this field holds extra command options if any."', args=[d.arg(name='options', type=d.T.object)]), withOptions(options): { inlineVolumeSpec+: { flexVolume+: { options: options } } }, - '#withOptionsMixin':: d.fn(help='"Optional: Extra command options if any."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='options', type=d.T.object)]), + '#withOptionsMixin':: d.fn(help='"options is Optional: this field holds extra command options if any."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='options', type=d.T.object)]), withOptionsMixin(options): { inlineVolumeSpec+: { flexVolume+: { options+: options } } }, - '#withReadOnly':: d.fn(help='"Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly is Optional: defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { inlineVolumeSpec+: { flexVolume+: { readOnly: readOnly } } }, }, '#flocker':: d.obj(help='"Represents a Flocker volume mounted by the Flocker agent. One and only one of datasetName and datasetUUID should be set. 
Flocker volumes do not support ownership management or SELinux relabeling."'), flocker: { - '#withDatasetName':: d.fn(help='"Name of the dataset stored as metadata -> name on the dataset for Flocker should be considered as deprecated"', args=[d.arg(name='datasetName', type=d.T.string)]), + '#withDatasetName':: d.fn(help='"datasetName is Name of the dataset stored as metadata -> name on the dataset for Flocker should be considered as deprecated"', args=[d.arg(name='datasetName', type=d.T.string)]), withDatasetName(datasetName): { inlineVolumeSpec+: { flocker+: { datasetName: datasetName } } }, - '#withDatasetUUID':: d.fn(help='"UUID of the dataset. This is unique identifier of a Flocker dataset"', args=[d.arg(name='datasetUUID', type=d.T.string)]), + '#withDatasetUUID':: d.fn(help='"datasetUUID is the UUID of the dataset. This is unique identifier of a Flocker dataset"', args=[d.arg(name='datasetUUID', type=d.T.string)]), withDatasetUUID(datasetUUID): { inlineVolumeSpec+: { flocker+: { datasetUUID: datasetUUID } } }, }, '#gcePersistentDisk':: d.obj(help='"Represents a Persistent Disk resource in Google Compute Engine.\\n\\nA GCE PD must exist before mounting to a container. The disk must also be in the same GCE project and zone as the kubelet. A GCE PD can only be mounted as read/write once or read-only many times. GCE PDs support ownership management and SELinux relabeling."'), gcePersistentDisk: { - '#withFsType':: d.fn(help='"Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType is filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { inlineVolumeSpec+: { gcePersistentDisk+: { fsType: fsType } } }, - '#withPartition':: d.fn(help='"The partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \\"1\\". Similarly, the volume partition for /dev/sda is \\"0\\" (or you can leave the property empty). More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='partition', type=d.T.integer)]), + '#withPartition':: d.fn(help='"partition is the partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \\"1\\". Similarly, the volume partition for /dev/sda is \\"0\\" (or you can leave the property empty). More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='partition', type=d.T.integer)]), withPartition(partition): { inlineVolumeSpec+: { gcePersistentDisk+: { partition: partition } } }, - '#withPdName':: d.fn(help='"Unique name of the PD resource in GCE. Used to identify the disk in GCE. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='pdName', type=d.T.string)]), + '#withPdName':: d.fn(help='"pdName is unique name of the PD resource in GCE. Used to identify the disk in GCE. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='pdName', type=d.T.string)]), withPdName(pdName): { inlineVolumeSpec+: { gcePersistentDisk+: { pdName: pdName } } }, - '#withReadOnly':: d.fn(help='"ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { inlineVolumeSpec+: { gcePersistentDisk+: { readOnly: readOnly } } }, }, '#glusterfs':: d.obj(help='"Represents a Glusterfs mount that lasts the lifetime of a pod. Glusterfs volumes do not support ownership management or SELinux relabeling."'), glusterfs: { - '#withEndpoints':: d.fn(help='"EndpointsName is the endpoint name that details Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='endpoints', type=d.T.string)]), + '#withEndpoints':: d.fn(help='"endpoints is the endpoint name that details Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='endpoints', type=d.T.string)]), withEndpoints(endpoints): { inlineVolumeSpec+: { glusterfs+: { endpoints: endpoints } } }, - '#withEndpointsNamespace':: d.fn(help='"EndpointsNamespace is the namespace that contains Glusterfs endpoint. If this field is empty, the EndpointNamespace defaults to the same namespace as the bound PVC. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='endpointsNamespace', type=d.T.string)]), + '#withEndpointsNamespace':: d.fn(help='"endpointsNamespace is the namespace that contains Glusterfs endpoint. If this field is empty, the EndpointNamespace defaults to the same namespace as the bound PVC. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='endpointsNamespace', type=d.T.string)]), withEndpointsNamespace(endpointsNamespace): { inlineVolumeSpec+: { glusterfs+: { endpointsNamespace: endpointsNamespace } } }, - '#withPath':: d.fn(help='"Path is the Glusterfs volume path. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='path', type=d.T.string)]), + '#withPath':: d.fn(help='"path is the Glusterfs volume path. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='path', type=d.T.string)]), withPath(path): { inlineVolumeSpec+: { glusterfs+: { path: path } } }, - '#withReadOnly':: d.fn(help='"ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions. Defaults to false. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly here will force the Glusterfs volume to be mounted with read-only permissions. Defaults to false. 
More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { inlineVolumeSpec+: { glusterfs+: { readOnly: readOnly } } }, }, '#hostPath':: d.obj(help='"Represents a host path mapped into a pod. Host path volumes do not support ownership management or SELinux relabeling."'), hostPath: { - '#withPath':: d.fn(help='"Path of the directory on the host. If the path is a symlink, it will follow the link to the real path. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath"', args=[d.arg(name='path', type=d.T.string)]), + '#withPath':: d.fn(help='"path of the directory on the host. If the path is a symlink, it will follow the link to the real path. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath"', args=[d.arg(name='path', type=d.T.string)]), withPath(path): { inlineVolumeSpec+: { hostPath+: { path: path } } }, - '#withType':: d.fn(help='"Type for HostPath Volume Defaults to \\"\\" More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath"', args=[d.arg(name='type', type=d.T.string)]), + '#withType':: d.fn(help='"type for HostPath Volume Defaults to \\"\\" More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath"', args=[d.arg(name='type', type=d.T.string)]), withType(type): { inlineVolumeSpec+: { hostPath+: { type: type } } }, }, '#iscsi':: d.obj(help='"ISCSIPersistentVolumeSource represents an ISCSI disk. ISCSI volumes can only be mounted as read/write once. ISCSI volumes support ownership management and SELinux relabeling."'), iscsi: { '#secretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), secretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), withName(name): { inlineVolumeSpec+: { iscsi+: { secretRef+: { name: name } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { inlineVolumeSpec+: { iscsi+: { secretRef+: { namespace: namespace } } } }, }, - '#withChapAuthDiscovery':: d.fn(help='"whether support iSCSI Discovery CHAP authentication"', args=[d.arg(name='chapAuthDiscovery', type=d.T.boolean)]), + '#withChapAuthDiscovery':: d.fn(help='"chapAuthDiscovery defines whether support iSCSI Discovery CHAP authentication"', args=[d.arg(name='chapAuthDiscovery', type=d.T.boolean)]), withChapAuthDiscovery(chapAuthDiscovery): { inlineVolumeSpec+: { iscsi+: { chapAuthDiscovery: chapAuthDiscovery } } }, - '#withChapAuthSession':: d.fn(help='"whether support iSCSI Session CHAP authentication"', args=[d.arg(name='chapAuthSession', type=d.T.boolean)]), + '#withChapAuthSession':: d.fn(help='"chapAuthSession defines whether support iSCSI Session CHAP authentication"', args=[d.arg(name='chapAuthSession', type=d.T.boolean)]), withChapAuthSession(chapAuthSession): { inlineVolumeSpec+: { iscsi+: { chapAuthSession: chapAuthSession } } }, - '#withFsType':: d.fn(help='"Filesystem type of the volume that you want to mount. 
Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi"', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType is the filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi"', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { inlineVolumeSpec+: { iscsi+: { fsType: fsType } } }, - '#withInitiatorName':: d.fn(help='"Custom iSCSI Initiator Name. If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface : will be created for the connection."', args=[d.arg(name='initiatorName', type=d.T.string)]), + '#withInitiatorName':: d.fn(help='"initiatorName is the custom iSCSI Initiator Name. If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface : will be created for the connection."', args=[d.arg(name='initiatorName', type=d.T.string)]), withInitiatorName(initiatorName): { inlineVolumeSpec+: { iscsi+: { initiatorName: initiatorName } } }, - '#withIqn':: d.fn(help='"Target iSCSI Qualified Name."', args=[d.arg(name='iqn', type=d.T.string)]), + '#withIqn':: d.fn(help='"iqn is Target iSCSI Qualified Name."', args=[d.arg(name='iqn', type=d.T.string)]), withIqn(iqn): { inlineVolumeSpec+: { iscsi+: { iqn: iqn } } }, - '#withIscsiInterface':: d.fn(help="\"iSCSI Interface Name that uses an iSCSI transport. Defaults to 'default' (tcp).\"", args=[d.arg(name='iscsiInterface', type=d.T.string)]), + '#withIscsiInterface':: d.fn(help="\"iscsiInterface is the interface Name that uses an iSCSI transport. Defaults to 'default' (tcp).\"", args=[d.arg(name='iscsiInterface', type=d.T.string)]), withIscsiInterface(iscsiInterface): { inlineVolumeSpec+: { iscsi+: { iscsiInterface: iscsiInterface } } }, - '#withLun':: d.fn(help='"iSCSI Target Lun number."', args=[d.arg(name='lun', type=d.T.integer)]), + '#withLun':: d.fn(help='"lun is iSCSI Target Lun number."', args=[d.arg(name='lun', type=d.T.integer)]), withLun(lun): { inlineVolumeSpec+: { iscsi+: { lun: lun } } }, - '#withPortals':: d.fn(help='"iSCSI Target Portal List. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260)."', args=[d.arg(name='portals', type=d.T.array)]), + '#withPortals':: d.fn(help='"portals is the iSCSI Target Portal List. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260)."', args=[d.arg(name='portals', type=d.T.array)]), withPortals(portals): { inlineVolumeSpec+: { iscsi+: { portals: if std.isArray(v=portals) then portals else [portals] } } }, - '#withPortalsMixin':: d.fn(help='"iSCSI Target Portal List. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260)."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='portals', type=d.T.array)]), + '#withPortalsMixin':: d.fn(help='"portals is the iSCSI Target Portal List. 
The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260)."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='portals', type=d.T.array)]), withPortalsMixin(portals): { inlineVolumeSpec+: { iscsi+: { portals+: if std.isArray(v=portals) then portals else [portals] } } }, - '#withReadOnly':: d.fn(help='"ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false."', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { inlineVolumeSpec+: { iscsi+: { readOnly: readOnly } } }, - '#withTargetPortal':: d.fn(help='"iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260)."', args=[d.arg(name='targetPortal', type=d.T.string)]), + '#withTargetPortal':: d.fn(help='"targetPortal is iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260)."', args=[d.arg(name='targetPortal', type=d.T.string)]), withTargetPortal(targetPortal): { inlineVolumeSpec+: { iscsi+: { targetPortal: targetPortal } } }, }, '#local':: d.obj(help='"Local represents directly-attached storage with node affinity (Beta feature)"'), 'local': { - '#withFsType':: d.fn(help='"Filesystem type to mount. It applies only when the Path is a block device. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". The default value is to auto-select a fileystem if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType is the filesystem type to mount. It applies only when the Path is a block device. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". The default value is to auto-select a filesystem if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { inlineVolumeSpec+: { 'local'+: { fsType: fsType } } }, - '#withPath':: d.fn(help='"The full path to the volume on the node. It can be either a directory or block device (disk, partition, ...)."', args=[d.arg(name='path', type=d.T.string)]), + '#withPath':: d.fn(help='"path of the full path to the volume on the node. It can be either a directory or block device (disk, partition, ...)."', args=[d.arg(name='path', type=d.T.string)]), withPath(path): { inlineVolumeSpec+: { 'local'+: { path: path } } }, }, '#nfs':: d.obj(help='"Represents an NFS mount that lasts the lifetime of a pod. NFS volumes do not support ownership management or SELinux relabeling."'), nfs: { - '#withPath':: d.fn(help='"Path that is exported by the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs"', args=[d.arg(name='path', type=d.T.string)]), + '#withPath':: d.fn(help='"path that is exported by the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs"', args=[d.arg(name='path', type=d.T.string)]), withPath(path): { inlineVolumeSpec+: { nfs+: { path: path } } }, - '#withReadOnly':: d.fn(help='"ReadOnly here will force the NFS export to be mounted with read-only permissions. Defaults to false. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs"', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly here will force the NFS export to be mounted with read-only permissions. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs"', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { inlineVolumeSpec+: { nfs+: { readOnly: readOnly } } }, - '#withServer':: d.fn(help='"Server is the hostname or IP address of the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs"', args=[d.arg(name='server', type=d.T.string)]), + '#withServer':: d.fn(help='"server is the hostname or IP address of the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs"', args=[d.arg(name='server', type=d.T.string)]), withServer(server): { inlineVolumeSpec+: { nfs+: { server: server } } }, }, '#nodeAffinity':: d.obj(help='"VolumeNodeAffinity defines constraints that limit what nodes this volume can be accessed from."'), @@ -271,87 +278,87 @@ }, '#photonPersistentDisk':: d.obj(help='"Represents a Photon Controller persistent disk resource."'), photonPersistentDisk: { - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { inlineVolumeSpec+: { photonPersistentDisk+: { fsType: fsType } } }, - '#withPdID':: d.fn(help='"ID that identifies Photon Controller persistent disk"', args=[d.arg(name='pdID', type=d.T.string)]), + '#withPdID':: d.fn(help='"pdID is the ID that identifies Photon Controller persistent disk"', args=[d.arg(name='pdID', type=d.T.string)]), withPdID(pdID): { inlineVolumeSpec+: { photonPersistentDisk+: { pdID: pdID } } }, }, '#portworxVolume':: d.obj(help='"PortworxVolumeSource represents a Portworx volume resource."'), portworxVolume: { - '#withFsType':: d.fn(help='"FSType represents the filesystem type to mount Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fSType represents the filesystem type to mount Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { inlineVolumeSpec+: { portworxVolume+: { fsType: fsType } } }, - '#withReadOnly':: d.fn(help='"Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly defaults to false (read/write). 
ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { inlineVolumeSpec+: { portworxVolume+: { readOnly: readOnly } } }, - '#withVolumeID':: d.fn(help='"VolumeID uniquely identifies a Portworx volume"', args=[d.arg(name='volumeID', type=d.T.string)]), + '#withVolumeID':: d.fn(help='"volumeID uniquely identifies a Portworx volume"', args=[d.arg(name='volumeID', type=d.T.string)]), withVolumeID(volumeID): { inlineVolumeSpec+: { portworxVolume+: { volumeID: volumeID } } }, }, '#quobyte':: d.obj(help='"Represents a Quobyte mount that lasts the lifetime of a pod. Quobyte volumes do not support ownership management or SELinux relabeling."'), quobyte: { - '#withGroup':: d.fn(help='"Group to map volume access to Default is no group"', args=[d.arg(name='group', type=d.T.string)]), + '#withGroup':: d.fn(help='"group to map volume access to Default is no group"', args=[d.arg(name='group', type=d.T.string)]), withGroup(group): { inlineVolumeSpec+: { quobyte+: { group: group } } }, - '#withReadOnly':: d.fn(help='"ReadOnly here will force the Quobyte volume to be mounted with read-only permissions. Defaults to false."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly here will force the Quobyte volume to be mounted with read-only permissions. Defaults to false."', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { inlineVolumeSpec+: { quobyte+: { readOnly: readOnly } } }, - '#withRegistry':: d.fn(help='"Registry represents a single or multiple Quobyte Registry services specified as a string as host:port pair (multiple entries are separated with commas) which acts as the central registry for volumes"', args=[d.arg(name='registry', type=d.T.string)]), + '#withRegistry':: d.fn(help='"registry represents a single or multiple Quobyte Registry services specified as a string as host:port pair (multiple entries are separated with commas) which acts as the central registry for volumes"', args=[d.arg(name='registry', type=d.T.string)]), withRegistry(registry): { inlineVolumeSpec+: { quobyte+: { registry: registry } } }, - '#withTenant':: d.fn(help='"Tenant owning the given Quobyte volume in the Backend Used with dynamically provisioned Quobyte volumes, value is set by the plugin"', args=[d.arg(name='tenant', type=d.T.string)]), + '#withTenant':: d.fn(help='"tenant owning the given Quobyte volume in the Backend Used with dynamically provisioned Quobyte volumes, value is set by the plugin"', args=[d.arg(name='tenant', type=d.T.string)]), withTenant(tenant): { inlineVolumeSpec+: { quobyte+: { tenant: tenant } } }, - '#withUser':: d.fn(help='"User to map volume access to Defaults to serivceaccount user"', args=[d.arg(name='user', type=d.T.string)]), + '#withUser':: d.fn(help='"user to map volume access to Defaults to serivceaccount user"', args=[d.arg(name='user', type=d.T.string)]), withUser(user): { inlineVolumeSpec+: { quobyte+: { user: user } } }, - '#withVolume':: d.fn(help='"Volume is a string that references an already created Quobyte volume by name."', args=[d.arg(name='volume', type=d.T.string)]), + '#withVolume':: d.fn(help='"volume is a string that references an already created Quobyte volume by name."', args=[d.arg(name='volume', type=d.T.string)]), withVolume(volume): { inlineVolumeSpec+: { quobyte+: { volume: volume } } }, }, '#rbd':: d.obj(help='"Represents a Rados Block Device mount that lasts the lifetime of a pod. 
RBD volumes support ownership management and SELinux relabeling."'), rbd: { '#secretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), secretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), withName(name): { inlineVolumeSpec+: { rbd+: { secretRef+: { name: name } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { inlineVolumeSpec+: { rbd+: { secretRef+: { namespace: namespace } } } }, }, - '#withFsType':: d.fn(help='"Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd"', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType is the filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd"', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { inlineVolumeSpec+: { rbd+: { fsType: fsType } } }, - '#withImage':: d.fn(help='"The rados image name. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='image', type=d.T.string)]), + '#withImage':: d.fn(help='"image is the rados image name. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='image', type=d.T.string)]), withImage(image): { inlineVolumeSpec+: { rbd+: { image: image } } }, - '#withKeyring':: d.fn(help='"Keyring is the path to key ring for RBDUser. Default is /etc/ceph/keyring. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='keyring', type=d.T.string)]), + '#withKeyring':: d.fn(help='"keyring is the path to key ring for RBDUser. Default is /etc/ceph/keyring. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='keyring', type=d.T.string)]), withKeyring(keyring): { inlineVolumeSpec+: { rbd+: { keyring: keyring } } }, - '#withMonitors':: d.fn(help='"A collection of Ceph monitors. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='monitors', type=d.T.array)]), + '#withMonitors':: d.fn(help='"monitors is a collection of Ceph monitors. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='monitors', type=d.T.array)]), withMonitors(monitors): { inlineVolumeSpec+: { rbd+: { monitors: if std.isArray(v=monitors) then monitors else [monitors] } } }, - '#withMonitorsMixin':: d.fn(help='"A collection of Ceph monitors. 
More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='monitors', type=d.T.array)]), + '#withMonitorsMixin':: d.fn(help='"monitors is a collection of Ceph monitors. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='monitors', type=d.T.array)]), withMonitorsMixin(monitors): { inlineVolumeSpec+: { rbd+: { monitors+: if std.isArray(v=monitors) then monitors else [monitors] } } }, - '#withPool':: d.fn(help='"The rados pool name. Default is rbd. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='pool', type=d.T.string)]), + '#withPool':: d.fn(help='"pool is the rados pool name. Default is rbd. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='pool', type=d.T.string)]), withPool(pool): { inlineVolumeSpec+: { rbd+: { pool: pool } } }, - '#withReadOnly':: d.fn(help='"ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { inlineVolumeSpec+: { rbd+: { readOnly: readOnly } } }, - '#withUser':: d.fn(help='"The rados user name. Default is admin. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='user', type=d.T.string)]), + '#withUser':: d.fn(help='"user is the rados user name. Default is admin. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='user', type=d.T.string)]), withUser(user): { inlineVolumeSpec+: { rbd+: { user: user } } }, }, '#scaleIO':: d.obj(help='"ScaleIOPersistentVolumeSource represents a persistent ScaleIO volume"'), scaleIO: { '#secretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), secretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), withName(name): { inlineVolumeSpec+: { scaleIO+: { secretRef+: { name: name } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { inlineVolumeSpec+: { scaleIO+: { secretRef+: { namespace: namespace } } } }, }, - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Default is \\"xfs\\', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". 
Default is \\"xfs\\', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { inlineVolumeSpec+: { scaleIO+: { fsType: fsType } } }, - '#withGateway':: d.fn(help='"The host address of the ScaleIO API Gateway."', args=[d.arg(name='gateway', type=d.T.string)]), + '#withGateway':: d.fn(help='"gateway is the host address of the ScaleIO API Gateway."', args=[d.arg(name='gateway', type=d.T.string)]), withGateway(gateway): { inlineVolumeSpec+: { scaleIO+: { gateway: gateway } } }, - '#withProtectionDomain':: d.fn(help='"The name of the ScaleIO Protection Domain for the configured storage."', args=[d.arg(name='protectionDomain', type=d.T.string)]), + '#withProtectionDomain':: d.fn(help='"protectionDomain is the name of the ScaleIO Protection Domain for the configured storage."', args=[d.arg(name='protectionDomain', type=d.T.string)]), withProtectionDomain(protectionDomain): { inlineVolumeSpec+: { scaleIO+: { protectionDomain: protectionDomain } } }, - '#withReadOnly':: d.fn(help='"Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { inlineVolumeSpec+: { scaleIO+: { readOnly: readOnly } } }, - '#withSslEnabled':: d.fn(help='"Flag to enable/disable SSL communication with Gateway, default false"', args=[d.arg(name='sslEnabled', type=d.T.boolean)]), + '#withSslEnabled':: d.fn(help='"sslEnabled is the flag to enable/disable SSL communication with Gateway, default false"', args=[d.arg(name='sslEnabled', type=d.T.boolean)]), withSslEnabled(sslEnabled): { inlineVolumeSpec+: { scaleIO+: { sslEnabled: sslEnabled } } }, - '#withStorageMode':: d.fn(help='"Indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. Default is ThinProvisioned."', args=[d.arg(name='storageMode', type=d.T.string)]), + '#withStorageMode':: d.fn(help='"storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. 
Default is ThinProvisioned."', args=[d.arg(name='storageMode', type=d.T.string)]), withStorageMode(storageMode): { inlineVolumeSpec+: { scaleIO+: { storageMode: storageMode } } }, - '#withStoragePool':: d.fn(help='"The ScaleIO Storage Pool associated with the protection domain."', args=[d.arg(name='storagePool', type=d.T.string)]), + '#withStoragePool':: d.fn(help='"storagePool is the ScaleIO Storage Pool associated with the protection domain."', args=[d.arg(name='storagePool', type=d.T.string)]), withStoragePool(storagePool): { inlineVolumeSpec+: { scaleIO+: { storagePool: storagePool } } }, - '#withSystem':: d.fn(help='"The name of the storage system as configured in ScaleIO."', args=[d.arg(name='system', type=d.T.string)]), + '#withSystem':: d.fn(help='"system is the name of the storage system as configured in ScaleIO."', args=[d.arg(name='system', type=d.T.string)]), withSystem(system): { inlineVolumeSpec+: { scaleIO+: { system: system } } }, - '#withVolumeName':: d.fn(help='"The name of a volume already created in the ScaleIO system that is associated with this volume source."', args=[d.arg(name='volumeName', type=d.T.string)]), + '#withVolumeName':: d.fn(help='"volumeName is the name of a volume already created in the ScaleIO system that is associated with this volume source."', args=[d.arg(name='volumeName', type=d.T.string)]), withVolumeName(volumeName): { inlineVolumeSpec+: { scaleIO+: { volumeName: volumeName } } }, }, '#storageos':: d.obj(help='"Represents a StorageOS persistent volume resource."'), @@ -373,46 +380,48 @@ '#withUid':: d.fn(help='"UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { inlineVolumeSpec+: { storageos+: { secretRef+: { uid: uid } } } }, }, - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { inlineVolumeSpec+: { storageos+: { fsType: fsType } } }, - '#withReadOnly':: d.fn(help='"Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { inlineVolumeSpec+: { storageos+: { readOnly: readOnly } } }, - '#withVolumeName':: d.fn(help='"VolumeName is the human-readable name of the StorageOS volume. Volume names are only unique within a namespace."', args=[d.arg(name='volumeName', type=d.T.string)]), + '#withVolumeName':: d.fn(help='"volumeName is the human-readable name of the StorageOS volume. Volume names are only unique within a namespace."', args=[d.arg(name='volumeName', type=d.T.string)]), withVolumeName(volumeName): { inlineVolumeSpec+: { storageos+: { volumeName: volumeName } } }, - '#withVolumeNamespace':: d.fn(help="\"VolumeNamespace specifies the scope of the volume within StorageOS. 
If no namespace is specified then the Pod's namespace will be used. This allows the Kubernetes name scoping to be mirrored within StorageOS for tighter integration. Set VolumeName to any name to override the default behaviour. Set to \\\"default\\\" if you are not using namespaces within StorageOS. Namespaces that do not pre-exist within StorageOS will be created.\"", args=[d.arg(name='volumeNamespace', type=d.T.string)]), + '#withVolumeNamespace':: d.fn(help="\"volumeNamespace specifies the scope of the volume within StorageOS. If no namespace is specified then the Pod's namespace will be used. This allows the Kubernetes name scoping to be mirrored within StorageOS for tighter integration. Set VolumeName to any name to override the default behaviour. Set to \\\"default\\\" if you are not using namespaces within StorageOS. Namespaces that do not pre-exist within StorageOS will be created.\"", args=[d.arg(name='volumeNamespace', type=d.T.string)]), withVolumeNamespace(volumeNamespace): { inlineVolumeSpec+: { storageos+: { volumeNamespace: volumeNamespace } } }, }, '#vsphereVolume':: d.obj(help='"Represents a vSphere volume resource."'), vsphereVolume: { - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType is filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { inlineVolumeSpec+: { vsphereVolume+: { fsType: fsType } } }, - '#withStoragePolicyID':: d.fn(help='"Storage Policy Based Management (SPBM) profile ID associated with the StoragePolicyName."', args=[d.arg(name='storagePolicyID', type=d.T.string)]), + '#withStoragePolicyID':: d.fn(help='"storagePolicyID is the storage Policy Based Management (SPBM) profile ID associated with the StoragePolicyName."', args=[d.arg(name='storagePolicyID', type=d.T.string)]), withStoragePolicyID(storagePolicyID): { inlineVolumeSpec+: { vsphereVolume+: { storagePolicyID: storagePolicyID } } }, - '#withStoragePolicyName':: d.fn(help='"Storage Policy Based Management (SPBM) profile name."', args=[d.arg(name='storagePolicyName', type=d.T.string)]), + '#withStoragePolicyName':: d.fn(help='"storagePolicyName is the storage Policy Based Management (SPBM) profile name."', args=[d.arg(name='storagePolicyName', type=d.T.string)]), withStoragePolicyName(storagePolicyName): { inlineVolumeSpec+: { vsphereVolume+: { storagePolicyName: storagePolicyName } } }, - '#withVolumePath':: d.fn(help='"Path that identifies vSphere volume vmdk"', args=[d.arg(name='volumePath', type=d.T.string)]), + '#withVolumePath':: d.fn(help='"volumePath is the path that identifies vSphere volume vmdk"', args=[d.arg(name='volumePath', type=d.T.string)]), withVolumePath(volumePath): { inlineVolumeSpec+: { vsphereVolume+: { volumePath: volumePath } } }, }, - '#withAccessModes':: d.fn(help='"AccessModes contains all ways the volume can be mounted. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes"', args=[d.arg(name='accessModes', type=d.T.array)]), + '#withAccessModes':: d.fn(help='"accessModes contains all ways the volume can be mounted. 
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes"', args=[d.arg(name='accessModes', type=d.T.array)]), withAccessModes(accessModes): { inlineVolumeSpec+: { accessModes: if std.isArray(v=accessModes) then accessModes else [accessModes] } }, - '#withAccessModesMixin':: d.fn(help='"AccessModes contains all ways the volume can be mounted. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='accessModes', type=d.T.array)]), + '#withAccessModesMixin':: d.fn(help='"accessModes contains all ways the volume can be mounted. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='accessModes', type=d.T.array)]), withAccessModesMixin(accessModes): { inlineVolumeSpec+: { accessModes+: if std.isArray(v=accessModes) then accessModes else [accessModes] } }, - '#withCapacity':: d.fn(help="\"A description of the persistent volume's resources and capacity. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity\"", args=[d.arg(name='capacity', type=d.T.object)]), + '#withCapacity':: d.fn(help="\"capacity is the description of the persistent volume's resources and capacity. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity\"", args=[d.arg(name='capacity', type=d.T.object)]), withCapacity(capacity): { inlineVolumeSpec+: { capacity: capacity } }, - '#withCapacityMixin':: d.fn(help="\"A description of the persistent volume's resources and capacity. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='capacity', type=d.T.object)]), + '#withCapacityMixin':: d.fn(help="\"capacity is the description of the persistent volume's resources and capacity. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='capacity', type=d.T.object)]), withCapacityMixin(capacity): { inlineVolumeSpec+: { capacity+: capacity } }, - '#withMountOptions':: d.fn(help='"A list of mount options, e.g. [\\"ro\\", \\"soft\\"]. Not validated - mount will simply fail if one is invalid. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#mount-options"', args=[d.arg(name='mountOptions', type=d.T.array)]), + '#withMountOptions':: d.fn(help='"mountOptions is the list of mount options, e.g. [\\"ro\\", \\"soft\\"]. Not validated - mount will simply fail if one is invalid. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#mount-options"', args=[d.arg(name='mountOptions', type=d.T.array)]), withMountOptions(mountOptions): { inlineVolumeSpec+: { mountOptions: if std.isArray(v=mountOptions) then mountOptions else [mountOptions] } }, - '#withMountOptionsMixin':: d.fn(help='"A list of mount options, e.g. [\\"ro\\", \\"soft\\"]. Not validated - mount will simply fail if one is invalid. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#mount-options"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='mountOptions', type=d.T.array)]), + '#withMountOptionsMixin':: d.fn(help='"mountOptions is the list of mount options, e.g. [\\"ro\\", \\"soft\\"]. 
Not validated - mount will simply fail if one is invalid. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#mount-options"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='mountOptions', type=d.T.array)]), withMountOptionsMixin(mountOptions): { inlineVolumeSpec+: { mountOptions+: if std.isArray(v=mountOptions) then mountOptions else [mountOptions] } }, - '#withPersistentVolumeReclaimPolicy':: d.fn(help='"What happens to a persistent volume when released from its claim. Valid options are Retain (default for manually created PersistentVolumes), Delete (default for dynamically provisioned PersistentVolumes), and Recycle (deprecated). Recycle must be supported by the volume plugin underlying this PersistentVolume. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#reclaiming"', args=[d.arg(name='persistentVolumeReclaimPolicy', type=d.T.string)]), + '#withPersistentVolumeReclaimPolicy':: d.fn(help='"persistentVolumeReclaimPolicy defines what happens to a persistent volume when released from its claim. Valid options are Retain (default for manually created PersistentVolumes), Delete (default for dynamically provisioned PersistentVolumes), and Recycle (deprecated). Recycle must be supported by the volume plugin underlying this PersistentVolume. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#reclaiming"', args=[d.arg(name='persistentVolumeReclaimPolicy', type=d.T.string)]), withPersistentVolumeReclaimPolicy(persistentVolumeReclaimPolicy): { inlineVolumeSpec+: { persistentVolumeReclaimPolicy: persistentVolumeReclaimPolicy } }, - '#withStorageClassName':: d.fn(help='"Name of StorageClass to which this persistent volume belongs. Empty value means that this volume does not belong to any StorageClass."', args=[d.arg(name='storageClassName', type=d.T.string)]), + '#withStorageClassName':: d.fn(help='"storageClassName is the name of StorageClass to which this persistent volume belongs. Empty value means that this volume does not belong to any StorageClass."', args=[d.arg(name='storageClassName', type=d.T.string)]), withStorageClassName(storageClassName): { inlineVolumeSpec+: { storageClassName: storageClassName } }, + '#withVolumeAttributesClassName':: d.fn(help='"Name of VolumeAttributesClass to which this persistent volume belongs. Empty value is not allowed. When this field is not set, it indicates that this volume does not belong to any VolumeAttributesClass. This field is mutable and can be changed by the CSI driver after a volume has been updated successfully to a new class. For an unbound PersistentVolume, the volumeAttributesClassName will be matched with unbound PersistentVolumeClaims during the binding process. This is an alpha field and requires enabling VolumeAttributesClass feature."', args=[d.arg(name='volumeAttributesClassName', type=d.T.string)]), + withVolumeAttributesClassName(volumeAttributesClassName): { inlineVolumeSpec+: { volumeAttributesClassName: volumeAttributesClassName } }, '#withVolumeMode':: d.fn(help='"volumeMode defines if a volume is intended to be used with a formatted filesystem or to remain in raw block state. 
Value of Filesystem is implied when not included in spec."', args=[d.arg(name='volumeMode', type=d.T.string)]), withVolumeMode(volumeMode): { inlineVolumeSpec+: { volumeMode: volumeMode } }, }, - '#withPersistentVolumeName':: d.fn(help='"Name of the persistent volume to attach."', args=[d.arg(name='persistentVolumeName', type=d.T.string)]), + '#withPersistentVolumeName':: d.fn(help='"persistentVolumeName represents the name of the persistent volume to attach."', args=[d.arg(name='persistentVolumeName', type=d.T.string)]), withPersistentVolumeName(persistentVolumeName): { persistentVolumeName: persistentVolumeName }, '#mixin': 'ignore', mixin: self, diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1/volumeAttachmentSpec.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/storage/v1/volumeAttachmentSpec.libsonnet similarity index 59% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1/volumeAttachmentSpec.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/storage/v1/volumeAttachmentSpec.libsonnet index 8b092052a56..7baf41f75d9 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1/volumeAttachmentSpec.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/storage/v1/volumeAttachmentSpec.libsonnet @@ -7,77 +7,77 @@ inlineVolumeSpec: { '#awsElasticBlockStore':: d.obj(help='"Represents a Persistent Disk resource in AWS.\\n\\nAn AWS EBS disk must exist before mounting to a container. The disk must also be in the same AWS zone as the kubelet. An AWS EBS disk can only be mounted as read/write once. AWS EBS volumes support ownership management and SELinux relabeling."'), awsElasticBlockStore: { - '#withFsType':: d.fn(help='"Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore"', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType is the filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore"', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { source+: { inlineVolumeSpec+: { awsElasticBlockStore+: { fsType: fsType } } } }, - '#withPartition':: d.fn(help='"The partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \\"1\\". Similarly, the volume partition for /dev/sda is \\"0\\" (or you can leave the property empty)."', args=[d.arg(name='partition', type=d.T.integer)]), + '#withPartition':: d.fn(help='"partition is the partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \\"1\\". 
Similarly, the volume partition for /dev/sda is \\"0\\" (or you can leave the property empty)."', args=[d.arg(name='partition', type=d.T.integer)]), withPartition(partition): { source+: { inlineVolumeSpec+: { awsElasticBlockStore+: { partition: partition } } } }, - '#withReadOnly':: d.fn(help='"Specify \\"true\\" to force and set the ReadOnly property in VolumeMounts to \\"true\\". If omitted, the default is \\"false\\". More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore"', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly value true will force the readOnly setting in VolumeMounts. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore"', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { source+: { inlineVolumeSpec+: { awsElasticBlockStore+: { readOnly: readOnly } } } }, - '#withVolumeID':: d.fn(help='"Unique ID of the persistent disk resource in AWS (Amazon EBS volume). More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore"', args=[d.arg(name='volumeID', type=d.T.string)]), + '#withVolumeID':: d.fn(help='"volumeID is unique ID of the persistent disk resource in AWS (Amazon EBS volume). More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore"', args=[d.arg(name='volumeID', type=d.T.string)]), withVolumeID(volumeID): { source+: { inlineVolumeSpec+: { awsElasticBlockStore+: { volumeID: volumeID } } } }, }, '#azureDisk':: d.obj(help='"AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod."'), azureDisk: { - '#withCachingMode':: d.fn(help='"Host Caching mode: None, Read Only, Read Write."', args=[d.arg(name='cachingMode', type=d.T.string)]), + '#withCachingMode':: d.fn(help='"cachingMode is the Host Caching mode: None, Read Only, Read Write."', args=[d.arg(name='cachingMode', type=d.T.string)]), withCachingMode(cachingMode): { source+: { inlineVolumeSpec+: { azureDisk+: { cachingMode: cachingMode } } } }, - '#withDiskName':: d.fn(help='"The Name of the data disk in the blob storage"', args=[d.arg(name='diskName', type=d.T.string)]), + '#withDiskName':: d.fn(help='"diskName is the Name of the data disk in the blob storage"', args=[d.arg(name='diskName', type=d.T.string)]), withDiskName(diskName): { source+: { inlineVolumeSpec+: { azureDisk+: { diskName: diskName } } } }, - '#withDiskURI':: d.fn(help='"The URI the data disk in the blob storage"', args=[d.arg(name='diskURI', type=d.T.string)]), + '#withDiskURI':: d.fn(help='"diskURI is the URI of data disk in the blob storage"', args=[d.arg(name='diskURI', type=d.T.string)]), withDiskURI(diskURI): { source+: { inlineVolumeSpec+: { azureDisk+: { diskURI: diskURI } } } }, - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType is Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". 
Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { source+: { inlineVolumeSpec+: { azureDisk+: { fsType: fsType } } } }, - '#withKind':: d.fn(help='"Expected values Shared: multiple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared"', args=[d.arg(name='kind', type=d.T.string)]), + '#withKind':: d.fn(help='"kind expected values are Shared: multiple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared"', args=[d.arg(name='kind', type=d.T.string)]), withKind(kind): { source+: { inlineVolumeSpec+: { azureDisk+: { kind: kind } } } }, - '#withReadOnly':: d.fn(help='"Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { source+: { inlineVolumeSpec+: { azureDisk+: { readOnly: readOnly } } } }, }, '#azureFile':: d.obj(help='"AzureFile represents an Azure File Service mount on the host and bind mount to the pod."'), azureFile: { - '#withReadOnly':: d.fn(help='"Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { source+: { inlineVolumeSpec+: { azureFile+: { readOnly: readOnly } } } }, - '#withSecretName':: d.fn(help='"the name of secret that contains Azure Storage Account Name and Key"', args=[d.arg(name='secretName', type=d.T.string)]), + '#withSecretName':: d.fn(help='"secretName is the name of secret that contains Azure Storage Account Name and Key"', args=[d.arg(name='secretName', type=d.T.string)]), withSecretName(secretName): { source+: { inlineVolumeSpec+: { azureFile+: { secretName: secretName } } } }, - '#withSecretNamespace':: d.fn(help='"the namespace of the secret that contains Azure Storage Account Name and Key default is the same as the Pod"', args=[d.arg(name='secretNamespace', type=d.T.string)]), + '#withSecretNamespace':: d.fn(help='"secretNamespace is the namespace of the secret that contains Azure Storage Account Name and Key default is the same as the Pod"', args=[d.arg(name='secretNamespace', type=d.T.string)]), withSecretNamespace(secretNamespace): { source+: { inlineVolumeSpec+: { azureFile+: { secretNamespace: secretNamespace } } } }, - '#withShareName':: d.fn(help='"Share Name"', args=[d.arg(name='shareName', type=d.T.string)]), + '#withShareName':: d.fn(help='"shareName is the azure Share Name"', args=[d.arg(name='shareName', type=d.T.string)]), withShareName(shareName): { source+: { inlineVolumeSpec+: { azureFile+: { shareName: shareName } } } }, }, '#cephfs':: d.obj(help='"Represents a Ceph Filesystem mount that lasts the lifetime of a pod Cephfs volumes do not support ownership management or SELinux relabeling."'), cephfs: { '#secretRef':: d.obj(help='"SecretReference represents a Secret Reference. 
It has enough information to retrieve secret in any namespace"'), secretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), withName(name): { source+: { inlineVolumeSpec+: { cephfs+: { secretRef+: { name: name } } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { source+: { inlineVolumeSpec+: { cephfs+: { secretRef+: { namespace: namespace } } } } }, }, - '#withMonitors':: d.fn(help='"Required: Monitors is a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='monitors', type=d.T.array)]), + '#withMonitors':: d.fn(help='"monitors is Required: Monitors is a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='monitors', type=d.T.array)]), withMonitors(monitors): { source+: { inlineVolumeSpec+: { cephfs+: { monitors: if std.isArray(v=monitors) then monitors else [monitors] } } } }, - '#withMonitorsMixin':: d.fn(help='"Required: Monitors is a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='monitors', type=d.T.array)]), + '#withMonitorsMixin':: d.fn(help='"monitors is Required: Monitors is a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='monitors', type=d.T.array)]), withMonitorsMixin(monitors): { source+: { inlineVolumeSpec+: { cephfs+: { monitors+: if std.isArray(v=monitors) then monitors else [monitors] } } } }, - '#withPath':: d.fn(help='"Optional: Used as the mounted root, rather than the full Ceph tree, default is /"', args=[d.arg(name='path', type=d.T.string)]), + '#withPath':: d.fn(help='"path is Optional: Used as the mounted root, rather than the full Ceph tree, default is /"', args=[d.arg(name='path', type=d.T.string)]), withPath(path): { source+: { inlineVolumeSpec+: { cephfs+: { path: path } } } }, - '#withReadOnly':: d.fn(help='"Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly is Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. 
More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { source+: { inlineVolumeSpec+: { cephfs+: { readOnly: readOnly } } } }, - '#withSecretFile':: d.fn(help='"Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='secretFile', type=d.T.string)]), + '#withSecretFile':: d.fn(help='"secretFile is Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='secretFile', type=d.T.string)]), withSecretFile(secretFile): { source+: { inlineVolumeSpec+: { cephfs+: { secretFile: secretFile } } } }, - '#withUser':: d.fn(help='"Optional: User is the rados user name, default is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='user', type=d.T.string)]), + '#withUser':: d.fn(help='"user is Optional: User is the rados user name, default is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it"', args=[d.arg(name='user', type=d.T.string)]), withUser(user): { source+: { inlineVolumeSpec+: { cephfs+: { user: user } } } }, }, '#cinder':: d.obj(help='"Represents a cinder volume resource in Openstack. A Cinder volume must exist before mounting to a container. The volume must also be in the same region as the kubelet. Cinder volumes support ownership management and SELinux relabeling."'), cinder: { '#secretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), secretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), withName(name): { source+: { inlineVolumeSpec+: { cinder+: { secretRef+: { name: name } } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { source+: { inlineVolumeSpec+: { cinder+: { secretRef+: { namespace: namespace } } } } }, }, - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://examples.k8s.io/mysql-cinder-pd/README.md"', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType Filesystem type to mount. Must be a filesystem type supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://examples.k8s.io/mysql-cinder-pd/README.md"', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { source+: { inlineVolumeSpec+: { cinder+: { fsType: fsType } } } }, - '#withReadOnly':: d.fn(help='"Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. 
More info: https://examples.k8s.io/mysql-cinder-pd/README.md"', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly is Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/mysql-cinder-pd/README.md"', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { source+: { inlineVolumeSpec+: { cinder+: { readOnly: readOnly } } } }, - '#withVolumeID':: d.fn(help='"volume id used to identify the volume in cinder. More info: https://examples.k8s.io/mysql-cinder-pd/README.md"', args=[d.arg(name='volumeID', type=d.T.string)]), + '#withVolumeID':: d.fn(help='"volumeID used to identify the volume in cinder. More info: https://examples.k8s.io/mysql-cinder-pd/README.md"', args=[d.arg(name='volumeID', type=d.T.string)]), withVolumeID(volumeID): { source+: { inlineVolumeSpec+: { cinder+: { volumeID: volumeID } } } }, }, '#claimRef':: d.obj(help='"ObjectReference contains enough information to let you inspect or modify the referred object."'), @@ -101,164 +101,171 @@ csi: { '#controllerExpandSecretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), controllerExpandSecretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), withName(name): { source+: { inlineVolumeSpec+: { csi+: { controllerExpandSecretRef+: { name: name } } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { source+: { inlineVolumeSpec+: { csi+: { controllerExpandSecretRef+: { namespace: namespace } } } } }, }, '#controllerPublishSecretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), controllerPublishSecretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), withName(name): { source+: { inlineVolumeSpec+: { csi+: { controllerPublishSecretRef+: { name: name } } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { source+: { inlineVolumeSpec+: { csi+: { controllerPublishSecretRef+: { namespace: namespace } } } } }, }, + '#nodeExpandSecretRef':: d.obj(help='"SecretReference represents a Secret Reference. 
It has enough information to retrieve secret in any namespace"'), + nodeExpandSecretRef: { + '#withName':: d.fn(help='"name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), + withName(name): { source+: { inlineVolumeSpec+: { csi+: { nodeExpandSecretRef+: { name: name } } } } }, + '#withNamespace':: d.fn(help='"namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), + withNamespace(namespace): { source+: { inlineVolumeSpec+: { csi+: { nodeExpandSecretRef+: { namespace: namespace } } } } }, + }, '#nodePublishSecretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), nodePublishSecretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), withName(name): { source+: { inlineVolumeSpec+: { csi+: { nodePublishSecretRef+: { name: name } } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { source+: { inlineVolumeSpec+: { csi+: { nodePublishSecretRef+: { namespace: namespace } } } } }, }, '#nodeStageSecretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), nodeStageSecretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), withName(name): { source+: { inlineVolumeSpec+: { csi+: { nodeStageSecretRef+: { name: name } } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { source+: { inlineVolumeSpec+: { csi+: { nodeStageSecretRef+: { namespace: namespace } } } } }, }, - '#withDriver':: d.fn(help='"Driver is the name of the driver to use for this volume. Required."', args=[d.arg(name='driver', type=d.T.string)]), + '#withDriver':: d.fn(help='"driver is the name of the driver to use for this volume. Required."', args=[d.arg(name='driver', type=d.T.string)]), withDriver(driver): { source+: { inlineVolumeSpec+: { csi+: { driver: driver } } } }, - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\"."', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType to mount. Must be a filesystem type supported by the host operating system. Ex. 
\\"ext4\\", \\"xfs\\", \\"ntfs\\"."', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { source+: { inlineVolumeSpec+: { csi+: { fsType: fsType } } } }, - '#withReadOnly':: d.fn(help='"Optional: The value to pass to ControllerPublishVolumeRequest. Defaults to false (read/write)."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly value to pass to ControllerPublishVolumeRequest. Defaults to false (read/write)."', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { source+: { inlineVolumeSpec+: { csi+: { readOnly: readOnly } } } }, - '#withVolumeAttributes':: d.fn(help='"Attributes of the volume to publish."', args=[d.arg(name='volumeAttributes', type=d.T.object)]), + '#withVolumeAttributes':: d.fn(help='"volumeAttributes of the volume to publish."', args=[d.arg(name='volumeAttributes', type=d.T.object)]), withVolumeAttributes(volumeAttributes): { source+: { inlineVolumeSpec+: { csi+: { volumeAttributes: volumeAttributes } } } }, - '#withVolumeAttributesMixin':: d.fn(help='"Attributes of the volume to publish."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='volumeAttributes', type=d.T.object)]), + '#withVolumeAttributesMixin':: d.fn(help='"volumeAttributes of the volume to publish."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='volumeAttributes', type=d.T.object)]), withVolumeAttributesMixin(volumeAttributes): { source+: { inlineVolumeSpec+: { csi+: { volumeAttributes+: volumeAttributes } } } }, - '#withVolumeHandle':: d.fn(help='"VolumeHandle is the unique volume name returned by the CSI volume plugin’s CreateVolume to refer to the volume on all subsequent calls. Required."', args=[d.arg(name='volumeHandle', type=d.T.string)]), + '#withVolumeHandle':: d.fn(help='"volumeHandle is the unique volume name returned by the CSI volume plugin’s CreateVolume to refer to the volume on all subsequent calls. Required."', args=[d.arg(name='volumeHandle', type=d.T.string)]), withVolumeHandle(volumeHandle): { source+: { inlineVolumeSpec+: { csi+: { volumeHandle: volumeHandle } } } }, }, '#fc':: d.obj(help='"Represents a Fibre Channel volume. Fibre Channel volumes can only be mounted as read/write once. Fibre Channel volumes support ownership management and SELinux relabeling."'), fc: { - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { source+: { inlineVolumeSpec+: { fc+: { fsType: fsType } } } }, - '#withLun':: d.fn(help='"Optional: FC target lun number"', args=[d.arg(name='lun', type=d.T.integer)]), + '#withLun':: d.fn(help='"lun is Optional: FC target lun number"', args=[d.arg(name='lun', type=d.T.integer)]), withLun(lun): { source+: { inlineVolumeSpec+: { fc+: { lun: lun } } } }, - '#withReadOnly':: d.fn(help='"Optional: Defaults to false (read/write). 
ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly is Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { source+: { inlineVolumeSpec+: { fc+: { readOnly: readOnly } } } }, - '#withTargetWWNs':: d.fn(help='"Optional: FC target worldwide names (WWNs)"', args=[d.arg(name='targetWWNs', type=d.T.array)]), + '#withTargetWWNs':: d.fn(help='"targetWWNs is Optional: FC target worldwide names (WWNs)"', args=[d.arg(name='targetWWNs', type=d.T.array)]), withTargetWWNs(targetWWNs): { source+: { inlineVolumeSpec+: { fc+: { targetWWNs: if std.isArray(v=targetWWNs) then targetWWNs else [targetWWNs] } } } }, - '#withTargetWWNsMixin':: d.fn(help='"Optional: FC target worldwide names (WWNs)"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='targetWWNs', type=d.T.array)]), + '#withTargetWWNsMixin':: d.fn(help='"targetWWNs is Optional: FC target worldwide names (WWNs)"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='targetWWNs', type=d.T.array)]), withTargetWWNsMixin(targetWWNs): { source+: { inlineVolumeSpec+: { fc+: { targetWWNs+: if std.isArray(v=targetWWNs) then targetWWNs else [targetWWNs] } } } }, - '#withWwids':: d.fn(help='"Optional: FC volume world wide identifiers (wwids) Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously."', args=[d.arg(name='wwids', type=d.T.array)]), + '#withWwids':: d.fn(help='"wwids Optional: FC volume world wide identifiers (wwids) Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously."', args=[d.arg(name='wwids', type=d.T.array)]), withWwids(wwids): { source+: { inlineVolumeSpec+: { fc+: { wwids: if std.isArray(v=wwids) then wwids else [wwids] } } } }, - '#withWwidsMixin':: d.fn(help='"Optional: FC volume world wide identifiers (wwids) Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='wwids', type=d.T.array)]), + '#withWwidsMixin':: d.fn(help='"wwids Optional: FC volume world wide identifiers (wwids) Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='wwids', type=d.T.array)]), withWwidsMixin(wwids): { source+: { inlineVolumeSpec+: { fc+: { wwids+: if std.isArray(v=wwids) then wwids else [wwids] } } } }, }, '#flexVolume':: d.obj(help='"FlexPersistentVolumeSource represents a generic persistent volume resource that is provisioned/attached using an exec based plugin."'), flexVolume: { '#secretRef':: d.obj(help='"SecretReference represents a Secret Reference. 
It has enough information to retrieve secret in any namespace"'), secretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), withName(name): { source+: { inlineVolumeSpec+: { flexVolume+: { secretRef+: { name: name } } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { source+: { inlineVolumeSpec+: { flexVolume+: { secretRef+: { namespace: namespace } } } } }, }, - '#withDriver':: d.fn(help='"Driver is the name of the driver to use for this volume."', args=[d.arg(name='driver', type=d.T.string)]), + '#withDriver':: d.fn(help='"driver is the name of the driver to use for this volume."', args=[d.arg(name='driver', type=d.T.string)]), withDriver(driver): { source+: { inlineVolumeSpec+: { flexVolume+: { driver: driver } } } }, - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". The default filesystem depends on FlexVolume script."', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType is the Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". The default filesystem depends on FlexVolume script."', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { source+: { inlineVolumeSpec+: { flexVolume+: { fsType: fsType } } } }, - '#withOptions':: d.fn(help='"Optional: Extra command options if any."', args=[d.arg(name='options', type=d.T.object)]), + '#withOptions':: d.fn(help='"options is Optional: this field holds extra command options if any."', args=[d.arg(name='options', type=d.T.object)]), withOptions(options): { source+: { inlineVolumeSpec+: { flexVolume+: { options: options } } } }, - '#withOptionsMixin':: d.fn(help='"Optional: Extra command options if any."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='options', type=d.T.object)]), + '#withOptionsMixin':: d.fn(help='"options is Optional: this field holds extra command options if any."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='options', type=d.T.object)]), withOptionsMixin(options): { source+: { inlineVolumeSpec+: { flexVolume+: { options+: options } } } }, - '#withReadOnly':: d.fn(help='"Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly is Optional: defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { source+: { inlineVolumeSpec+: { flexVolume+: { readOnly: readOnly } } } }, }, '#flocker':: d.obj(help='"Represents a Flocker volume mounted by the Flocker agent. One and only one of datasetName and datasetUUID should be set. 
Flocker volumes do not support ownership management or SELinux relabeling."'), flocker: { - '#withDatasetName':: d.fn(help='"Name of the dataset stored as metadata -> name on the dataset for Flocker should be considered as deprecated"', args=[d.arg(name='datasetName', type=d.T.string)]), + '#withDatasetName':: d.fn(help='"datasetName is Name of the dataset stored as metadata -> name on the dataset for Flocker should be considered as deprecated"', args=[d.arg(name='datasetName', type=d.T.string)]), withDatasetName(datasetName): { source+: { inlineVolumeSpec+: { flocker+: { datasetName: datasetName } } } }, - '#withDatasetUUID':: d.fn(help='"UUID of the dataset. This is unique identifier of a Flocker dataset"', args=[d.arg(name='datasetUUID', type=d.T.string)]), + '#withDatasetUUID':: d.fn(help='"datasetUUID is the UUID of the dataset. This is unique identifier of a Flocker dataset"', args=[d.arg(name='datasetUUID', type=d.T.string)]), withDatasetUUID(datasetUUID): { source+: { inlineVolumeSpec+: { flocker+: { datasetUUID: datasetUUID } } } }, }, '#gcePersistentDisk':: d.obj(help='"Represents a Persistent Disk resource in Google Compute Engine.\\n\\nA GCE PD must exist before mounting to a container. The disk must also be in the same GCE project and zone as the kubelet. A GCE PD can only be mounted as read/write once or read-only many times. GCE PDs support ownership management and SELinux relabeling."'), gcePersistentDisk: { - '#withFsType':: d.fn(help='"Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType is filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { source+: { inlineVolumeSpec+: { gcePersistentDisk+: { fsType: fsType } } } }, - '#withPartition':: d.fn(help='"The partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \\"1\\". Similarly, the volume partition for /dev/sda is \\"0\\" (or you can leave the property empty). More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='partition', type=d.T.integer)]), + '#withPartition':: d.fn(help='"partition is the partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \\"1\\". Similarly, the volume partition for /dev/sda is \\"0\\" (or you can leave the property empty). More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='partition', type=d.T.integer)]), withPartition(partition): { source+: { inlineVolumeSpec+: { gcePersistentDisk+: { partition: partition } } } }, - '#withPdName':: d.fn(help='"Unique name of the PD resource in GCE. Used to identify the disk in GCE. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='pdName', type=d.T.string)]), + '#withPdName':: d.fn(help='"pdName is unique name of the PD resource in GCE. Used to identify the disk in GCE. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='pdName', type=d.T.string)]), withPdName(pdName): { source+: { inlineVolumeSpec+: { gcePersistentDisk+: { pdName: pdName } } } }, - '#withReadOnly':: d.fn(help='"ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { source+: { inlineVolumeSpec+: { gcePersistentDisk+: { readOnly: readOnly } } } }, }, '#glusterfs':: d.obj(help='"Represents a Glusterfs mount that lasts the lifetime of a pod. Glusterfs volumes do not support ownership management or SELinux relabeling."'), glusterfs: { - '#withEndpoints':: d.fn(help='"EndpointsName is the endpoint name that details Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='endpoints', type=d.T.string)]), + '#withEndpoints':: d.fn(help='"endpoints is the endpoint name that details Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='endpoints', type=d.T.string)]), withEndpoints(endpoints): { source+: { inlineVolumeSpec+: { glusterfs+: { endpoints: endpoints } } } }, - '#withEndpointsNamespace':: d.fn(help='"EndpointsNamespace is the namespace that contains Glusterfs endpoint. If this field is empty, the EndpointNamespace defaults to the same namespace as the bound PVC. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='endpointsNamespace', type=d.T.string)]), + '#withEndpointsNamespace':: d.fn(help='"endpointsNamespace is the namespace that contains Glusterfs endpoint. If this field is empty, the EndpointNamespace defaults to the same namespace as the bound PVC. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='endpointsNamespace', type=d.T.string)]), withEndpointsNamespace(endpointsNamespace): { source+: { inlineVolumeSpec+: { glusterfs+: { endpointsNamespace: endpointsNamespace } } } }, - '#withPath':: d.fn(help='"Path is the Glusterfs volume path. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='path', type=d.T.string)]), + '#withPath':: d.fn(help='"path is the Glusterfs volume path. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='path', type=d.T.string)]), withPath(path): { source+: { inlineVolumeSpec+: { glusterfs+: { path: path } } } }, - '#withReadOnly':: d.fn(help='"ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions. Defaults to false. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly here will force the Glusterfs volume to be mounted with read-only permissions. Defaults to false. 
More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod"', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { source+: { inlineVolumeSpec+: { glusterfs+: { readOnly: readOnly } } } }, }, '#hostPath':: d.obj(help='"Represents a host path mapped into a pod. Host path volumes do not support ownership management or SELinux relabeling."'), hostPath: { - '#withPath':: d.fn(help='"Path of the directory on the host. If the path is a symlink, it will follow the link to the real path. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath"', args=[d.arg(name='path', type=d.T.string)]), + '#withPath':: d.fn(help='"path of the directory on the host. If the path is a symlink, it will follow the link to the real path. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath"', args=[d.arg(name='path', type=d.T.string)]), withPath(path): { source+: { inlineVolumeSpec+: { hostPath+: { path: path } } } }, - '#withType':: d.fn(help='"Type for HostPath Volume Defaults to \\"\\" More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath"', args=[d.arg(name='type', type=d.T.string)]), + '#withType':: d.fn(help='"type for HostPath Volume Defaults to \\"\\" More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath"', args=[d.arg(name='type', type=d.T.string)]), withType(type): { source+: { inlineVolumeSpec+: { hostPath+: { type: type } } } }, }, '#iscsi':: d.obj(help='"ISCSIPersistentVolumeSource represents an ISCSI disk. ISCSI volumes can only be mounted as read/write once. ISCSI volumes support ownership management and SELinux relabeling."'), iscsi: { '#secretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), secretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), withName(name): { source+: { inlineVolumeSpec+: { iscsi+: { secretRef+: { name: name } } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { source+: { inlineVolumeSpec+: { iscsi+: { secretRef+: { namespace: namespace } } } } }, }, - '#withChapAuthDiscovery':: d.fn(help='"whether support iSCSI Discovery CHAP authentication"', args=[d.arg(name='chapAuthDiscovery', type=d.T.boolean)]), + '#withChapAuthDiscovery':: d.fn(help='"chapAuthDiscovery defines whether support iSCSI Discovery CHAP authentication"', args=[d.arg(name='chapAuthDiscovery', type=d.T.boolean)]), withChapAuthDiscovery(chapAuthDiscovery): { source+: { inlineVolumeSpec+: { iscsi+: { chapAuthDiscovery: chapAuthDiscovery } } } }, - '#withChapAuthSession':: d.fn(help='"whether support iSCSI Session CHAP authentication"', args=[d.arg(name='chapAuthSession', type=d.T.boolean)]), + '#withChapAuthSession':: d.fn(help='"chapAuthSession defines whether support iSCSI Session CHAP authentication"', args=[d.arg(name='chapAuthSession', type=d.T.boolean)]), withChapAuthSession(chapAuthSession): { source+: { inlineVolumeSpec+: { iscsi+: { chapAuthSession: chapAuthSession } } } }, - 
'#withFsType':: d.fn(help='"Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi"', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType is the filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi"', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { source+: { inlineVolumeSpec+: { iscsi+: { fsType: fsType } } } }, - '#withInitiatorName':: d.fn(help='"Custom iSCSI Initiator Name. If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface : will be created for the connection."', args=[d.arg(name='initiatorName', type=d.T.string)]), + '#withInitiatorName':: d.fn(help='"initiatorName is the custom iSCSI Initiator Name. If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface : will be created for the connection."', args=[d.arg(name='initiatorName', type=d.T.string)]), withInitiatorName(initiatorName): { source+: { inlineVolumeSpec+: { iscsi+: { initiatorName: initiatorName } } } }, - '#withIqn':: d.fn(help='"Target iSCSI Qualified Name."', args=[d.arg(name='iqn', type=d.T.string)]), + '#withIqn':: d.fn(help='"iqn is Target iSCSI Qualified Name."', args=[d.arg(name='iqn', type=d.T.string)]), withIqn(iqn): { source+: { inlineVolumeSpec+: { iscsi+: { iqn: iqn } } } }, - '#withIscsiInterface':: d.fn(help="\"iSCSI Interface Name that uses an iSCSI transport. Defaults to 'default' (tcp).\"", args=[d.arg(name='iscsiInterface', type=d.T.string)]), + '#withIscsiInterface':: d.fn(help="\"iscsiInterface is the interface Name that uses an iSCSI transport. Defaults to 'default' (tcp).\"", args=[d.arg(name='iscsiInterface', type=d.T.string)]), withIscsiInterface(iscsiInterface): { source+: { inlineVolumeSpec+: { iscsi+: { iscsiInterface: iscsiInterface } } } }, - '#withLun':: d.fn(help='"iSCSI Target Lun number."', args=[d.arg(name='lun', type=d.T.integer)]), + '#withLun':: d.fn(help='"lun is iSCSI Target Lun number."', args=[d.arg(name='lun', type=d.T.integer)]), withLun(lun): { source+: { inlineVolumeSpec+: { iscsi+: { lun: lun } } } }, - '#withPortals':: d.fn(help='"iSCSI Target Portal List. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260)."', args=[d.arg(name='portals', type=d.T.array)]), + '#withPortals':: d.fn(help='"portals is the iSCSI Target Portal List. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260)."', args=[d.arg(name='portals', type=d.T.array)]), withPortals(portals): { source+: { inlineVolumeSpec+: { iscsi+: { portals: if std.isArray(v=portals) then portals else [portals] } } } }, - '#withPortalsMixin':: d.fn(help='"iSCSI Target Portal List. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260)."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='portals', type=d.T.array)]), + '#withPortalsMixin':: d.fn(help='"portals is the iSCSI Target Portal List. 
The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260)."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='portals', type=d.T.array)]), withPortalsMixin(portals): { source+: { inlineVolumeSpec+: { iscsi+: { portals+: if std.isArray(v=portals) then portals else [portals] } } } }, - '#withReadOnly':: d.fn(help='"ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false."', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { source+: { inlineVolumeSpec+: { iscsi+: { readOnly: readOnly } } } }, - '#withTargetPortal':: d.fn(help='"iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260)."', args=[d.arg(name='targetPortal', type=d.T.string)]), + '#withTargetPortal':: d.fn(help='"targetPortal is iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260)."', args=[d.arg(name='targetPortal', type=d.T.string)]), withTargetPortal(targetPortal): { source+: { inlineVolumeSpec+: { iscsi+: { targetPortal: targetPortal } } } }, }, '#local':: d.obj(help='"Local represents directly-attached storage with node affinity (Beta feature)"'), 'local': { - '#withFsType':: d.fn(help='"Filesystem type to mount. It applies only when the Path is a block device. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". The default value is to auto-select a fileystem if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType is the filesystem type to mount. It applies only when the Path is a block device. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". The default value is to auto-select a filesystem if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { source+: { inlineVolumeSpec+: { 'local'+: { fsType: fsType } } } }, - '#withPath':: d.fn(help='"The full path to the volume on the node. It can be either a directory or block device (disk, partition, ...)."', args=[d.arg(name='path', type=d.T.string)]), + '#withPath':: d.fn(help='"path of the full path to the volume on the node. It can be either a directory or block device (disk, partition, ...)."', args=[d.arg(name='path', type=d.T.string)]), withPath(path): { source+: { inlineVolumeSpec+: { 'local'+: { path: path } } } }, }, '#nfs':: d.obj(help='"Represents an NFS mount that lasts the lifetime of a pod. NFS volumes do not support ownership management or SELinux relabeling."'), nfs: { - '#withPath':: d.fn(help='"Path that is exported by the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs"', args=[d.arg(name='path', type=d.T.string)]), + '#withPath':: d.fn(help='"path that is exported by the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs"', args=[d.arg(name='path', type=d.T.string)]), withPath(path): { source+: { inlineVolumeSpec+: { nfs+: { path: path } } } }, - '#withReadOnly':: d.fn(help='"ReadOnly here will force the NFS export to be mounted with read-only permissions. Defaults to false. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs"', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly here will force the NFS export to be mounted with read-only permissions. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs"', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { source+: { inlineVolumeSpec+: { nfs+: { readOnly: readOnly } } } }, - '#withServer':: d.fn(help='"Server is the hostname or IP address of the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs"', args=[d.arg(name='server', type=d.T.string)]), + '#withServer':: d.fn(help='"server is the hostname or IP address of the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs"', args=[d.arg(name='server', type=d.T.string)]), withServer(server): { source+: { inlineVolumeSpec+: { nfs+: { server: server } } } }, }, '#nodeAffinity':: d.obj(help='"VolumeNodeAffinity defines constraints that limit what nodes this volume can be accessed from."'), @@ -273,87 +280,87 @@ }, '#photonPersistentDisk':: d.obj(help='"Represents a Photon Controller persistent disk resource."'), photonPersistentDisk: { - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { source+: { inlineVolumeSpec+: { photonPersistentDisk+: { fsType: fsType } } } }, - '#withPdID':: d.fn(help='"ID that identifies Photon Controller persistent disk"', args=[d.arg(name='pdID', type=d.T.string)]), + '#withPdID':: d.fn(help='"pdID is the ID that identifies Photon Controller persistent disk"', args=[d.arg(name='pdID', type=d.T.string)]), withPdID(pdID): { source+: { inlineVolumeSpec+: { photonPersistentDisk+: { pdID: pdID } } } }, }, '#portworxVolume':: d.obj(help='"PortworxVolumeSource represents a Portworx volume resource."'), portworxVolume: { - '#withFsType':: d.fn(help='"FSType represents the filesystem type to mount Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fSType represents the filesystem type to mount Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { source+: { inlineVolumeSpec+: { portworxVolume+: { fsType: fsType } } } }, - '#withReadOnly':: d.fn(help='"Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly defaults to false (read/write). 
ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { source+: { inlineVolumeSpec+: { portworxVolume+: { readOnly: readOnly } } } }, - '#withVolumeID':: d.fn(help='"VolumeID uniquely identifies a Portworx volume"', args=[d.arg(name='volumeID', type=d.T.string)]), + '#withVolumeID':: d.fn(help='"volumeID uniquely identifies a Portworx volume"', args=[d.arg(name='volumeID', type=d.T.string)]), withVolumeID(volumeID): { source+: { inlineVolumeSpec+: { portworxVolume+: { volumeID: volumeID } } } }, }, '#quobyte':: d.obj(help='"Represents a Quobyte mount that lasts the lifetime of a pod. Quobyte volumes do not support ownership management or SELinux relabeling."'), quobyte: { - '#withGroup':: d.fn(help='"Group to map volume access to Default is no group"', args=[d.arg(name='group', type=d.T.string)]), + '#withGroup':: d.fn(help='"group to map volume access to Default is no group"', args=[d.arg(name='group', type=d.T.string)]), withGroup(group): { source+: { inlineVolumeSpec+: { quobyte+: { group: group } } } }, - '#withReadOnly':: d.fn(help='"ReadOnly here will force the Quobyte volume to be mounted with read-only permissions. Defaults to false."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly here will force the Quobyte volume to be mounted with read-only permissions. Defaults to false."', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { source+: { inlineVolumeSpec+: { quobyte+: { readOnly: readOnly } } } }, - '#withRegistry':: d.fn(help='"Registry represents a single or multiple Quobyte Registry services specified as a string as host:port pair (multiple entries are separated with commas) which acts as the central registry for volumes"', args=[d.arg(name='registry', type=d.T.string)]), + '#withRegistry':: d.fn(help='"registry represents a single or multiple Quobyte Registry services specified as a string as host:port pair (multiple entries are separated with commas) which acts as the central registry for volumes"', args=[d.arg(name='registry', type=d.T.string)]), withRegistry(registry): { source+: { inlineVolumeSpec+: { quobyte+: { registry: registry } } } }, - '#withTenant':: d.fn(help='"Tenant owning the given Quobyte volume in the Backend Used with dynamically provisioned Quobyte volumes, value is set by the plugin"', args=[d.arg(name='tenant', type=d.T.string)]), + '#withTenant':: d.fn(help='"tenant owning the given Quobyte volume in the Backend Used with dynamically provisioned Quobyte volumes, value is set by the plugin"', args=[d.arg(name='tenant', type=d.T.string)]), withTenant(tenant): { source+: { inlineVolumeSpec+: { quobyte+: { tenant: tenant } } } }, - '#withUser':: d.fn(help='"User to map volume access to Defaults to serivceaccount user"', args=[d.arg(name='user', type=d.T.string)]), + '#withUser':: d.fn(help='"user to map volume access to Defaults to serivceaccount user"', args=[d.arg(name='user', type=d.T.string)]), withUser(user): { source+: { inlineVolumeSpec+: { quobyte+: { user: user } } } }, - '#withVolume':: d.fn(help='"Volume is a string that references an already created Quobyte volume by name."', args=[d.arg(name='volume', type=d.T.string)]), + '#withVolume':: d.fn(help='"volume is a string that references an already created Quobyte volume by name."', args=[d.arg(name='volume', type=d.T.string)]), withVolume(volume): { source+: { inlineVolumeSpec+: { quobyte+: { volume: volume } } } }, }, '#rbd':: 
d.obj(help='"Represents a Rados Block Device mount that lasts the lifetime of a pod. RBD volumes support ownership management and SELinux relabeling."'), rbd: { '#secretRef':: d.obj(help='"SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace"'), secretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), withName(name): { source+: { inlineVolumeSpec+: { rbd+: { secretRef+: { name: name } } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { source+: { inlineVolumeSpec+: { rbd+: { secretRef+: { namespace: namespace } } } } }, }, - '#withFsType':: d.fn(help='"Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd"', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType is the filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd"', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { source+: { inlineVolumeSpec+: { rbd+: { fsType: fsType } } } }, - '#withImage':: d.fn(help='"The rados image name. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='image', type=d.T.string)]), + '#withImage':: d.fn(help='"image is the rados image name. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='image', type=d.T.string)]), withImage(image): { source+: { inlineVolumeSpec+: { rbd+: { image: image } } } }, - '#withKeyring':: d.fn(help='"Keyring is the path to key ring for RBDUser. Default is /etc/ceph/keyring. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='keyring', type=d.T.string)]), + '#withKeyring':: d.fn(help='"keyring is the path to key ring for RBDUser. Default is /etc/ceph/keyring. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='keyring', type=d.T.string)]), withKeyring(keyring): { source+: { inlineVolumeSpec+: { rbd+: { keyring: keyring } } } }, - '#withMonitors':: d.fn(help='"A collection of Ceph monitors. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='monitors', type=d.T.array)]), + '#withMonitors':: d.fn(help='"monitors is a collection of Ceph monitors. 
More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='monitors', type=d.T.array)]), withMonitors(monitors): { source+: { inlineVolumeSpec+: { rbd+: { monitors: if std.isArray(v=monitors) then monitors else [monitors] } } } }, - '#withMonitorsMixin':: d.fn(help='"A collection of Ceph monitors. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='monitors', type=d.T.array)]), + '#withMonitorsMixin':: d.fn(help='"monitors is a collection of Ceph monitors. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='monitors', type=d.T.array)]), withMonitorsMixin(monitors): { source+: { inlineVolumeSpec+: { rbd+: { monitors+: if std.isArray(v=monitors) then monitors else [monitors] } } } }, - '#withPool':: d.fn(help='"The rados pool name. Default is rbd. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='pool', type=d.T.string)]), + '#withPool':: d.fn(help='"pool is the rados pool name. Default is rbd. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='pool', type=d.T.string)]), withPool(pool): { source+: { inlineVolumeSpec+: { rbd+: { pool: pool } } } }, - '#withReadOnly':: d.fn(help='"ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { source+: { inlineVolumeSpec+: { rbd+: { readOnly: readOnly } } } }, - '#withUser':: d.fn(help='"The rados user name. Default is admin. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='user', type=d.T.string)]), + '#withUser':: d.fn(help='"user is the rados user name. Default is admin. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it"', args=[d.arg(name='user', type=d.T.string)]), withUser(user): { source+: { inlineVolumeSpec+: { rbd+: { user: user } } } }, }, '#scaleIO':: d.obj(help='"ScaleIOPersistentVolumeSource represents a persistent ScaleIO volume"'), scaleIO: { '#secretRef':: d.obj(help='"SecretReference represents a Secret Reference. 
It has enough information to retrieve secret in any namespace"'), secretRef: { - '#withName':: d.fn(help='"Name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"name is unique within a namespace to reference a secret resource."', args=[d.arg(name='name', type=d.T.string)]), withName(name): { source+: { inlineVolumeSpec+: { scaleIO+: { secretRef+: { name: name } } } } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"namespace defines the space within which the secret name must be unique."', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { source+: { inlineVolumeSpec+: { scaleIO+: { secretRef+: { namespace: namespace } } } } }, }, - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Default is \\"xfs\\', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Default is \\"xfs\\', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { source+: { inlineVolumeSpec+: { scaleIO+: { fsType: fsType } } } }, - '#withGateway':: d.fn(help='"The host address of the ScaleIO API Gateway."', args=[d.arg(name='gateway', type=d.T.string)]), + '#withGateway':: d.fn(help='"gateway is the host address of the ScaleIO API Gateway."', args=[d.arg(name='gateway', type=d.T.string)]), withGateway(gateway): { source+: { inlineVolumeSpec+: { scaleIO+: { gateway: gateway } } } }, - '#withProtectionDomain':: d.fn(help='"The name of the ScaleIO Protection Domain for the configured storage."', args=[d.arg(name='protectionDomain', type=d.T.string)]), + '#withProtectionDomain':: d.fn(help='"protectionDomain is the name of the ScaleIO Protection Domain for the configured storage."', args=[d.arg(name='protectionDomain', type=d.T.string)]), withProtectionDomain(protectionDomain): { source+: { inlineVolumeSpec+: { scaleIO+: { protectionDomain: protectionDomain } } } }, - '#withReadOnly':: d.fn(help='"Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { source+: { inlineVolumeSpec+: { scaleIO+: { readOnly: readOnly } } } }, - '#withSslEnabled':: d.fn(help='"Flag to enable/disable SSL communication with Gateway, default false"', args=[d.arg(name='sslEnabled', type=d.T.boolean)]), + '#withSslEnabled':: d.fn(help='"sslEnabled is the flag to enable/disable SSL communication with Gateway, default false"', args=[d.arg(name='sslEnabled', type=d.T.boolean)]), withSslEnabled(sslEnabled): { source+: { inlineVolumeSpec+: { scaleIO+: { sslEnabled: sslEnabled } } } }, - '#withStorageMode':: d.fn(help='"Indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. 
Default is ThinProvisioned."', args=[d.arg(name='storageMode', type=d.T.string)]), + '#withStorageMode':: d.fn(help='"storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. Default is ThinProvisioned."', args=[d.arg(name='storageMode', type=d.T.string)]), withStorageMode(storageMode): { source+: { inlineVolumeSpec+: { scaleIO+: { storageMode: storageMode } } } }, - '#withStoragePool':: d.fn(help='"The ScaleIO Storage Pool associated with the protection domain."', args=[d.arg(name='storagePool', type=d.T.string)]), + '#withStoragePool':: d.fn(help='"storagePool is the ScaleIO Storage Pool associated with the protection domain."', args=[d.arg(name='storagePool', type=d.T.string)]), withStoragePool(storagePool): { source+: { inlineVolumeSpec+: { scaleIO+: { storagePool: storagePool } } } }, - '#withSystem':: d.fn(help='"The name of the storage system as configured in ScaleIO."', args=[d.arg(name='system', type=d.T.string)]), + '#withSystem':: d.fn(help='"system is the name of the storage system as configured in ScaleIO."', args=[d.arg(name='system', type=d.T.string)]), withSystem(system): { source+: { inlineVolumeSpec+: { scaleIO+: { system: system } } } }, - '#withVolumeName':: d.fn(help='"The name of a volume already created in the ScaleIO system that is associated with this volume source."', args=[d.arg(name='volumeName', type=d.T.string)]), + '#withVolumeName':: d.fn(help='"volumeName is the name of a volume already created in the ScaleIO system that is associated with this volume source."', args=[d.arg(name='volumeName', type=d.T.string)]), withVolumeName(volumeName): { source+: { inlineVolumeSpec+: { scaleIO+: { volumeName: volumeName } } } }, }, '#storageos':: d.obj(help='"Represents a StorageOS persistent volume resource."'), @@ -375,51 +382,53 @@ '#withUid':: d.fn(help='"UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { source+: { inlineVolumeSpec+: { storageos+: { secretRef+: { uid: uid } } } } }, }, - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { source+: { inlineVolumeSpec+: { storageos+: { fsType: fsType } } } }, - '#withReadOnly':: d.fn(help='"Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), + '#withReadOnly':: d.fn(help='"readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."', args=[d.arg(name='readOnly', type=d.T.boolean)]), withReadOnly(readOnly): { source+: { inlineVolumeSpec+: { storageos+: { readOnly: readOnly } } } }, - '#withVolumeName':: d.fn(help='"VolumeName is the human-readable name of the StorageOS volume. Volume names are only unique within a namespace."', args=[d.arg(name='volumeName', type=d.T.string)]), + '#withVolumeName':: d.fn(help='"volumeName is the human-readable name of the StorageOS volume. 
Volume names are only unique within a namespace."', args=[d.arg(name='volumeName', type=d.T.string)]), withVolumeName(volumeName): { source+: { inlineVolumeSpec+: { storageos+: { volumeName: volumeName } } } }, - '#withVolumeNamespace':: d.fn(help="\"VolumeNamespace specifies the scope of the volume within StorageOS. If no namespace is specified then the Pod's namespace will be used. This allows the Kubernetes name scoping to be mirrored within StorageOS for tighter integration. Set VolumeName to any name to override the default behaviour. Set to \\\"default\\\" if you are not using namespaces within StorageOS. Namespaces that do not pre-exist within StorageOS will be created.\"", args=[d.arg(name='volumeNamespace', type=d.T.string)]), + '#withVolumeNamespace':: d.fn(help="\"volumeNamespace specifies the scope of the volume within StorageOS. If no namespace is specified then the Pod's namespace will be used. This allows the Kubernetes name scoping to be mirrored within StorageOS for tighter integration. Set VolumeName to any name to override the default behaviour. Set to \\\"default\\\" if you are not using namespaces within StorageOS. Namespaces that do not pre-exist within StorageOS will be created.\"", args=[d.arg(name='volumeNamespace', type=d.T.string)]), withVolumeNamespace(volumeNamespace): { source+: { inlineVolumeSpec+: { storageos+: { volumeNamespace: volumeNamespace } } } }, }, '#vsphereVolume':: d.obj(help='"Represents a vSphere volume resource."'), vsphereVolume: { - '#withFsType':: d.fn(help='"Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), + '#withFsType':: d.fn(help='"fsType is filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \\"ext4\\", \\"xfs\\", \\"ntfs\\". 
Implicitly inferred to be \\"ext4\\" if unspecified."', args=[d.arg(name='fsType', type=d.T.string)]), withFsType(fsType): { source+: { inlineVolumeSpec+: { vsphereVolume+: { fsType: fsType } } } }, - '#withStoragePolicyID':: d.fn(help='"Storage Policy Based Management (SPBM) profile ID associated with the StoragePolicyName."', args=[d.arg(name='storagePolicyID', type=d.T.string)]), + '#withStoragePolicyID':: d.fn(help='"storagePolicyID is the storage Policy Based Management (SPBM) profile ID associated with the StoragePolicyName."', args=[d.arg(name='storagePolicyID', type=d.T.string)]), withStoragePolicyID(storagePolicyID): { source+: { inlineVolumeSpec+: { vsphereVolume+: { storagePolicyID: storagePolicyID } } } }, - '#withStoragePolicyName':: d.fn(help='"Storage Policy Based Management (SPBM) profile name."', args=[d.arg(name='storagePolicyName', type=d.T.string)]), + '#withStoragePolicyName':: d.fn(help='"storagePolicyName is the storage Policy Based Management (SPBM) profile name."', args=[d.arg(name='storagePolicyName', type=d.T.string)]), withStoragePolicyName(storagePolicyName): { source+: { inlineVolumeSpec+: { vsphereVolume+: { storagePolicyName: storagePolicyName } } } }, - '#withVolumePath':: d.fn(help='"Path that identifies vSphere volume vmdk"', args=[d.arg(name='volumePath', type=d.T.string)]), + '#withVolumePath':: d.fn(help='"volumePath is the path that identifies vSphere volume vmdk"', args=[d.arg(name='volumePath', type=d.T.string)]), withVolumePath(volumePath): { source+: { inlineVolumeSpec+: { vsphereVolume+: { volumePath: volumePath } } } }, }, - '#withAccessModes':: d.fn(help='"AccessModes contains all ways the volume can be mounted. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes"', args=[d.arg(name='accessModes', type=d.T.array)]), + '#withAccessModes':: d.fn(help='"accessModes contains all ways the volume can be mounted. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes"', args=[d.arg(name='accessModes', type=d.T.array)]), withAccessModes(accessModes): { source+: { inlineVolumeSpec+: { accessModes: if std.isArray(v=accessModes) then accessModes else [accessModes] } } }, - '#withAccessModesMixin':: d.fn(help='"AccessModes contains all ways the volume can be mounted. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='accessModes', type=d.T.array)]), + '#withAccessModesMixin':: d.fn(help='"accessModes contains all ways the volume can be mounted. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='accessModes', type=d.T.array)]), withAccessModesMixin(accessModes): { source+: { inlineVolumeSpec+: { accessModes+: if std.isArray(v=accessModes) then accessModes else [accessModes] } } }, - '#withCapacity':: d.fn(help="\"A description of the persistent volume's resources and capacity. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity\"", args=[d.arg(name='capacity', type=d.T.object)]), + '#withCapacity':: d.fn(help="\"capacity is the description of the persistent volume's resources and capacity. 
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity\"", args=[d.arg(name='capacity', type=d.T.object)]), withCapacity(capacity): { source+: { inlineVolumeSpec+: { capacity: capacity } } }, - '#withCapacityMixin':: d.fn(help="\"A description of the persistent volume's resources and capacity. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='capacity', type=d.T.object)]), + '#withCapacityMixin':: d.fn(help="\"capacity is the description of the persistent volume's resources and capacity. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='capacity', type=d.T.object)]), withCapacityMixin(capacity): { source+: { inlineVolumeSpec+: { capacity+: capacity } } }, - '#withMountOptions':: d.fn(help='"A list of mount options, e.g. [\\"ro\\", \\"soft\\"]. Not validated - mount will simply fail if one is invalid. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#mount-options"', args=[d.arg(name='mountOptions', type=d.T.array)]), + '#withMountOptions':: d.fn(help='"mountOptions is the list of mount options, e.g. [\\"ro\\", \\"soft\\"]. Not validated - mount will simply fail if one is invalid. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#mount-options"', args=[d.arg(name='mountOptions', type=d.T.array)]), withMountOptions(mountOptions): { source+: { inlineVolumeSpec+: { mountOptions: if std.isArray(v=mountOptions) then mountOptions else [mountOptions] } } }, - '#withMountOptionsMixin':: d.fn(help='"A list of mount options, e.g. [\\"ro\\", \\"soft\\"]. Not validated - mount will simply fail if one is invalid. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#mount-options"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='mountOptions', type=d.T.array)]), + '#withMountOptionsMixin':: d.fn(help='"mountOptions is the list of mount options, e.g. [\\"ro\\", \\"soft\\"]. Not validated - mount will simply fail if one is invalid. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#mount-options"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='mountOptions', type=d.T.array)]), withMountOptionsMixin(mountOptions): { source+: { inlineVolumeSpec+: { mountOptions+: if std.isArray(v=mountOptions) then mountOptions else [mountOptions] } } }, - '#withPersistentVolumeReclaimPolicy':: d.fn(help='"What happens to a persistent volume when released from its claim. Valid options are Retain (default for manually created PersistentVolumes), Delete (default for dynamically provisioned PersistentVolumes), and Recycle (deprecated). Recycle must be supported by the volume plugin underlying this PersistentVolume. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#reclaiming"', args=[d.arg(name='persistentVolumeReclaimPolicy', type=d.T.string)]), + '#withPersistentVolumeReclaimPolicy':: d.fn(help='"persistentVolumeReclaimPolicy defines what happens to a persistent volume when released from its claim. Valid options are Retain (default for manually created PersistentVolumes), Delete (default for dynamically provisioned PersistentVolumes), and Recycle (deprecated). Recycle must be supported by the volume plugin underlying this PersistentVolume. 
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#reclaiming"', args=[d.arg(name='persistentVolumeReclaimPolicy', type=d.T.string)]), withPersistentVolumeReclaimPolicy(persistentVolumeReclaimPolicy): { source+: { inlineVolumeSpec+: { persistentVolumeReclaimPolicy: persistentVolumeReclaimPolicy } } }, - '#withStorageClassName':: d.fn(help='"Name of StorageClass to which this persistent volume belongs. Empty value means that this volume does not belong to any StorageClass."', args=[d.arg(name='storageClassName', type=d.T.string)]), + '#withStorageClassName':: d.fn(help='"storageClassName is the name of StorageClass to which this persistent volume belongs. Empty value means that this volume does not belong to any StorageClass."', args=[d.arg(name='storageClassName', type=d.T.string)]), withStorageClassName(storageClassName): { source+: { inlineVolumeSpec+: { storageClassName: storageClassName } } }, + '#withVolumeAttributesClassName':: d.fn(help='"Name of VolumeAttributesClass to which this persistent volume belongs. Empty value is not allowed. When this field is not set, it indicates that this volume does not belong to any VolumeAttributesClass. This field is mutable and can be changed by the CSI driver after a volume has been updated successfully to a new class. For an unbound PersistentVolume, the volumeAttributesClassName will be matched with unbound PersistentVolumeClaims during the binding process. This is an alpha field and requires enabling VolumeAttributesClass feature."', args=[d.arg(name='volumeAttributesClassName', type=d.T.string)]), + withVolumeAttributesClassName(volumeAttributesClassName): { source+: { inlineVolumeSpec+: { volumeAttributesClassName: volumeAttributesClassName } } }, '#withVolumeMode':: d.fn(help='"volumeMode defines if a volume is intended to be used with a formatted filesystem or to remain in raw block state. Value of Filesystem is implied when not included in spec."', args=[d.arg(name='volumeMode', type=d.T.string)]), withVolumeMode(volumeMode): { source+: { inlineVolumeSpec+: { volumeMode: volumeMode } } }, }, - '#withPersistentVolumeName':: d.fn(help='"Name of the persistent volume to attach."', args=[d.arg(name='persistentVolumeName', type=d.T.string)]), + '#withPersistentVolumeName':: d.fn(help='"persistentVolumeName represents the name of the persistent volume to attach."', args=[d.arg(name='persistentVolumeName', type=d.T.string)]), withPersistentVolumeName(persistentVolumeName): { source+: { persistentVolumeName: persistentVolumeName } }, }, - '#withAttacher':: d.fn(help='"Attacher indicates the name of the volume driver that MUST handle this request. This is the name returned by GetPluginName()."', args=[d.arg(name='attacher', type=d.T.string)]), + '#withAttacher':: d.fn(help='"attacher indicates the name of the volume driver that MUST handle this request. 
This is the name returned by GetPluginName()."', args=[d.arg(name='attacher', type=d.T.string)]), withAttacher(attacher): { attacher: attacher }, - '#withNodeName':: d.fn(help='"The node that the volume should be attached to."', args=[d.arg(name='nodeName', type=d.T.string)]), + '#withNodeName':: d.fn(help='"nodeName represents the node that the volume should be attached to."', args=[d.arg(name='nodeName', type=d.T.string)]), withNodeName(nodeName): { nodeName: nodeName }, '#mixin': 'ignore', mixin: self, diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/storage/v1/volumeAttachmentStatus.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/storage/v1/volumeAttachmentStatus.libsonnet new file mode 100644 index 00000000000..758a4409e9d --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/storage/v1/volumeAttachmentStatus.libsonnet @@ -0,0 +1,26 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='volumeAttachmentStatus', url='', help='"VolumeAttachmentStatus is the status of a VolumeAttachment request."'), + '#attachError':: d.obj(help='"VolumeError captures an error encountered during a volume operation."'), + attachError: { + '#withMessage':: d.fn(help='"message represents the error encountered during Attach or Detach operation. This string may be logged, so it should not contain sensitive information."', args=[d.arg(name='message', type=d.T.string)]), + withMessage(message): { attachError+: { message: message } }, + '#withTime':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='time', type=d.T.string)]), + withTime(time): { attachError+: { time: time } }, + }, + '#detachError':: d.obj(help='"VolumeError captures an error encountered during a volume operation."'), + detachError: { + '#withMessage':: d.fn(help='"message represents the error encountered during Attach or Detach operation. This string may be logged, so it should not contain sensitive information."', args=[d.arg(name='message', type=d.T.string)]), + withMessage(message): { detachError+: { message: message } }, + '#withTime':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='time', type=d.T.string)]), + withTime(time): { detachError+: { time: time } }, + }, + '#withAttached':: d.fn(help='"attached indicates the volume is successfully attached. This field must only be set by the entity completing the attach operation, i.e. the external-attacher."', args=[d.arg(name='attached', type=d.T.boolean)]), + withAttached(attached): { attached: attached }, + '#withAttachmentMetadata':: d.fn(help='"attachmentMetadata is populated with any information returned by the attach operation, upon successful attach, that must be passed into subsequent WaitForAttach or Mount calls. This field must only be set by the entity completing the attach operation, i.e. 
the external-attacher."', args=[d.arg(name='attachmentMetadata', type=d.T.object)]), + withAttachmentMetadata(attachmentMetadata): { attachmentMetadata: attachmentMetadata }, + '#withAttachmentMetadataMixin':: d.fn(help='"attachmentMetadata is populated with any information returned by the attach operation, upon successful attach, that must be passed into subsequent WaitForAttach or Mount calls. This field must only be set by the entity completing the attach operation, i.e. the external-attacher."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='attachmentMetadata', type=d.T.object)]), + withAttachmentMetadataMixin(attachmentMetadata): { attachmentMetadata+: attachmentMetadata }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1/volumeError.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/storage/v1/volumeError.libsonnet similarity index 69% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1/volumeError.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/storage/v1/volumeError.libsonnet index 87f1e4e18b8..dab32f4fc87 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/storage/v1/volumeError.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/storage/v1/volumeError.libsonnet @@ -1,7 +1,7 @@ { local d = (import 'doc-util/main.libsonnet'), '#':: d.pkg(name='volumeError', url='', help='"VolumeError captures an error encountered during a volume operation."'), - '#withMessage':: d.fn(help='"String detailing the error encountered during Attach or Detach operation. This string may be logged, so it should not contain sensitive information."', args=[d.arg(name='message', type=d.T.string)]), + '#withMessage':: d.fn(help='"message represents the error encountered during Attach or Detach operation. This string may be logged, so it should not contain sensitive information."', args=[d.arg(name='message', type=d.T.string)]), withMessage(message): { message: message }, '#withTime':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='time', type=d.T.string)]), withTime(time): { time: time }, diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/storage/v1/volumeNodeResources.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/storage/v1/volumeNodeResources.libsonnet new file mode 100644 index 00000000000..223de65ed1d --- /dev/null +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/storage/v1/volumeNodeResources.libsonnet @@ -0,0 +1,8 @@ +{ + local d = (import 'doc-util/main.libsonnet'), + '#':: d.pkg(name='volumeNodeResources', url='', help='"VolumeNodeResources is a set of resource limits for scheduling of volumes."'), + '#withCount':: d.fn(help='"count indicates the maximum number of unique volumes managed by the CSI driver that can be used on a node. A volume that is both attached and mounted on a node is considered to be used once, not twice. The same rule applies for a unique volume that is shared among multiple pods on the same node. 
If this field is not specified, then the supported number of volumes on this node is unbounded."', args=[d.arg(name='count', type=d.T.integer)]), + withCount(count): { count: count }, + '#mixin': 'ignore', + mixin: self, +} diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/scheduling/v1alpha1/main.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/storage/v1alpha1/main.libsonnet similarity index 59% rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/scheduling/v1alpha1/main.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/storage/v1alpha1/main.libsonnet index 1b042817725..b477f335984 100644 --- a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/scheduling/v1alpha1/main.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/storage/v1alpha1/main.libsonnet @@ -1,5 +1,5 @@ { local d = (import 'doc-util/main.libsonnet'), '#':: d.pkg(name='v1alpha1', url='', help=''), - priorityClass: (import 'priorityClass.libsonnet'), + volumeAttributesClass: (import 'volumeAttributesClass.libsonnet'), } diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1alpha1/clusterRoleBinding.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/storage/v1alpha1/volumeAttributesClass.libsonnet similarity index 71% rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1alpha1/clusterRoleBinding.libsonnet rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/storage/v1alpha1/volumeAttributesClass.libsonnet index f03fd577ec2..62cc081b0de 100644 --- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/_gen/rbac/v1alpha1/clusterRoleBinding.libsonnet +++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/_gen/storage/v1alpha1/volumeAttributesClass.libsonnet @@ -1,14 +1,12 @@ { local d = (import 'doc-util/main.libsonnet'), - '#':: d.pkg(name='clusterRoleBinding', url='', help='"ClusterRoleBinding references a ClusterRole, but not contain it. It can reference a ClusterRole in the global namespace, and adds who information via Subject. Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 ClusterRoleBinding, and will no longer be served in v1.22."'), + '#':: d.pkg(name='volumeAttributesClass', url='', help='"VolumeAttributesClass represents a specification of mutable volume attributes defined by the CSI driver. The class can be specified during dynamic provisioning of PersistentVolumeClaims, and changed in the PersistentVolumeClaim spec after provisioning."'), '#metadata':: d.obj(help='"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."'), metadata: { - '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. 
More info: http://kubernetes.io/docs/user-guide/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotations':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotations(annotations): { metadata+: { annotations: annotations } }, - '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), + '#withAnnotationsMixin':: d.fn(help='"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='annotations', type=d.T.object)]), withAnnotationsMixin(annotations): { metadata+: { annotations+: annotations } }, - '#withClusterName':: d.fn(help='"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."', args=[d.arg(name='clusterName', type=d.T.string)]), - withClusterName(clusterName): { metadata+: { clusterName: clusterName } }, '#withCreationTimestamp':: d.fn(help='"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."', args=[d.arg(name='creationTimestamp', type=d.T.string)]), withCreationTimestamp(creationTimestamp): { metadata+: { creationTimestamp: creationTimestamp } }, '#withDeletionGracePeriodSeconds':: d.fn(help='"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."', args=[d.arg(name='deletionGracePeriodSeconds', type=d.T.integer)]), @@ -19,21 +17,21 @@ withFinalizers(finalizers): { metadata+: { finalizers: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, '#withFinalizersMixin':: d.fn(help='"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. 
If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='finalizers', type=d.T.array)]), withFinalizersMixin(finalizers): { metadata+: { finalizers+: if std.isArray(v=finalizers) then finalizers else [finalizers] } }, - '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), + '#withGenerateName':: d.fn(help='"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"', args=[d.arg(name='generateName', type=d.T.string)]), withGenerateName(generateName): { metadata+: { generateName: generateName } }, '#withGeneration':: d.fn(help='"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."', args=[d.arg(name='generation', type=d.T.integer)]), withGeneration(generation): { metadata+: { generation: generation } }, - '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabels':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"', args=[d.arg(name='labels', type=d.T.object)]), withLabels(labels): { metadata+: { labels: labels } }, - '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), + '#withLabelsMixin':: d.fn(help='"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='labels', type=d.T.object)]), withLabelsMixin(labels): { metadata+: { labels+: labels } }, '#withManagedFields':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFields(managedFields): { metadata+: { managedFields: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, '#withManagedFieldsMixin':: d.fn(help="\"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\\"ci-cd\\\". The set of fields is always in the version that the workflow used when modifying the object.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='managedFields', type=d.T.array)]), withManagedFieldsMixin(managedFields): { metadata+: { managedFields+: if std.isArray(v=managedFields) then managedFields else [managedFields] } }, - '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"', args=[d.arg(name='name', type=d.T.string)]), + '#withName':: d.fn(help='"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names"', args=[d.arg(name='name', type=d.T.string)]), withName(name): { metadata+: { name: name } }, - '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. 
Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), + '#withNamespace':: d.fn(help='"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces"', args=[d.arg(name='namespace', type=d.T.string)]), withNamespace(namespace): { metadata+: { namespace: namespace } }, '#withOwnerReferences':: d.fn(help='"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."', args=[d.arg(name='ownerReferences', type=d.T.array)]), withOwnerReferences(ownerReferences): { metadata+: { ownerReferences: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, @@ -41,29 +39,22 @@ withOwnerReferencesMixin(ownerReferences): { metadata+: { ownerReferences+: if std.isArray(v=ownerReferences) then ownerReferences else [ownerReferences] } }, '#withResourceVersion':: d.fn(help='"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"', args=[d.arg(name='resourceVersion', type=d.T.string)]), withResourceVersion(resourceVersion): { metadata+: { resourceVersion: resourceVersion } }, - '#withSelfLink':: d.fn(help='"SelfLink is a URL representing this object. Populated by the system. Read-only.\\n\\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."', args=[d.arg(name='selfLink', type=d.T.string)]), + '#withSelfLink':: d.fn(help='"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system."', args=[d.arg(name='selfLink', type=d.T.string)]), withSelfLink(selfLink): { metadata+: { selfLink: selfLink } }, - '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"', args=[d.arg(name='uid', type=d.T.string)]), + '#withUid':: d.fn(help='"UID is the unique in time and space value for this object. 
It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids"', args=[d.arg(name='uid', type=d.T.string)]), withUid(uid): { metadata+: { uid: uid } }, }, - '#new':: d.fn(help='new returns an instance of ClusterRoleBinding', args=[d.arg(name='name', type=d.T.string)]), + '#new':: d.fn(help='new returns an instance of VolumeAttributesClass', args=[d.arg(name='name', type=d.T.string)]), new(name): { - apiVersion: 'rbac.authorization.k8s.io/v1alpha1', - kind: 'ClusterRoleBinding', + apiVersion: 'storage.k8s.io/v1alpha1', + kind: 'VolumeAttributesClass', } + self.metadata.withName(name=name), - '#roleRef':: d.obj(help='"RoleRef contains information that points to the role being used"'), - roleRef: { - '#withApiGroup':: d.fn(help='"APIGroup is the group for the resource being referenced"', args=[d.arg(name='apiGroup', type=d.T.string)]), - withApiGroup(apiGroup): { roleRef+: { apiGroup: apiGroup } }, - '#withKind':: d.fn(help='"Kind is the type of resource being referenced"', args=[d.arg(name='kind', type=d.T.string)]), - withKind(kind): { roleRef+: { kind: kind } }, - '#withName':: d.fn(help='"Name is the name of resource being referenced"', args=[d.arg(name='name', type=d.T.string)]), - withName(name): { roleRef+: { name: name } }, - }, - '#withSubjects':: d.fn(help='"Subjects holds references to the objects the role applies to."', args=[d.arg(name='subjects', type=d.T.array)]), - withSubjects(subjects): { subjects: if std.isArray(v=subjects) then subjects else [subjects] }, - '#withSubjectsMixin':: d.fn(help='"Subjects holds references to the objects the role applies to."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='subjects', type=d.T.array)]), - withSubjectsMixin(subjects): { subjects+: if std.isArray(v=subjects) then subjects else [subjects] }, + '#withDriverName':: d.fn(help='"Name of the CSI driver This field is immutable."', args=[d.arg(name='driverName', type=d.T.string)]), + withDriverName(driverName): { driverName: driverName }, + '#withParameters':: d.fn(help='"parameters hold volume attributes defined by the CSI driver. These values are opaque to the Kubernetes and are passed directly to the CSI driver. The underlying storage provider supports changing these attributes on an existing volume, however the parameters field itself is immutable. To invoke a volume update, a new VolumeAttributesClass should be created with new parameters, and the PersistentVolumeClaim should be updated to reference the new VolumeAttributesClass.\\n\\nThis field is required and must contain at least one key/value pair. The keys cannot be empty, and the maximum number of parameters is 512, with a cumulative max size of 256K. If the CSI driver rejects invalid parameters, the target PersistentVolumeClaim will be set to an \\"Infeasible\\" state in the modifyVolumeStatus field."', args=[d.arg(name='parameters', type=d.T.object)]), + withParameters(parameters): { parameters: parameters }, + '#withParametersMixin':: d.fn(help='"parameters hold volume attributes defined by the CSI driver. These values are opaque to the Kubernetes and are passed directly to the CSI driver. The underlying storage provider supports changing these attributes on an existing volume, however the parameters field itself is immutable. 
To invoke a volume update, a new VolumeAttributesClass should be created with new parameters, and the PersistentVolumeClaim should be updated to reference the new VolumeAttributesClass.\\n\\nThis field is required and must contain at least one key/value pair. The keys cannot be empty, and the maximum number of parameters is 512, with a cumulative max size of 256K. If the CSI driver rejects invalid parameters, the target PersistentVolumeClaim will be set to an \\"Infeasible\\" state in the modifyVolumeStatus field."\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='parameters', type=d.T.object)]),
+  withParametersMixin(parameters): { parameters+: parameters },
   '#mixin': 'ignore',
   mixin: self,
 }
diff --git a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/gen.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/gen.libsonnet
similarity index 87%
rename from example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/gen.libsonnet
rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/gen.libsonnet
index 1dc7b027466..ba0d522a799 100644
--- a/example/tk/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/gen.libsonnet
+++ b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/gen.libsonnet
@@ -1,6 +1,6 @@
 {
   local d = (import 'doc-util/main.libsonnet'),
-  '#':: d.pkg(name='k', url='github.com/jsonnet-libs/k8s-libsonnet/1.21/main.libsonnet', help='Generated Jsonnet library for Kubernetes v1.21'),
+  '#':: d.pkg(name='k', url='github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet', help='Generated Jsonnet library for Kubernetes v1.29'),
   admissionregistration:: (import '_gen/admissionregistration/main.libsonnet'),
   apiregistration:: (import '_gen/apiregistration/main.libsonnet'),
   apiserverinternal:: (import '_gen/apiserverinternal/main.libsonnet'),
@@ -14,12 +14,13 @@
   core:: (import '_gen/core/main.libsonnet'),
   discovery:: (import '_gen/discovery/main.libsonnet'),
   events:: (import '_gen/events/main.libsonnet'),
-  extensions:: (import '_gen/extensions/main.libsonnet'),
   flowcontrol:: (import '_gen/flowcontrol/main.libsonnet'),
+  meta:: (import '_gen/meta/main.libsonnet'),
   networking:: (import '_gen/networking/main.libsonnet'),
   node:: (import '_gen/node/main.libsonnet'),
   policy:: (import '_gen/policy/main.libsonnet'),
   rbac:: (import '_gen/rbac/main.libsonnet'),
+  resource:: (import '_gen/resource/main.libsonnet'),
   scheduling:: (import '_gen/scheduling/main.libsonnet'),
   storage:: (import '_gen/storage/main.libsonnet'),
 }
diff --git a/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/main.libsonnet b/operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet
similarity index 100%
rename from operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.21/main.libsonnet
rename to operations/jsonnet-compiled/util/vendor/github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet
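Usage sketch (illustrative only, not part of the vendored patch): the regenerated 1.29 bundle above introduces storage.v1alpha1.volumeAttributesClass with new(), withDriverName() and withParameters(); assuming the vendor directory is on the jsonnet search path, a consumer could build the new object roughly like this. The driver name and parameter values are hypothetical placeholders.

    // Minimal Jsonnet sketch against the regenerated 1.29 library.
    local k = import 'github.com/jsonnet-libs/k8s-libsonnet/1.29/main.libsonnet';
    local vac = k.storage.v1alpha1.volumeAttributesClass;

    {
      // new() sets apiVersion, kind and metadata.name, per the generated file above;
      // the remaining setters merge plain fields into the object.
      fast_iops_class:
        vac.new('fast-iops')
        + vac.withDriverName('csi.example.com')       // hypothetical CSI driver name
        + vac.withParameters({ iops: '8000' }),       // opaque, driver-defined parameters
    }

The diff earlier in this section likewise adds withVolumeAttributesClassName() to the inline PersistentVolume spec, which is how a volume would reference such a class.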